qemu.git: linux-user/signal.c
1 /*
2 * Emulation of Linux signals
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
35 static struct target_sigaction sigact_table[TARGET_NSIG];
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
83 static uint8_t target_to_host_signal_table[_NSIG];
85 static inline int on_sig_stack(unsigned long sp)
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
91 static inline int sas_ss_flags(unsigned long sp)
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
97 int host_to_target_signal(int sig)
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
104 int target_to_host_signal(int sig)
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
111 static inline void target_sigemptyset(target_sigset_t *set)
113 memset(set, 0, sizeof(*set));
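/* Guest signal sets store one bit per signal, TARGET_NSIG_BPW bits per
 * word, with signal number 1 occupying bit 0 of word 0.
 */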
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
144 target_sigset_t d1;
145 int i;
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
166 target_sigset_t s1;
167 int i;
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
185 target_sigset_t d;
186 int i;
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
194 int block_signals(void)
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
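    /* atomic_xchg returns the previous value of signal_pending: non-zero
     * means a signal was already pending, in which case callers restart
     * the syscall with -TARGET_ERESTARTSYS.
     */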
206 return atomic_xchg(&ts->signal_pending, 1);
/* Wrapper for the sigprocmask syscall.
 * Emulates sigprocmask in a way that is safe for the guest. Note that set
 * and oldset are host signal sets, not guest ones. Returns
 * -TARGET_ERESTARTSYS if a signal was already pending and the syscall must
 * be restarted, or 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
218 TaskState *ts = (TaskState *)thread_cpu->opaque;
220 if (oldset) {
221 *oldset = ts->signal_mask;
224 if (set) {
225 int i;
227 if (block_signals()) {
228 return -TARGET_ERESTARTSYS;
231 switch (how) {
232 case SIG_BLOCK:
233 sigorset(&ts->signal_mask, &ts->signal_mask, set);
234 break;
235 case SIG_UNBLOCK:
236 for (i = 1; i <= NSIG; ++i) {
237 if (sigismember(set, i)) {
238 sigdelset(&ts->signal_mask, i);
241 break;
242 case SIG_SETMASK:
243 ts->signal_mask = *set;
244 break;
245 default:
246 g_assert_not_reached();
249 /* Silently ignore attempts to change blocking status of KILL or STOP */
250 sigdelset(&ts->signal_mask, SIGKILL);
251 sigdelset(&ts->signal_mask, SIGSTOP);
253 return 0;
256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
257 !defined(TARGET_X86_64)
258 /* Just set the guest's signal mask to the specified value; the
259 * caller is assumed to have called block_signals() already.
261 static void set_sigmask(const sigset_t *set)
263 TaskState *ts = (TaskState *)thread_cpu->opaque;
265 ts->signal_mask = *set;
267 #endif
269 /* siginfo conversion */
271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
272 const siginfo_t *info)
274 int sig = host_to_target_signal(info->si_signo);
275 int si_code = info->si_code;
276 int si_type;
277 tinfo->si_signo = sig;
278 tinfo->si_errno = 0;
279 tinfo->si_code = info->si_code;
281 /* This is awkward, because we have to use a combination of
282 * the si_code and si_signo to figure out which of the union's
283 * members are valid. (Within the host kernel it is always possible
284 * to tell, but the kernel carefully avoids giving userspace the
285 * high 16 bits of si_code, so we don't have the information to
286 * do this the easy way...) We therefore make our best guess,
287 * bearing in mind that a guest can spoof most of the si_codes
288 * via rt_sigqueueinfo() if it likes.
290 * Once we have made our guess, we record it in the top 16 bits of
291 * the si_code, so that tswap_siginfo() later can use it.
292 * tswap_siginfo() will strip these top bits out before writing
293 * si_code to the guest (sign-extending the lower bits).
296 switch (si_code) {
297 case SI_USER:
298 case SI_TKILL:
299 case SI_KERNEL:
300 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
301 * These are the only unspoofable si_code values.
303 tinfo->_sifields._kill._pid = info->si_pid;
304 tinfo->_sifields._kill._uid = info->si_uid;
305 si_type = QEMU_SI_KILL;
306 break;
307 default:
308 /* Everything else is spoofable. Make best guess based on signal */
309 switch (sig) {
310 case TARGET_SIGCHLD:
311 tinfo->_sifields._sigchld._pid = info->si_pid;
312 tinfo->_sifields._sigchld._uid = info->si_uid;
313 tinfo->_sifields._sigchld._status
314 = host_to_target_waitstatus(info->si_status);
315 tinfo->_sifields._sigchld._utime = info->si_utime;
316 tinfo->_sifields._sigchld._stime = info->si_stime;
317 si_type = QEMU_SI_CHLD;
318 break;
319 case TARGET_SIGIO:
320 tinfo->_sifields._sigpoll._band = info->si_band;
321 tinfo->_sifields._sigpoll._fd = info->si_fd;
322 si_type = QEMU_SI_POLL;
323 break;
324 default:
325 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
326 tinfo->_sifields._rt._pid = info->si_pid;
327 tinfo->_sifields._rt._uid = info->si_uid;
328 /* XXX: potential problem if 64 bit */
329 tinfo->_sifields._rt._sigval.sival_ptr
330 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
331 si_type = QEMU_SI_RT;
332 break;
334 break;
337 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
340 static void tswap_siginfo(target_siginfo_t *tinfo,
341 const target_siginfo_t *info)
343 int si_type = extract32(info->si_code, 16, 16);
344 int si_code = sextract32(info->si_code, 0, 16);
346 __put_user(info->si_signo, &tinfo->si_signo);
347 __put_user(info->si_errno, &tinfo->si_errno);
348 __put_user(si_code, &tinfo->si_code);
350 /* We can use our internal marker of which fields in the structure
351 * are valid, rather than duplicating the guesswork of
352 * host_to_target_siginfo_noswap() here.
354 switch (si_type) {
355 case QEMU_SI_KILL:
356 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
357 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
358 break;
359 case QEMU_SI_TIMER:
360 __put_user(info->_sifields._timer._timer1,
361 &tinfo->_sifields._timer._timer1);
362 __put_user(info->_sifields._timer._timer2,
363 &tinfo->_sifields._timer._timer2);
364 break;
365 case QEMU_SI_POLL:
366 __put_user(info->_sifields._sigpoll._band,
367 &tinfo->_sifields._sigpoll._band);
368 __put_user(info->_sifields._sigpoll._fd,
369 &tinfo->_sifields._sigpoll._fd);
370 break;
371 case QEMU_SI_FAULT:
372 __put_user(info->_sifields._sigfault._addr,
373 &tinfo->_sifields._sigfault._addr);
374 break;
375 case QEMU_SI_CHLD:
376 __put_user(info->_sifields._sigchld._pid,
377 &tinfo->_sifields._sigchld._pid);
378 __put_user(info->_sifields._sigchld._uid,
379 &tinfo->_sifields._sigchld._uid);
380 __put_user(info->_sifields._sigchld._status,
381 &tinfo->_sifields._sigchld._status);
382 __put_user(info->_sifields._sigchld._utime,
383 &tinfo->_sifields._sigchld._utime);
384 __put_user(info->_sifields._sigchld._stime,
385 &tinfo->_sifields._sigchld._stime);
386 break;
387 case QEMU_SI_RT:
388 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
389 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
390 __put_user(info->_sifields._rt._sigval.sival_ptr,
391 &tinfo->_sifields._rt._sigval.sival_ptr);
392 break;
393 default:
394 g_assert_not_reached();
398 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
400 host_to_target_siginfo_noswap(tinfo, info);
401 tswap_siginfo(tinfo, tinfo);
/* XXX: we support only POSIX RT signals. */
405 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
406 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
408 /* This conversion is used only for the rt_sigqueueinfo syscall,
409 * and so we know that the _rt fields are the valid ones.
411 abi_ulong sival_ptr;
413 __get_user(info->si_signo, &tinfo->si_signo);
414 __get_user(info->si_errno, &tinfo->si_errno);
415 __get_user(info->si_code, &tinfo->si_code);
416 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
417 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
418 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
419 info->si_value.sival_ptr = (void *)(long)sival_ptr;
422 static int fatal_signal (int sig)
424 switch (sig) {
425 case TARGET_SIGCHLD:
426 case TARGET_SIGURG:
427 case TARGET_SIGWINCH:
428 /* Ignored by default. */
429 return 0;
430 case TARGET_SIGCONT:
431 case TARGET_SIGSTOP:
432 case TARGET_SIGTSTP:
433 case TARGET_SIGTTIN:
434 case TARGET_SIGTTOU:
435 /* Job control signals. */
436 return 0;
437 default:
438 return 1;
442 /* returns 1 if given signal should dump core if not handled */
443 static int core_dump_signal(int sig)
445 switch (sig) {
446 case TARGET_SIGABRT:
447 case TARGET_SIGFPE:
448 case TARGET_SIGILL:
449 case TARGET_SIGQUIT:
450 case TARGET_SIGSEGV:
451 case TARGET_SIGTRAP:
452 case TARGET_SIGBUS:
453 return (1);
454 default:
455 return (0);
459 void signal_init(void)
461 TaskState *ts = (TaskState *)thread_cpu->opaque;
462 struct sigaction act;
463 struct sigaction oact;
464 int i, j;
465 int host_sig;
467 /* generate signal conversion tables */
468 for(i = 1; i < _NSIG; i++) {
469 if (host_to_target_signal_table[i] == 0)
470 host_to_target_signal_table[i] = i;
472 for(i = 1; i < _NSIG; i++) {
473 j = host_to_target_signal_table[i];
474 target_to_host_signal_table[j] = i;
477 /* Set the signal mask from the host mask. */
478 sigprocmask(0, 0, &ts->signal_mask);
480 /* set all host signal handlers. ALL signals are blocked during
481 the handlers to serialize them. */
482 memset(sigact_table, 0, sizeof(sigact_table));
484 sigfillset(&act.sa_mask);
485 act.sa_flags = SA_SIGINFO;
486 act.sa_sigaction = host_signal_handler;
487 for(i = 1; i <= TARGET_NSIG; i++) {
488 host_sig = target_to_host_signal(i);
489 sigaction(host_sig, NULL, &oact);
490 if (oact.sa_sigaction == (void *)SIG_IGN) {
491 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
492 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
493 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
495 /* If there's already a handler installed then something has
496 gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS to detect exceptions. We cannot just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
501 if (fatal_signal (i))
502 sigaction(host_sig, &act, NULL);
507 /* abort execution with signal */
508 static void QEMU_NORETURN force_sig(int target_sig)
510 CPUState *cpu = thread_cpu;
511 CPUArchState *env = cpu->env_ptr;
512 TaskState *ts = (TaskState *)cpu->opaque;
513 int host_sig, core_dumped = 0;
514 struct sigaction act;
516 host_sig = target_to_host_signal(target_sig);
517 trace_user_force_sig(env, target_sig, host_sig);
518 gdb_signalled(env, target_sig);
520 /* dump core if supported by target binary format */
521 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
522 stop_all_tasks();
523 core_dumped =
524 ((*ts->bprm->core_dump)(target_sig, env) == 0);
526 if (core_dumped) {
527 /* we already dumped the core of target process, we don't want
528 * a coredump of qemu itself */
529 struct rlimit nodump;
530 getrlimit(RLIMIT_CORE, &nodump);
531 nodump.rlim_cur=0;
532 setrlimit(RLIMIT_CORE, &nodump);
533 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
534 target_sig, strsignal(host_sig), "core dumped" );
    /* The proper exit code for dying from an uncaught signal is
     * -<signal>. The kernel doesn't allow exit() or _exit() to pass
     * a negative value. To get the proper exit code we need to
     * actually die from an uncaught signal. So we install the default
     * signal handler, send ourselves the signal and wait for it to
     * arrive. */
543 sigfillset(&act.sa_mask);
544 act.sa_handler = SIG_DFL;
545 act.sa_flags = 0;
546 sigaction(host_sig, &act, NULL);
548 /* For some reason raise(host_sig) doesn't send the signal when
549 * statically linked on x86-64. */
550 kill(getpid(), host_sig);
552 /* Make sure the signal isn't masked (just reuse the mask inside
553 of act) */
554 sigdelset(&act.sa_mask, host_sig);
555 sigsuspend(&act.sa_mask);
557 /* unreachable */
558 abort();
/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
563 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
565 CPUState *cpu = ENV_GET_CPU(env);
566 TaskState *ts = cpu->opaque;
568 trace_user_queue_signal(env, sig);
570 /* Currently all callers define siginfo structures which
571 * use the _sifields._sigfault union member, so we can
572 * set the type here. If that changes we should push this
573 * out so the si_type is passed in by callers.
575 info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT);
577 ts->sync_signal.info = *info;
578 ts->sync_signal.pending = sig;
579 /* signal that a new signal is pending */
580 atomic_set(&ts->signal_pending, 1);
581 return 1; /* indicates that the signal was queued */
584 #ifndef HAVE_SAFE_SYSCALL
585 static inline void rewind_if_in_safe_syscall(void *puc)
587 /* Default version: never rewind */
589 #endif
591 static void host_signal_handler(int host_signum, siginfo_t *info,
592 void *puc)
594 CPUArchState *env = thread_cpu->env_ptr;
595 CPUState *cpu = ENV_GET_CPU(env);
596 TaskState *ts = cpu->opaque;
598 int sig;
599 target_siginfo_t tinfo;
600 ucontext_t *uc = puc;
601 struct emulated_sigtable *k;
    /* The CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it first. */
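    /* A si_code greater than zero means the signal was generated by the
     * kernel for a real fault, not sent with kill() or sigqueue().
     */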
605 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
606 && info->si_code > 0) {
607 if (cpu_signal_handler(host_signum, info, puc))
608 return;
611 /* get target signal number */
612 sig = host_to_target_signal(host_signum);
613 if (sig < 1 || sig > TARGET_NSIG)
614 return;
615 trace_user_host_signal(env, host_signum, sig);
617 rewind_if_in_safe_syscall(puc);
619 host_to_target_siginfo_noswap(&tinfo, info);
620 k = &ts->sigtab[sig - 1];
621 k->info = tinfo;
622 k->pending = sig;
623 ts->signal_pending = 1;
625 /* Block host signals until target signal handler entered. We
626 * can't block SIGSEGV or SIGBUS while we're executing guest
627 * code in case the guest code provokes one in the window between
628 * now and it getting out to the main loop. Signals will be
629 * unblocked again in process_pending_signals().
631 sigfillset(&uc->uc_sigmask);
632 sigdelset(&uc->uc_sigmask, SIGSEGV);
633 sigdelset(&uc->uc_sigmask, SIGBUS);
635 /* interrupt the virtual CPU as soon as possible */
636 cpu_exit(thread_cpu);
639 /* do_sigaltstack() returns target values and errnos. */
640 /* compare linux/kernel/signal.c:do_sigaltstack() */
641 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
643 int ret;
644 struct target_sigaltstack oss;
646 /* XXX: test errors */
647 if(uoss_addr)
649 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
650 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
651 __put_user(sas_ss_flags(sp), &oss.ss_flags);
654 if(uss_addr)
656 struct target_sigaltstack *uss;
657 struct target_sigaltstack ss;
658 size_t minstacksize = TARGET_MINSIGSTKSZ;
660 #if defined(TARGET_PPC64)
661 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
662 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
663 if (get_ppc64_abi(image) > 1) {
664 minstacksize = 4096;
666 #endif
668 ret = -TARGET_EFAULT;
669 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
670 goto out;
672 __get_user(ss.ss_sp, &uss->ss_sp);
673 __get_user(ss.ss_size, &uss->ss_size);
674 __get_user(ss.ss_flags, &uss->ss_flags);
675 unlock_user_struct(uss, uss_addr, 0);
677 ret = -TARGET_EPERM;
678 if (on_sig_stack(sp))
679 goto out;
681 ret = -TARGET_EINVAL;
682 if (ss.ss_flags != TARGET_SS_DISABLE
683 && ss.ss_flags != TARGET_SS_ONSTACK
684 && ss.ss_flags != 0)
685 goto out;
687 if (ss.ss_flags == TARGET_SS_DISABLE) {
688 ss.ss_size = 0;
689 ss.ss_sp = 0;
690 } else {
691 ret = -TARGET_ENOMEM;
692 if (ss.ss_size < minstacksize) {
693 goto out;
697 target_sigaltstack_used.ss_sp = ss.ss_sp;
698 target_sigaltstack_used.ss_size = ss.ss_size;
701 if (uoss_addr) {
702 ret = -TARGET_EFAULT;
703 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
704 goto out;
707 ret = 0;
708 out:
709 return ret;
712 /* do_sigaction() return target values and host errnos */
713 int do_sigaction(int sig, const struct target_sigaction *act,
714 struct target_sigaction *oact)
716 struct target_sigaction *k;
717 struct sigaction act1;
718 int host_sig;
719 int ret = 0;
721 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
722 return -TARGET_EINVAL;
725 if (block_signals()) {
726 return -TARGET_ERESTARTSYS;
729 k = &sigact_table[sig - 1];
730 if (oact) {
731 __put_user(k->_sa_handler, &oact->_sa_handler);
732 __put_user(k->sa_flags, &oact->sa_flags);
733 #if !defined(TARGET_MIPS)
734 __put_user(k->sa_restorer, &oact->sa_restorer);
735 #endif
736 /* Not swapped. */
737 oact->sa_mask = k->sa_mask;
739 if (act) {
740 /* FIXME: This is not threadsafe. */
741 __get_user(k->_sa_handler, &act->_sa_handler);
742 __get_user(k->sa_flags, &act->sa_flags);
743 #if !defined(TARGET_MIPS)
744 __get_user(k->sa_restorer, &act->sa_restorer);
745 #endif
746 /* To be swapped in target_to_host_sigset. */
747 k->sa_mask = act->sa_mask;
749 /* we update the host linux signal state */
750 host_sig = target_to_host_signal(sig);
751 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
752 sigfillset(&act1.sa_mask);
753 act1.sa_flags = SA_SIGINFO;
754 if (k->sa_flags & TARGET_SA_RESTART)
755 act1.sa_flags |= SA_RESTART;
756 /* NOTE: it is important to update the host kernel signal
757 ignore state to avoid getting unexpected interrupted
758 syscalls */
759 if (k->_sa_handler == TARGET_SIG_IGN) {
760 act1.sa_sigaction = (void *)SIG_IGN;
761 } else if (k->_sa_handler == TARGET_SIG_DFL) {
762 if (fatal_signal (sig))
763 act1.sa_sigaction = host_signal_handler;
764 else
765 act1.sa_sigaction = (void *)SIG_DFL;
766 } else {
767 act1.sa_sigaction = host_signal_handler;
769 ret = sigaction(host_sig, &act1, NULL);
772 return ret;
775 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
777 /* from the Linux kernel */
779 struct target_fpreg {
780 uint16_t significand[4];
781 uint16_t exponent;
784 struct target_fpxreg {
785 uint16_t significand[4];
786 uint16_t exponent;
787 uint16_t padding[3];
790 struct target_xmmreg {
791 abi_ulong element[4];
794 struct target_fpstate {
795 /* Regular FPU environment */
796 abi_ulong cw;
797 abi_ulong sw;
798 abi_ulong tag;
799 abi_ulong ipoff;
800 abi_ulong cssel;
801 abi_ulong dataoff;
802 abi_ulong datasel;
803 struct target_fpreg _st[8];
804 uint16_t status;
805 uint16_t magic; /* 0xffff = regular FPU data only */
807 /* FXSR FPU environment */
808 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
809 abi_ulong mxcsr;
810 abi_ulong reserved;
811 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
812 struct target_xmmreg _xmm[8];
813 abi_ulong padding[56];
816 #define X86_FXSR_MAGIC 0x0000
818 struct target_sigcontext {
819 uint16_t gs, __gsh;
820 uint16_t fs, __fsh;
821 uint16_t es, __esh;
822 uint16_t ds, __dsh;
823 abi_ulong edi;
824 abi_ulong esi;
825 abi_ulong ebp;
826 abi_ulong esp;
827 abi_ulong ebx;
828 abi_ulong edx;
829 abi_ulong ecx;
830 abi_ulong eax;
831 abi_ulong trapno;
832 abi_ulong err;
833 abi_ulong eip;
834 uint16_t cs, __csh;
835 abi_ulong eflags;
836 abi_ulong esp_at_signal;
837 uint16_t ss, __ssh;
838 abi_ulong fpstate; /* pointer */
839 abi_ulong oldmask;
840 abi_ulong cr2;
843 struct target_ucontext {
844 abi_ulong tuc_flags;
845 abi_ulong tuc_link;
846 target_stack_t tuc_stack;
847 struct target_sigcontext tuc_mcontext;
848 target_sigset_t tuc_sigmask; /* mask last for extensibility */
851 struct sigframe
853 abi_ulong pretcode;
854 int sig;
855 struct target_sigcontext sc;
856 struct target_fpstate fpstate;
857 abi_ulong extramask[TARGET_NSIG_WORDS-1];
858 char retcode[8];
861 struct rt_sigframe
863 abi_ulong pretcode;
864 int sig;
865 abi_ulong pinfo;
866 abi_ulong puc;
867 struct target_siginfo info;
868 struct target_ucontext uc;
869 struct target_fpstate fpstate;
870 char retcode[8];
874 * Set up a signal frame.
877 /* XXX: save x87 state */
878 static void setup_sigcontext(struct target_sigcontext *sc,
879 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
880 abi_ulong fpstate_addr)
882 CPUState *cs = CPU(x86_env_get_cpu(env));
883 uint16_t magic;
885 /* already locked in setup_frame() */
886 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
887 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
888 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
889 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
890 __put_user(env->regs[R_EDI], &sc->edi);
891 __put_user(env->regs[R_ESI], &sc->esi);
892 __put_user(env->regs[R_EBP], &sc->ebp);
893 __put_user(env->regs[R_ESP], &sc->esp);
894 __put_user(env->regs[R_EBX], &sc->ebx);
895 __put_user(env->regs[R_EDX], &sc->edx);
896 __put_user(env->regs[R_ECX], &sc->ecx);
897 __put_user(env->regs[R_EAX], &sc->eax);
898 __put_user(cs->exception_index, &sc->trapno);
899 __put_user(env->error_code, &sc->err);
900 __put_user(env->eip, &sc->eip);
901 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
902 __put_user(env->eflags, &sc->eflags);
903 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
904 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
906 cpu_x86_fsave(env, fpstate_addr, 1);
907 fpstate->status = fpstate->sw;
908 magic = 0xffff;
909 __put_user(magic, &fpstate->magic);
910 __put_user(fpstate_addr, &sc->fpstate);
912 /* non-iBCS2 extensions.. */
913 __put_user(mask, &sc->oldmask);
914 __put_user(env->cr[2], &sc->cr2);
918 * Determine which stack to use..
921 static inline abi_ulong
922 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
924 unsigned long esp;
926 /* Default to using normal stack */
927 esp = env->regs[R_ESP];
928 /* This is the X/Open sanctioned signal stack switching. */
929 if (ka->sa_flags & TARGET_SA_ONSTACK) {
930 if (sas_ss_flags(esp) == 0) {
931 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
933 } else {
935 /* This is the legacy signal stack switching. */
936 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
937 !(ka->sa_flags & TARGET_SA_RESTORER) &&
938 ka->sa_restorer) {
939 esp = (unsigned long) ka->sa_restorer;
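    /* Align the frame to an 8-byte boundary. */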
942 return (esp - frame_size) & -8ul;
945 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
946 static void setup_frame(int sig, struct target_sigaction *ka,
947 target_sigset_t *set, CPUX86State *env)
949 abi_ulong frame_addr;
950 struct sigframe *frame;
951 int i;
953 frame_addr = get_sigframe(ka, env, sizeof(*frame));
954 trace_user_setup_frame(env, frame_addr);
956 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
957 goto give_sigsegv;
959 __put_user(sig, &frame->sig);
961 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
962 frame_addr + offsetof(struct sigframe, fpstate));
964 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
965 __put_user(set->sig[i], &frame->extramask[i - 1]);
968 /* Set up to return from userspace. If provided, use a stub
969 already in userspace. */
970 if (ka->sa_flags & TARGET_SA_RESTORER) {
971 __put_user(ka->sa_restorer, &frame->pretcode);
972 } else {
973 uint16_t val16;
974 abi_ulong retcode_addr;
975 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
976 __put_user(retcode_addr, &frame->pretcode);
977 /* This is popl %eax ; movl $,%eax ; int $0x80 */
978 val16 = 0xb858;
979 __put_user(val16, (uint16_t *)(frame->retcode+0));
980 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
981 val16 = 0x80cd;
982 __put_user(val16, (uint16_t *)(frame->retcode+6));
986 /* Set up registers for signal handler */
987 env->regs[R_ESP] = frame_addr;
988 env->eip = ka->_sa_handler;
990 cpu_x86_load_seg(env, R_DS, __USER_DS);
991 cpu_x86_load_seg(env, R_ES, __USER_DS);
992 cpu_x86_load_seg(env, R_SS, __USER_DS);
993 cpu_x86_load_seg(env, R_CS, __USER_CS);
994 env->eflags &= ~TF_MASK;
996 unlock_user_struct(frame, frame_addr, 1);
998 return;
1000 give_sigsegv:
1001 if (sig == TARGET_SIGSEGV) {
1002 ka->_sa_handler = TARGET_SIG_DFL;
1004 force_sig(TARGET_SIGSEGV /* , current */);
1007 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
1008 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1009 target_siginfo_t *info,
1010 target_sigset_t *set, CPUX86State *env)
1012 abi_ulong frame_addr, addr;
1013 struct rt_sigframe *frame;
1014 int i;
1016 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1017 trace_user_setup_rt_frame(env, frame_addr);
1019 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1020 goto give_sigsegv;
1022 __put_user(sig, &frame->sig);
1023 addr = frame_addr + offsetof(struct rt_sigframe, info);
1024 __put_user(addr, &frame->pinfo);
1025 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1026 __put_user(addr, &frame->puc);
1027 tswap_siginfo(&frame->info, info);
1029 /* Create the ucontext. */
1030 __put_user(0, &frame->uc.tuc_flags);
1031 __put_user(0, &frame->uc.tuc_link);
1032 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1033 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1034 &frame->uc.tuc_stack.ss_flags);
1035 __put_user(target_sigaltstack_used.ss_size,
1036 &frame->uc.tuc_stack.ss_size);
1037 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1038 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1040 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1041 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1044 /* Set up to return from userspace. If provided, use a stub
1045 already in userspace. */
1046 if (ka->sa_flags & TARGET_SA_RESTORER) {
1047 __put_user(ka->sa_restorer, &frame->pretcode);
1048 } else {
1049 uint16_t val16;
1050 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1051 __put_user(addr, &frame->pretcode);
1052 /* This is movl $,%eax ; int $0x80 */
1053 __put_user(0xb8, (char *)(frame->retcode+0));
1054 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1055 val16 = 0x80cd;
1056 __put_user(val16, (uint16_t *)(frame->retcode+5));
1059 /* Set up registers for signal handler */
1060 env->regs[R_ESP] = frame_addr;
1061 env->eip = ka->_sa_handler;
1063 cpu_x86_load_seg(env, R_DS, __USER_DS);
1064 cpu_x86_load_seg(env, R_ES, __USER_DS);
1065 cpu_x86_load_seg(env, R_SS, __USER_DS);
1066 cpu_x86_load_seg(env, R_CS, __USER_CS);
1067 env->eflags &= ~TF_MASK;
1069 unlock_user_struct(frame, frame_addr, 1);
1071 return;
1073 give_sigsegv:
1074 if (sig == TARGET_SIGSEGV) {
1075 ka->_sa_handler = TARGET_SIG_DFL;
1077 force_sig(TARGET_SIGSEGV /* , current */);
1080 static int
1081 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1083 unsigned int err = 0;
1084 abi_ulong fpstate_addr;
1085 unsigned int tmpflags;
1087 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1088 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1089 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1090 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1092 env->regs[R_EDI] = tswapl(sc->edi);
1093 env->regs[R_ESI] = tswapl(sc->esi);
1094 env->regs[R_EBP] = tswapl(sc->ebp);
1095 env->regs[R_ESP] = tswapl(sc->esp);
1096 env->regs[R_EBX] = tswapl(sc->ebx);
1097 env->regs[R_EDX] = tswapl(sc->edx);
1098 env->regs[R_ECX] = tswapl(sc->ecx);
1099 env->regs[R_EAX] = tswapl(sc->eax);
1100 env->eip = tswapl(sc->eip);
1102 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1103 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1105 tmpflags = tswapl(sc->eflags);
1106 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1107 // regs->orig_eax = -1; /* disable syscall checks */
1109 fpstate_addr = tswapl(sc->fpstate);
1110 if (fpstate_addr != 0) {
1111 if (!access_ok(VERIFY_READ, fpstate_addr,
1112 sizeof(struct target_fpstate)))
1113 goto badframe;
1114 cpu_x86_frstor(env, fpstate_addr, 1);
1117 return err;
1118 badframe:
1119 return 1;
1122 long do_sigreturn(CPUX86State *env)
1124 struct sigframe *frame;
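    /* By the time sigreturn runs, the handler's 'ret' and the trampoline's
     * 'popl %eax' have popped pretcode and sig, so the frame starts 8 bytes
     * below the current ESP.
     */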
1125 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1126 target_sigset_t target_set;
1127 sigset_t set;
1128 int i;
1130 trace_user_do_sigreturn(env, frame_addr);
1131 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1132 goto badframe;
1133 /* set blocked signals */
1134 __get_user(target_set.sig[0], &frame->sc.oldmask);
1135 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1136 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1139 target_to_host_sigset_internal(&set, &target_set);
1140 set_sigmask(&set);
1142 /* restore registers */
1143 if (restore_sigcontext(env, &frame->sc))
1144 goto badframe;
1145 unlock_user_struct(frame, frame_addr, 0);
1146 return -TARGET_QEMU_ESIGRETURN;
1148 badframe:
1149 unlock_user_struct(frame, frame_addr, 0);
1150 force_sig(TARGET_SIGSEGV);
1151 return 0;
1154 long do_rt_sigreturn(CPUX86State *env)
1156 abi_ulong frame_addr;
1157 struct rt_sigframe *frame;
1158 sigset_t set;
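    /* Only pretcode has been popped by the handler's 'ret', so the rt frame
     * starts 4 bytes below the current ESP.
     */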
1160 frame_addr = env->regs[R_ESP] - 4;
1161 trace_user_do_rt_sigreturn(env, frame_addr);
1162 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1163 goto badframe;
1164 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1165 set_sigmask(&set);
1167 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1168 goto badframe;
1171 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1172 get_sp_from_cpustate(env)) == -EFAULT) {
1173 goto badframe;
1176 unlock_user_struct(frame, frame_addr, 0);
1177 return -TARGET_QEMU_ESIGRETURN;
1179 badframe:
1180 unlock_user_struct(frame, frame_addr, 0);
1181 force_sig(TARGET_SIGSEGV);
1182 return 0;
1185 #elif defined(TARGET_AARCH64)
1187 struct target_sigcontext {
1188 uint64_t fault_address;
1189 /* AArch64 registers */
1190 uint64_t regs[31];
1191 uint64_t sp;
1192 uint64_t pc;
1193 uint64_t pstate;
1194 /* 4K reserved for FP/SIMD state and future expansion */
1195 char __reserved[4096] __attribute__((__aligned__(16)));
1198 struct target_ucontext {
1199 abi_ulong tuc_flags;
1200 abi_ulong tuc_link;
1201 target_stack_t tuc_stack;
1202 target_sigset_t tuc_sigmask;
1203 /* glibc uses a 1024-bit sigset_t */
1204 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1205 /* last for future expansion */
1206 struct target_sigcontext tuc_mcontext;
1210 * Header to be used at the beginning of structures extending the user
1211 * context. Such structures must be placed after the rt_sigframe on the stack
1212 * and be 16-byte aligned. The last structure must be a dummy one with the
1213 * magic and size set to 0.
1215 struct target_aarch64_ctx {
1216 uint32_t magic;
1217 uint32_t size;
1220 #define TARGET_FPSIMD_MAGIC 0x46508001
1222 struct target_fpsimd_context {
1223 struct target_aarch64_ctx head;
1224 uint32_t fpsr;
1225 uint32_t fpcr;
1226 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1230 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1231 * user space as it will change with the addition of new context. User space
1232 * should check the magic/size information.
1234 struct target_aux_context {
1235 struct target_fpsimd_context fpsimd;
1236 /* additional context to be added before "end" */
1237 struct target_aarch64_ctx end;
1240 struct target_rt_sigframe {
1241 struct target_siginfo info;
1242 struct target_ucontext uc;
1243 uint64_t fp;
1244 uint64_t lr;
1245 uint32_t tramp[2];
1248 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1249 CPUARMState *env, target_sigset_t *set)
1251 int i;
1252 struct target_aux_context *aux =
1253 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1255 /* set up the stack frame for unwinding */
1256 __put_user(env->xregs[29], &sf->fp);
1257 __put_user(env->xregs[30], &sf->lr);
1259 for (i = 0; i < 31; i++) {
1260 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1262 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1263 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1264 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1266 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1268 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1269 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1272 for (i = 0; i < 32; i++) {
1273 #ifdef TARGET_WORDS_BIGENDIAN
1274 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1275 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1276 #else
1277 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1278 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1279 #endif
1281 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1282 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1283 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1284 __put_user(sizeof(struct target_fpsimd_context),
1285 &aux->fpsimd.head.size);
1287 /* set the "end" magic */
1288 __put_user(0, &aux->end.magic);
1289 __put_user(0, &aux->end.size);
1291 return 0;
1294 static int target_restore_sigframe(CPUARMState *env,
1295 struct target_rt_sigframe *sf)
1297 sigset_t set;
1298 int i;
1299 struct target_aux_context *aux =
1300 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1301 uint32_t magic, size, fpsr, fpcr;
1302 uint64_t pstate;
1304 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1305 set_sigmask(&set);
1307 for (i = 0; i < 31; i++) {
1308 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1311 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1312 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1313 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1314 pstate_write(env, pstate);
1316 __get_user(magic, &aux->fpsimd.head.magic);
1317 __get_user(size, &aux->fpsimd.head.size);
1319 if (magic != TARGET_FPSIMD_MAGIC
1320 || size != sizeof(struct target_fpsimd_context)) {
1321 return 1;
1324 for (i = 0; i < 32; i++) {
1325 #ifdef TARGET_WORDS_BIGENDIAN
1326 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1327 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1328 #else
1329 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1330 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1331 #endif
1333 __get_user(fpsr, &aux->fpsimd.fpsr);
1334 vfp_set_fpsr(env, fpsr);
1335 __get_user(fpcr, &aux->fpsimd.fpcr);
1336 vfp_set_fpcr(env, fpcr);
1338 return 0;
1341 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1343 abi_ulong sp;
1345 sp = env->xregs[31];
1348 * This is the X/Open sanctioned signal stack switching.
1350 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1351 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
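    /* The AArch64 ABI requires the stack pointer to be 16-byte aligned. */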
1354 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1356 return sp;
1359 static void target_setup_frame(int usig, struct target_sigaction *ka,
1360 target_siginfo_t *info, target_sigset_t *set,
1361 CPUARMState *env)
1363 struct target_rt_sigframe *frame;
1364 abi_ulong frame_addr, return_addr;
1366 frame_addr = get_sigframe(ka, env);
1367 trace_user_setup_frame(env, frame_addr);
1368 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1369 goto give_sigsegv;
1372 __put_user(0, &frame->uc.tuc_flags);
1373 __put_user(0, &frame->uc.tuc_link);
1375 __put_user(target_sigaltstack_used.ss_sp,
1376 &frame->uc.tuc_stack.ss_sp);
1377 __put_user(sas_ss_flags(env->xregs[31]),
1378 &frame->uc.tuc_stack.ss_flags);
1379 __put_user(target_sigaltstack_used.ss_size,
1380 &frame->uc.tuc_stack.ss_size);
1381 target_setup_sigframe(frame, env, set);
1382 if (ka->sa_flags & TARGET_SA_RESTORER) {
1383 return_addr = ka->sa_restorer;
1384 } else {
1385 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1386 __put_user(0xd2801168, &frame->tramp[0]);
1387 __put_user(0xd4000001, &frame->tramp[1]);
1388 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1390 env->xregs[0] = usig;
1391 env->xregs[31] = frame_addr;
1392 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1393 env->pc = ka->_sa_handler;
1394 env->xregs[30] = return_addr;
1395 if (info) {
1396 tswap_siginfo(&frame->info, info);
1397 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1398 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1401 unlock_user_struct(frame, frame_addr, 1);
1402 return;
1404 give_sigsegv:
1405 unlock_user_struct(frame, frame_addr, 1);
1406 force_sig(TARGET_SIGSEGV);
1409 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1410 target_siginfo_t *info, target_sigset_t *set,
1411 CPUARMState *env)
1413 target_setup_frame(sig, ka, info, set, env);
1416 static void setup_frame(int sig, struct target_sigaction *ka,
1417 target_sigset_t *set, CPUARMState *env)
1419 target_setup_frame(sig, ka, 0, set, env);
1422 long do_rt_sigreturn(CPUARMState *env)
1424 struct target_rt_sigframe *frame = NULL;
1425 abi_ulong frame_addr = env->xregs[31];
1427 trace_user_do_rt_sigreturn(env, frame_addr);
1428 if (frame_addr & 15) {
1429 goto badframe;
1432 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1433 goto badframe;
1436 if (target_restore_sigframe(env, frame)) {
1437 goto badframe;
1440 if (do_sigaltstack(frame_addr +
1441 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1442 0, get_sp_from_cpustate(env)) == -EFAULT) {
1443 goto badframe;
1446 unlock_user_struct(frame, frame_addr, 0);
1447 return -TARGET_QEMU_ESIGRETURN;
1449 badframe:
1450 unlock_user_struct(frame, frame_addr, 0);
1451 force_sig(TARGET_SIGSEGV);
1452 return 0;
1455 long do_sigreturn(CPUARMState *env)
1457 return do_rt_sigreturn(env);
1460 #elif defined(TARGET_ARM)
1462 struct target_sigcontext {
1463 abi_ulong trap_no;
1464 abi_ulong error_code;
1465 abi_ulong oldmask;
1466 abi_ulong arm_r0;
1467 abi_ulong arm_r1;
1468 abi_ulong arm_r2;
1469 abi_ulong arm_r3;
1470 abi_ulong arm_r4;
1471 abi_ulong arm_r5;
1472 abi_ulong arm_r6;
1473 abi_ulong arm_r7;
1474 abi_ulong arm_r8;
1475 abi_ulong arm_r9;
1476 abi_ulong arm_r10;
1477 abi_ulong arm_fp;
1478 abi_ulong arm_ip;
1479 abi_ulong arm_sp;
1480 abi_ulong arm_lr;
1481 abi_ulong arm_pc;
1482 abi_ulong arm_cpsr;
1483 abi_ulong fault_address;
1486 struct target_ucontext_v1 {
1487 abi_ulong tuc_flags;
1488 abi_ulong tuc_link;
1489 target_stack_t tuc_stack;
1490 struct target_sigcontext tuc_mcontext;
1491 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1494 struct target_ucontext_v2 {
1495 abi_ulong tuc_flags;
1496 abi_ulong tuc_link;
1497 target_stack_t tuc_stack;
1498 struct target_sigcontext tuc_mcontext;
1499 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1500 char __unused[128 - sizeof(target_sigset_t)];
1501 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1504 struct target_user_vfp {
1505 uint64_t fpregs[32];
1506 abi_ulong fpscr;
1509 struct target_user_vfp_exc {
1510 abi_ulong fpexc;
1511 abi_ulong fpinst;
1512 abi_ulong fpinst2;
1515 struct target_vfp_sigframe {
1516 abi_ulong magic;
1517 abi_ulong size;
1518 struct target_user_vfp ufp;
1519 struct target_user_vfp_exc ufp_exc;
1520 } __attribute__((__aligned__(8)));
1522 struct target_iwmmxt_sigframe {
1523 abi_ulong magic;
1524 abi_ulong size;
1525 uint64_t regs[16];
1526 /* Note that not all the coprocessor control registers are stored here */
1527 uint32_t wcssf;
1528 uint32_t wcasf;
1529 uint32_t wcgr0;
1530 uint32_t wcgr1;
1531 uint32_t wcgr2;
1532 uint32_t wcgr3;
1533 } __attribute__((__aligned__(8)));
1535 #define TARGET_VFP_MAGIC 0x56465001
1536 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1538 struct sigframe_v1
1540 struct target_sigcontext sc;
1541 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1542 abi_ulong retcode;
1545 struct sigframe_v2
1547 struct target_ucontext_v2 uc;
1548 abi_ulong retcode;
1551 struct rt_sigframe_v1
1553 abi_ulong pinfo;
1554 abi_ulong puc;
1555 struct target_siginfo info;
1556 struct target_ucontext_v1 uc;
1557 abi_ulong retcode;
1560 struct rt_sigframe_v2
1562 struct target_siginfo info;
1563 struct target_ucontext_v2 uc;
1564 abi_ulong retcode;
1567 #define TARGET_CONFIG_CPU_32 1
1570 * For ARM syscalls, we encode the syscall number into the instruction.
1572 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1573 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1576 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1577 * need two 16-bit instructions.
1579 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1580 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1582 static const abi_ulong retcodes[4] = {
1583 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1584 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1588 static inline int valid_user_regs(CPUARMState *regs)
1590 return 1;
1593 static void
1594 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1595 CPUARMState *env, abi_ulong mask)
1597 __put_user(env->regs[0], &sc->arm_r0);
1598 __put_user(env->regs[1], &sc->arm_r1);
1599 __put_user(env->regs[2], &sc->arm_r2);
1600 __put_user(env->regs[3], &sc->arm_r3);
1601 __put_user(env->regs[4], &sc->arm_r4);
1602 __put_user(env->regs[5], &sc->arm_r5);
1603 __put_user(env->regs[6], &sc->arm_r6);
1604 __put_user(env->regs[7], &sc->arm_r7);
1605 __put_user(env->regs[8], &sc->arm_r8);
1606 __put_user(env->regs[9], &sc->arm_r9);
1607 __put_user(env->regs[10], &sc->arm_r10);
1608 __put_user(env->regs[11], &sc->arm_fp);
1609 __put_user(env->regs[12], &sc->arm_ip);
1610 __put_user(env->regs[13], &sc->arm_sp);
1611 __put_user(env->regs[14], &sc->arm_lr);
1612 __put_user(env->regs[15], &sc->arm_pc);
1613 #ifdef TARGET_CONFIG_CPU_32
1614 __put_user(cpsr_read(env), &sc->arm_cpsr);
1615 #endif
1617 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1618 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1619 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1620 __put_user(mask, &sc->oldmask);
1623 static inline abi_ulong
1624 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1626 unsigned long sp = regs->regs[13];
1629 * This is the X/Open sanctioned signal stack switching.
1631 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1632 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1635 * ATPCS B01 mandates 8-byte alignment
1637 return (sp - framesize) & ~7;
1640 static void
1641 setup_return(CPUARMState *env, struct target_sigaction *ka,
1642 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1644 abi_ulong handler = ka->_sa_handler;
1645 abi_ulong retcode;
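    /* Bit 0 of the handler address selects Thumb state, as with BX. */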
1646 int thumb = handler & 1;
1647 uint32_t cpsr = cpsr_read(env);
1649 cpsr &= ~CPSR_IT;
1650 if (thumb) {
1651 cpsr |= CPSR_T;
1652 } else {
1653 cpsr &= ~CPSR_T;
1656 if (ka->sa_flags & TARGET_SA_RESTORER) {
1657 retcode = ka->sa_restorer;
1658 } else {
1659 unsigned int idx = thumb;
1661 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1662 idx += 2;
1665 __put_user(retcodes[idx], rc);
1667 retcode = rc_addr + thumb;
1670 env->regs[0] = usig;
1671 env->regs[13] = frame_addr;
1672 env->regs[14] = retcode;
1673 env->regs[15] = handler & (thumb ? ~1 : ~3);
1674 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1677 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1679 int i;
1680 struct target_vfp_sigframe *vfpframe;
1681 vfpframe = (struct target_vfp_sigframe *)regspace;
1682 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1683 __put_user(sizeof(*vfpframe), &vfpframe->size);
1684 for (i = 0; i < 32; i++) {
1685 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1687 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1688 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1689 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1690 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1691 return (abi_ulong*)(vfpframe+1);
1694 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1695 CPUARMState *env)
1697 int i;
1698 struct target_iwmmxt_sigframe *iwmmxtframe;
1699 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1700 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1701 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1702 for (i = 0; i < 16; i++) {
1703 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1705 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1707 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1708 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1709 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1710 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1711 return (abi_ulong*)(iwmmxtframe+1);
1714 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1715 target_sigset_t *set, CPUARMState *env)
1717 struct target_sigaltstack stack;
1718 int i;
1719 abi_ulong *regspace;
1721 /* Clear all the bits of the ucontext we don't use. */
1722 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1724 memset(&stack, 0, sizeof(stack));
1725 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1726 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1727 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1728 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1730 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1731 /* Save coprocessor signal frame. */
1732 regspace = uc->tuc_regspace;
1733 if (arm_feature(env, ARM_FEATURE_VFP)) {
1734 regspace = setup_sigframe_v2_vfp(regspace, env);
1736 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1737 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1740 /* Write terminating magic word */
1741 __put_user(0, regspace);
1743 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1744 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1748 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1749 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1750 target_sigset_t *set, CPUARMState *regs)
1752 struct sigframe_v1 *frame;
1753 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1754 int i;
1756 trace_user_setup_frame(regs, frame_addr);
1757 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1758 return;
1761 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1763 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1764 __put_user(set->sig[i], &frame->extramask[i - 1]);
1767 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1768 frame_addr + offsetof(struct sigframe_v1, retcode));
1770 unlock_user_struct(frame, frame_addr, 1);
1773 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1774 target_sigset_t *set, CPUARMState *regs)
1776 struct sigframe_v2 *frame;
1777 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1779 trace_user_setup_frame(regs, frame_addr);
1780 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1781 return;
1784 setup_sigframe_v2(&frame->uc, set, regs);
1786 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1787 frame_addr + offsetof(struct sigframe_v2, retcode));
1789 unlock_user_struct(frame, frame_addr, 1);
1792 static void setup_frame(int usig, struct target_sigaction *ka,
1793 target_sigset_t *set, CPUARMState *regs)
1795 if (get_osversion() >= 0x020612) {
1796 setup_frame_v2(usig, ka, set, regs);
1797 } else {
1798 setup_frame_v1(usig, ka, set, regs);
1802 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1803 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1804 target_siginfo_t *info,
1805 target_sigset_t *set, CPUARMState *env)
1807 struct rt_sigframe_v1 *frame;
1808 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1809 struct target_sigaltstack stack;
1810 int i;
1811 abi_ulong info_addr, uc_addr;
1813 trace_user_setup_rt_frame(env, frame_addr);
1814 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1815 return /* 1 */;
1818 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1819 __put_user(info_addr, &frame->pinfo);
1820 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1821 __put_user(uc_addr, &frame->puc);
1822 tswap_siginfo(&frame->info, info);
1824 /* Clear all the bits of the ucontext we don't use. */
1825 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1827 memset(&stack, 0, sizeof(stack));
1828 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1829 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1830 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1831 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1833 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1834 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1835 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1838 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1839 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1841 env->regs[1] = info_addr;
1842 env->regs[2] = uc_addr;
1844 unlock_user_struct(frame, frame_addr, 1);
1847 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1848 target_siginfo_t *info,
1849 target_sigset_t *set, CPUARMState *env)
1851 struct rt_sigframe_v2 *frame;
1852 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1853 abi_ulong info_addr, uc_addr;
1855 trace_user_setup_rt_frame(env, frame_addr);
1856 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1857 return /* 1 */;
1860 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1861 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1862 tswap_siginfo(&frame->info, info);
1864 setup_sigframe_v2(&frame->uc, set, env);
1866 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1867 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1869 env->regs[1] = info_addr;
1870 env->regs[2] = uc_addr;
1872 unlock_user_struct(frame, frame_addr, 1);
1875 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1876 target_siginfo_t *info,
1877 target_sigset_t *set, CPUARMState *env)
1879 if (get_osversion() >= 0x020612) {
1880 setup_rt_frame_v2(usig, ka, info, set, env);
1881 } else {
1882 setup_rt_frame_v1(usig, ka, info, set, env);
1886 static int
1887 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1889 int err = 0;
1890 uint32_t cpsr;
1892 __get_user(env->regs[0], &sc->arm_r0);
1893 __get_user(env->regs[1], &sc->arm_r1);
1894 __get_user(env->regs[2], &sc->arm_r2);
1895 __get_user(env->regs[3], &sc->arm_r3);
1896 __get_user(env->regs[4], &sc->arm_r4);
1897 __get_user(env->regs[5], &sc->arm_r5);
1898 __get_user(env->regs[6], &sc->arm_r6);
1899 __get_user(env->regs[7], &sc->arm_r7);
1900 __get_user(env->regs[8], &sc->arm_r8);
1901 __get_user(env->regs[9], &sc->arm_r9);
1902 __get_user(env->regs[10], &sc->arm_r10);
1903 __get_user(env->regs[11], &sc->arm_fp);
1904 __get_user(env->regs[12], &sc->arm_ip);
1905 __get_user(env->regs[13], &sc->arm_sp);
1906 __get_user(env->regs[14], &sc->arm_lr);
1907 __get_user(env->regs[15], &sc->arm_pc);
1908 #ifdef TARGET_CONFIG_CPU_32
1909 __get_user(cpsr, &sc->arm_cpsr);
1910 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1911 #endif
1913 err |= !valid_user_regs(env);
1915 return err;
1918 static long do_sigreturn_v1(CPUARMState *env)
1920 abi_ulong frame_addr;
1921 struct sigframe_v1 *frame = NULL;
1922 target_sigset_t set;
1923 sigset_t host_set;
1924 int i;
     * Since we stacked the signal on a 64-bit boundary,
     * 'sp' should be word aligned here. If it's not,
     * the user is trying to mess with us.
1931 frame_addr = env->regs[13];
1932 trace_user_do_sigreturn(env, frame_addr);
1933 if (frame_addr & 7) {
1934 goto badframe;
1937 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1938 goto badframe;
1941 __get_user(set.sig[0], &frame->sc.oldmask);
1942 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1943 __get_user(set.sig[i], &frame->extramask[i - 1]);
1946 target_to_host_sigset_internal(&host_set, &set);
1947 set_sigmask(&host_set);
1949 if (restore_sigcontext(env, &frame->sc)) {
1950 goto badframe;
1953 #if 0
1954 /* Send SIGTRAP if we're single-stepping */
1955 if (ptrace_cancel_bpt(current))
1956 send_sig(SIGTRAP, current, 1);
1957 #endif
1958 unlock_user_struct(frame, frame_addr, 0);
1959 return -TARGET_QEMU_ESIGRETURN;
1961 badframe:
1962 force_sig(TARGET_SIGSEGV /* , current */);
1963 return 0;
1966 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1968 int i;
1969 abi_ulong magic, sz;
1970 uint32_t fpscr, fpexc;
1971 struct target_vfp_sigframe *vfpframe;
1972 vfpframe = (struct target_vfp_sigframe *)regspace;
1974 __get_user(magic, &vfpframe->magic);
1975 __get_user(sz, &vfpframe->size);
1976 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1977 return 0;
1979 for (i = 0; i < 32; i++) {
1980 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1982 __get_user(fpscr, &vfpframe->ufp.fpscr);
1983 vfp_set_fpscr(env, fpscr);
1984 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1985 /* Sanitise FPEXC: ensure VFP is enabled (EN, bit 30, set), FPINST2 is
1986 * invalid (FP2V, bit 28, cleared) and the exception flag (EX, bit 31) is cleared
1988 fpexc |= (1 << 30);
1989 fpexc &= ~((1 << 31) | (1 << 28));
1990 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1991 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1992 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1993 return (abi_ulong*)(vfpframe + 1);
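/* Each coprocessor record in the v2 regspace begins with a magic/size
 * header. On a match the record is consumed and a pointer just past it is
 * returned; on a mismatch 0 is returned and the caller raises SIGSEGV. */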
1996 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1997 abi_ulong *regspace)
1999 int i;
2000 abi_ulong magic, sz;
2001 struct target_iwmmxt_sigframe *iwmmxtframe;
2002 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2004 __get_user(magic, &iwmmxtframe->magic);
2005 __get_user(sz, &iwmmxtframe->size);
2006 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2007 return 0;
2009 for (i = 0; i < 16; i++) {
2010 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2012 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2013 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2014 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2015 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2016 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2017 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2018 return (abi_ulong*)(iwmmxtframe + 1);
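/* Common v2 restore path: bring back the signal mask and core registers
 * from the ucontext, then walk the coprocessor area in the order it was
 * written (VFP record first, then iWMMXt) and finally restore the
 * signal-altstack settings. */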
2021 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
2022 struct target_ucontext_v2 *uc)
2024 sigset_t host_set;
2025 abi_ulong *regspace;
2027 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2028 set_sigmask(&host_set);
2030 if (restore_sigcontext(env, &uc->tuc_mcontext))
2031 return 1;
2033 /* Restore coprocessor signal frame */
2034 regspace = uc->tuc_regspace;
2035 if (arm_feature(env, ARM_FEATURE_VFP)) {
2036 regspace = restore_sigframe_v2_vfp(env, regspace);
2037 if (!regspace) {
2038 return 1;
2041 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2042 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2043 if (!regspace) {
2044 return 1;
2048 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2049 return 1;
2051 #if 0
2052 /* Send SIGTRAP if we're single-stepping */
2053 if (ptrace_cancel_bpt(current))
2054 send_sig(SIGTRAP, current, 1);
2055 #endif
2057 return 0;
2060 static long do_sigreturn_v2(CPUARMState *env)
2062 abi_ulong frame_addr;
2063 struct sigframe_v2 *frame = NULL;
2066 * Since we stacked the signal on a 64-bit boundary,
2067 * 'sp' should be 8-byte aligned here. If it's
2068 * not, the user is trying to mess with us.
2070 frame_addr = env->regs[13];
2071 trace_user_do_sigreturn(env, frame_addr);
2072 if (frame_addr & 7) {
2073 goto badframe;
2076 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2077 goto badframe;
2080 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2081 goto badframe;
2084 unlock_user_struct(frame, frame_addr, 0);
2085 return -TARGET_QEMU_ESIGRETURN;
2087 badframe:
2088 unlock_user_struct(frame, frame_addr, 0);
2089 force_sig(TARGET_SIGSEGV /* , current */);
2090 return 0;
2093 long do_sigreturn(CPUARMState *env)
2095 if (get_osversion() >= 0x020612) {
2096 return do_sigreturn_v2(env);
2097 } else {
2098 return do_sigreturn_v1(env);
2102 static long do_rt_sigreturn_v1(CPUARMState *env)
2104 abi_ulong frame_addr;
2105 struct rt_sigframe_v1 *frame = NULL;
2106 sigset_t host_set;
2109 * Since we stacked the signal on a 64-bit boundary,
2110 * 'sp' should be 8-byte aligned here. If it's
2111 * not, the user is trying to mess with us.
2113 frame_addr = env->regs[13];
2114 trace_user_do_rt_sigreturn(env, frame_addr);
2115 if (frame_addr & 7) {
2116 goto badframe;
2119 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2120 goto badframe;
2123 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2124 set_sigmask(&host_set);
2126 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2127 goto badframe;
2130 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2131 goto badframe;
2133 #if 0
2134 /* Send SIGTRAP if we're single-stepping */
2135 if (ptrace_cancel_bpt(current))
2136 send_sig(SIGTRAP, current, 1);
2137 #endif
2138 unlock_user_struct(frame, frame_addr, 0);
2139 return -TARGET_QEMU_ESIGRETURN;
2141 badframe:
2142 unlock_user_struct(frame, frame_addr, 0);
2143 force_sig(TARGET_SIGSEGV /* , current */);
2144 return 0;
2147 static long do_rt_sigreturn_v2(CPUARMState *env)
2149 abi_ulong frame_addr;
2150 struct rt_sigframe_v2 *frame = NULL;
2153 * Since we stacked the signal on a 64-bit boundary,
2154 * 'sp' should be 8-byte aligned here. If it's
2155 * not, the user is trying to mess with us.
2157 frame_addr = env->regs[13];
2158 trace_user_do_rt_sigreturn(env, frame_addr);
2159 if (frame_addr & 7) {
2160 goto badframe;
2163 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2164 goto badframe;
2167 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2168 goto badframe;
2171 unlock_user_struct(frame, frame_addr, 0);
2172 return -TARGET_QEMU_ESIGRETURN;
2174 badframe:
2175 unlock_user_struct(frame, frame_addr, 0);
2176 force_sig(TARGET_SIGSEGV /* , current */);
2177 return 0;
2180 long do_rt_sigreturn(CPUARMState *env)
2182 if (get_osversion() >= 0x020612) {
2183 return do_rt_sigreturn_v2(env);
2184 } else {
2185 return do_rt_sigreturn_v1(env);
2189 #elif defined(TARGET_SPARC)
2191 #define __SUNOS_MAXWIN 31
2193 /* This is what SunOS does, so shall I. */
2194 struct target_sigcontext {
2195 abi_ulong sigc_onstack; /* state to restore */
2197 abi_ulong sigc_mask; /* sigmask to restore */
2198 abi_ulong sigc_sp; /* stack pointer */
2199 abi_ulong sigc_pc; /* program counter */
2200 abi_ulong sigc_npc; /* next program counter */
2201 abi_ulong sigc_psr; /* for condition codes etc */
2202 abi_ulong sigc_g1; /* User uses these two registers */
2203 abi_ulong sigc_o0; /* within the trampoline code. */
2205 /* Now comes information regarding the user's window set
2206 * at the time of the signal.
2208 abi_ulong sigc_oswins; /* outstanding windows */
2210 /* stack ptrs for each regwin buf */
2211 char *sigc_spbuf[__SUNOS_MAXWIN];
2213 /* Windows to restore after signal */
2214 struct {
2215 abi_ulong locals[8];
2216 abi_ulong ins[8];
2217 } sigc_wbuf[__SUNOS_MAXWIN];
2219 /* A Sparc stack frame */
2220 struct sparc_stackf {
2221 abi_ulong locals[8];
2222 abi_ulong ins[8];
2223 /* It's simpler to treat fp and callers_pc as elements of ins[]
2224 * since we never need to access them ourselves.
2226 char *structptr;
2227 abi_ulong xargs[6];
2228 abi_ulong xxargs[1];
2231 typedef struct {
2232 struct {
2233 abi_ulong psr;
2234 abi_ulong pc;
2235 abi_ulong npc;
2236 abi_ulong y;
2237 abi_ulong u_regs[16]; /* globals and ins */
2238 } si_regs;
2239 int si_mask;
2240 } __siginfo_t;
2242 typedef struct {
2243 abi_ulong si_float_regs[32];
2244 unsigned long si_fsr;
2245 unsigned long si_fpqdepth;
2246 struct {
2247 unsigned long *insn_addr;
2248 unsigned long insn;
2249 } si_fpqueue [16];
2250 } qemu_siginfo_fpu_t;
2253 struct target_signal_frame {
2254 struct sparc_stackf ss;
2255 __siginfo_t info;
2256 abi_ulong fpu_save;
2257 abi_ulong insns[2] __attribute__ ((aligned (8)));
2258 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2259 abi_ulong extra_size; /* Should be 0 */
2260 qemu_siginfo_fpu_t fpu_state;
2262 struct target_rt_signal_frame {
2263 struct sparc_stackf ss;
2264 siginfo_t info;
2265 abi_ulong regs[20];
2266 sigset_t mask;
2267 abi_ulong fpu_save;
2268 unsigned int insns[2];
2269 stack_t stack;
2270 unsigned int extra_size; /* Should be 0 */
2271 qemu_siginfo_fpu_t fpu_state;
2274 #define UREG_O0 16
2275 #define UREG_O6 22
2276 #define UREG_I0 0
2277 #define UREG_I1 1
2278 #define UREG_I2 2
2279 #define UREG_I3 3
2280 #define UREG_I4 4
2281 #define UREG_I5 5
2282 #define UREG_I6 6
2283 #define UREG_I7 7
2284 #define UREG_L0 8
2285 #define UREG_FP UREG_I6
2286 #define UREG_SP UREG_O6
2288 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2289 CPUSPARCState *env,
2290 unsigned long framesize)
2292 abi_ulong sp;
2294 sp = env->regwptr[UREG_FP];
2296 /* This is the X/Open sanctioned signal stack switching. */
2297 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2298 if (!on_sig_stack(sp)
2299 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2300 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2303 return sp - framesize;
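/* setup___siginfo() captures the state a handler may inspect: PSR, PC,
 * nPC, Y, the eight global registers and the eight window registers held
 * in regwptr[UREG_I0..UREG_I0+7], plus the primary signal mask word. */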
2306 static int
2307 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2309 int err = 0, i;
2311 __put_user(env->psr, &si->si_regs.psr);
2312 __put_user(env->pc, &si->si_regs.pc);
2313 __put_user(env->npc, &si->si_regs.npc);
2314 __put_user(env->y, &si->si_regs.y);
2315 for (i=0; i < 8; i++) {
2316 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2318 for (i=0; i < 8; i++) {
2319 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2321 __put_user(mask, &si->si_mask);
2322 return err;
2325 #if 0
2326 static int
2327 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2328 CPUSPARCState *env, unsigned long mask)
2330 int err = 0;
2332 __put_user(mask, &sc->sigc_mask);
2333 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2334 __put_user(env->pc, &sc->sigc_pc);
2335 __put_user(env->npc, &sc->sigc_npc);
2336 __put_user(env->psr, &sc->sigc_psr);
2337 __put_user(env->gregs[1], &sc->sigc_g1);
2338 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2340 return err;
2342 #endif
2343 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2345 static void setup_frame(int sig, struct target_sigaction *ka,
2346 target_sigset_t *set, CPUSPARCState *env)
2348 abi_ulong sf_addr;
2349 struct target_signal_frame *sf;
2350 int sigframe_size, err, i;
2352 /* 1. Make sure everything is clean */
2353 //synchronize_user_stack();
2355 sigframe_size = NF_ALIGNEDSZ;
2356 sf_addr = get_sigframe(ka, env, sigframe_size);
2357 trace_user_setup_frame(env, sf_addr);
2359 sf = lock_user(VERIFY_WRITE, sf_addr,
2360 sizeof(struct target_signal_frame), 0);
2361 if (!sf) {
2362 goto sigsegv;
2364 #if 0
2365 if (invalid_frame_pointer(sf, sigframe_size))
2366 goto sigill_and_return;
2367 #endif
2368 /* 2. Save the current process state */
2369 err = setup___siginfo(&sf->info, env, set->sig[0]);
2370 __put_user(0, &sf->extra_size);
2372 //save_fpu_state(regs, &sf->fpu_state);
2373 //__put_user(&sf->fpu_state, &sf->fpu_save);
2375 __put_user(set->sig[0], &sf->info.si_mask);
2376 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2377 __put_user(set->sig[i + 1], &sf->extramask[i]);
2380 for (i = 0; i < 8; i++) {
2381 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2383 for (i = 0; i < 8; i++) {
2384 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2386 if (err)
2387 goto sigsegv;
2389 /* 3. signal handler back-trampoline and parameters */
2390 env->regwptr[UREG_FP] = sf_addr;
2391 env->regwptr[UREG_I0] = sig;
2392 env->regwptr[UREG_I1] = sf_addr +
2393 offsetof(struct target_signal_frame, info);
2394 env->regwptr[UREG_I2] = sf_addr +
2395 offsetof(struct target_signal_frame, info);
2397 /* 4. signal handler */
2398 env->pc = ka->_sa_handler;
2399 env->npc = (env->pc + 4);
2400 /* 5. return to kernel instructions */
2401 if (ka->sa_restorer) {
2402 env->regwptr[UREG_I7] = ka->sa_restorer;
2403 } else {
2404 uint32_t val32;
2406 env->regwptr[UREG_I7] = sf_addr +
2407 offsetof(struct target_signal_frame, insns) - 2 * 4;
2409 /* mov __NR_sigreturn, %g1 */
2410 val32 = 0x821020d8;
2411 __put_user(val32, &sf->insns[0]);
2413 /* t 0x10 */
2414 val32 = 0x91d02010;
2415 __put_user(val32, &sf->insns[1]);
2416 if (err)
2417 goto sigsegv;
2419 /* Flush instruction space. */
2420 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2421 // tb_flush(env);
2423 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2424 return;
2425 #if 0
2426 sigill_and_return:
2427 force_sig(TARGET_SIGILL);
2428 #endif
2429 sigsegv:
2430 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2431 force_sig(TARGET_SIGSEGV);
2434 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2435 target_siginfo_t *info,
2436 target_sigset_t *set, CPUSPARCState *env)
2438 fprintf(stderr, "setup_rt_frame: not implemented\n");
2441 long do_sigreturn(CPUSPARCState *env)
2443 abi_ulong sf_addr;
2444 struct target_signal_frame *sf;
2445 uint32_t up_psr, pc, npc;
2446 target_sigset_t set;
2447 sigset_t host_set;
2448 int err=0, i;
2450 sf_addr = env->regwptr[UREG_FP];
2451 trace_user_do_sigreturn(env, sf_addr);
2452 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2453 goto segv_and_exit;
2456 /* 1. Make sure we are not getting garbage from the user */
2458 if (sf_addr & 3)
2459 goto segv_and_exit;
2461 __get_user(pc, &sf->info.si_regs.pc);
2462 __get_user(npc, &sf->info.si_regs.npc);
2464 if ((pc | npc) & 3) {
2465 goto segv_and_exit;
2468 /* 2. Restore the state */
2469 __get_user(up_psr, &sf->info.si_regs.psr);
2471 /* User can only change condition codes and FPU enabling in %psr. */
2472 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2473 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2475 env->pc = pc;
2476 env->npc = npc;
2477 __get_user(env->y, &sf->info.si_regs.y);
2478 for (i=0; i < 8; i++) {
2479 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2481 for (i=0; i < 8; i++) {
2482 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2485 /* FIXME: implement FPU save/restore:
2486 * __get_user(fpu_save, &sf->fpu_save);
2487 * if (fpu_save)
2488 * err |= restore_fpu_state(env, fpu_save);
2491 /* This is pretty much atomic; no amount of locking would prevent
2492 * the races which exist anyway.
2494 __get_user(set.sig[0], &sf->info.si_mask);
2495 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2496 __get_user(set.sig[i], &sf->extramask[i - 1]);
2499 target_to_host_sigset_internal(&host_set, &set);
2500 set_sigmask(&host_set);
2502 if (err) {
2503 goto segv_and_exit;
2505 unlock_user_struct(sf, sf_addr, 0);
2506 return -TARGET_QEMU_ESIGRETURN;
2508 segv_and_exit:
2509 unlock_user_struct(sf, sf_addr, 0);
2510 force_sig(TARGET_SIGSEGV);
2513 long do_rt_sigreturn(CPUSPARCState *env)
2515 trace_user_do_rt_sigreturn(env, 0);
2516 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2517 return -TARGET_ENOSYS;
2520 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2521 #define MC_TSTATE 0
2522 #define MC_PC 1
2523 #define MC_NPC 2
2524 #define MC_Y 3
2525 #define MC_G1 4
2526 #define MC_G2 5
2527 #define MC_G3 6
2528 #define MC_G4 7
2529 #define MC_G5 8
2530 #define MC_G6 9
2531 #define MC_G7 10
2532 #define MC_O0 11
2533 #define MC_O1 12
2534 #define MC_O2 13
2535 #define MC_O3 14
2536 #define MC_O4 15
2537 #define MC_O5 16
2538 #define MC_O6 17
2539 #define MC_O7 18
2540 #define MC_NGREG 19
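/* Indices into target_mc_gregset_t as used by sparc64_{set,get}_context()
 * below: trap state, PC, nPC, Y, then the global and out registers. */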
2542 typedef abi_ulong target_mc_greg_t;
2543 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2545 struct target_mc_fq {
2546 abi_ulong *mcfq_addr;
2547 uint32_t mcfq_insn;
2550 struct target_mc_fpu {
2551 union {
2552 uint32_t sregs[32];
2553 uint64_t dregs[32];
2554 //uint128_t qregs[16];
2555 } mcfpu_fregs;
2556 abi_ulong mcfpu_fsr;
2557 abi_ulong mcfpu_fprs;
2558 abi_ulong mcfpu_gsr;
2559 struct target_mc_fq *mcfpu_fq;
2560 unsigned char mcfpu_qcnt;
2561 unsigned char mcfpu_qentsz;
2562 unsigned char mcfpu_enab;
2564 typedef struct target_mc_fpu target_mc_fpu_t;
2566 typedef struct {
2567 target_mc_gregset_t mc_gregs;
2568 target_mc_greg_t mc_fp;
2569 target_mc_greg_t mc_i7;
2570 target_mc_fpu_t mc_fpregs;
2571 } target_mcontext_t;
2573 struct target_ucontext {
2574 struct target_ucontext *tuc_link;
2575 abi_ulong tuc_flags;
2576 target_sigset_t tuc_sigmask;
2577 target_mcontext_t tuc_mcontext;
2580 /* A V9 register window */
2581 struct target_reg_window {
2582 abi_ulong locals[8];
2583 abi_ulong ins[8];
2586 #define TARGET_STACK_BIAS 2047
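/* The SPARC V9 ABI biases the stack and frame pointers by 2047 bytes, so
 * the register-window save area really lives at the saved pointer plus
 * TARGET_STACK_BIAS; the context code below applies that bias before
 * touching the window words in guest memory. */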
2588 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2589 void sparc64_set_context(CPUSPARCState *env)
2591 abi_ulong ucp_addr;
2592 struct target_ucontext *ucp;
2593 target_mc_gregset_t *grp;
2594 abi_ulong pc, npc, tstate;
2595 abi_ulong fp, i7, w_addr;
2596 unsigned int i;
2598 ucp_addr = env->regwptr[UREG_I0];
2599 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2600 goto do_sigsegv;
2602 grp = &ucp->tuc_mcontext.mc_gregs;
2603 __get_user(pc, &((*grp)[MC_PC]));
2604 __get_user(npc, &((*grp)[MC_NPC]));
2605 if ((pc | npc) & 3) {
2606 goto do_sigsegv;
2608 if (env->regwptr[UREG_I1]) {
2609 target_sigset_t target_set;
2610 sigset_t set;
2612 if (TARGET_NSIG_WORDS == 1) {
2613 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2614 } else {
2615 abi_ulong *src, *dst;
2616 src = ucp->tuc_sigmask.sig;
2617 dst = target_set.sig;
2618 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2619 __get_user(*dst, src);
2622 target_to_host_sigset_internal(&set, &target_set);
2623 set_sigmask(&set);
2625 env->pc = pc;
2626 env->npc = npc;
2627 __get_user(env->y, &((*grp)[MC_Y]));
2628 __get_user(tstate, &((*grp)[MC_TSTATE]));
2629 env->asi = (tstate >> 24) & 0xff;
2630 cpu_put_ccr(env, tstate >> 32);
2631 cpu_put_cwp64(env, tstate & 0x1f);
2632 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2633 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2634 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2635 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2636 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2637 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2638 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2639 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2640 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2641 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2642 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2643 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2644 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2645 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2646 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2648 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2649 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2651 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2652 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2653 abi_ulong) != 0) {
2654 goto do_sigsegv;
2656 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2657 abi_ulong) != 0) {
2658 goto do_sigsegv;
2660 /* FIXME this does not match how the kernel handles the FPU in
2661 * its sparc64_set_context implementation. In particular the FPU
2662 * is only restored if fenab is non-zero in:
2663 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2665 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2667 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2668 for (i = 0; i < 64; i++, src++) {
2669 if (i & 1) {
2670 __get_user(env->fpr[i/2].l.lower, src);
2671 } else {
2672 __get_user(env->fpr[i/2].l.upper, src);
2676 __get_user(env->fsr,
2677 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2678 __get_user(env->gsr,
2679 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2680 unlock_user_struct(ucp, ucp_addr, 0);
2681 return;
2682 do_sigsegv:
2683 unlock_user_struct(ucp, ucp_addr, 0);
2684 force_sig(TARGET_SIGSEGV);
2687 void sparc64_get_context(CPUSPARCState *env)
2689 abi_ulong ucp_addr;
2690 struct target_ucontext *ucp;
2691 target_mc_gregset_t *grp;
2692 target_mcontext_t *mcp;
2693 abi_ulong fp, i7, w_addr;
2694 int err;
2695 unsigned int i;
2696 target_sigset_t target_set;
2697 sigset_t set;
2699 ucp_addr = env->regwptr[UREG_I0];
2700 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2701 goto do_sigsegv;
2704 mcp = &ucp->tuc_mcontext;
2705 grp = &mcp->mc_gregs;
2707 /* Skip over the trap instruction, first. */
2708 env->pc = env->npc;
2709 env->npc += 4;
2711 /* If we're only reading the signal mask then do_sigprocmask()
2712 * is guaranteed not to fail, which is important because we don't
2713 * have any way to signal a failure or restart this operation since
2714 * this is not a normal syscall.
2716 err = do_sigprocmask(0, NULL, &set);
2717 assert(err == 0);
2718 host_to_target_sigset_internal(&target_set, &set);
2719 if (TARGET_NSIG_WORDS == 1) {
2720 __put_user(target_set.sig[0],
2721 (abi_ulong *)&ucp->tuc_sigmask);
2722 } else {
2723 abi_ulong *src, *dst;
2724 src = target_set.sig;
2725 dst = ucp->tuc_sigmask.sig;
2726 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2727 __put_user(*src, dst);
2729 if (err)
2730 goto do_sigsegv;
2733 /* XXX: tstate must be saved properly */
2734 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2735 __put_user(env->pc, &((*grp)[MC_PC]));
2736 __put_user(env->npc, &((*grp)[MC_NPC]));
2737 __put_user(env->y, &((*grp)[MC_Y]));
2738 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2739 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2740 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2741 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2742 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2743 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2744 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2745 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2746 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2747 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2748 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2749 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2750 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2751 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2752 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2754 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2755 fp = i7 = 0;
2756 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2757 abi_ulong) != 0) {
2758 goto do_sigsegv;
2760 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2761 abi_ulong) != 0) {
2762 goto do_sigsegv;
2764 __put_user(fp, &(mcp->mc_fp));
2765 __put_user(i7, &(mcp->mc_i7));
2768 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2769 for (i = 0; i < 64; i++, dst++) {
2770 if (i & 1) {
2771 __put_user(env->fpr[i/2].l.lower, dst);
2772 } else {
2773 __put_user(env->fpr[i/2].l.upper, dst);
2777 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2778 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2779 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2781 if (err)
2782 goto do_sigsegv;
2783 unlock_user_struct(ucp, ucp_addr, 1);
2784 return;
2785 do_sigsegv:
2786 unlock_user_struct(ucp, ucp_addr, 1);
2787 force_sig(TARGET_SIGSEGV);
2789 #endif
2790 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2792 # if defined(TARGET_ABI_MIPSO32)
2793 struct target_sigcontext {
2794 uint32_t sc_regmask; /* Unused */
2795 uint32_t sc_status;
2796 uint64_t sc_pc;
2797 uint64_t sc_regs[32];
2798 uint64_t sc_fpregs[32];
2799 uint32_t sc_ownedfp; /* Unused */
2800 uint32_t sc_fpc_csr;
2801 uint32_t sc_fpc_eir; /* Unused */
2802 uint32_t sc_used_math;
2803 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2804 uint32_t pad0;
2805 uint64_t sc_mdhi;
2806 uint64_t sc_mdlo;
2807 target_ulong sc_hi1; /* Was sc_cause */
2808 target_ulong sc_lo1; /* Was sc_badvaddr */
2809 target_ulong sc_hi2; /* Was sc_sigset[4] */
2810 target_ulong sc_lo2;
2811 target_ulong sc_hi3;
2812 target_ulong sc_lo3;
2814 # else /* N32 || N64 */
2815 struct target_sigcontext {
2816 uint64_t sc_regs[32];
2817 uint64_t sc_fpregs[32];
2818 uint64_t sc_mdhi;
2819 uint64_t sc_hi1;
2820 uint64_t sc_hi2;
2821 uint64_t sc_hi3;
2822 uint64_t sc_mdlo;
2823 uint64_t sc_lo1;
2824 uint64_t sc_lo2;
2825 uint64_t sc_lo3;
2826 uint64_t sc_pc;
2827 uint32_t sc_fpc_csr;
2828 uint32_t sc_used_math;
2829 uint32_t sc_dsp;
2830 uint32_t sc_reserved;
2832 # endif /* O32 */
2834 struct sigframe {
2835 uint32_t sf_ass[4]; /* argument save space for o32 */
2836 uint32_t sf_code[2]; /* signal trampoline */
2837 struct target_sigcontext sf_sc;
2838 target_sigset_t sf_mask;
2841 struct target_ucontext {
2842 target_ulong tuc_flags;
2843 target_ulong tuc_link;
2844 target_stack_t tuc_stack;
2845 target_ulong pad0;
2846 struct target_sigcontext tuc_mcontext;
2847 target_sigset_t tuc_sigmask;
2850 struct target_rt_sigframe {
2851 uint32_t rs_ass[4]; /* argument save space for o32 */
2852 uint32_t rs_code[2]; /* signal trampoline */
2853 struct target_siginfo rs_info;
2854 struct target_ucontext rs_uc;
2857 /* Install trampoline to jump back from signal handler */
2858 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2860 int err = 0;
2863 * Set up the return code ...
2865 * li v0, __NR__foo_sigreturn
2866 * syscall
2869 __put_user(0x24020000 + syscall, tramp + 0);
2870 __put_user(0x0000000c , tramp + 1);
2871 return err;
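/* setup_sigcontext()/restore_sigcontext() below mirror the kernel's MIPS
 * sigcontext: PC, general registers 1-31 (r0 is hardwired to zero), the
 * HI/LO pairs including the extra DSP accumulators, the DSP control word
 * and the 32 floating point registers. */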
2874 static inline void setup_sigcontext(CPUMIPSState *regs,
2875 struct target_sigcontext *sc)
2877 int i;
2879 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2880 regs->hflags &= ~MIPS_HFLAG_BMASK;
2882 __put_user(0, &sc->sc_regs[0]);
2883 for (i = 1; i < 32; ++i) {
2884 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2887 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2888 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2890 /* Rather than checking for dsp existence, always copy. The storage
2891 would just be garbage otherwise. */
2892 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2893 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2894 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2895 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2896 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2897 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2899 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2900 __put_user(dsp, &sc->sc_dsp);
2903 __put_user(1, &sc->sc_used_math);
2905 for (i = 0; i < 32; ++i) {
2906 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2910 static inline void
2911 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2913 int i;
2915 __get_user(regs->CP0_EPC, &sc->sc_pc);
2917 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2918 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2920 for (i = 1; i < 32; ++i) {
2921 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2924 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2925 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2926 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2927 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2928 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2929 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2931 uint32_t dsp;
2932 __get_user(dsp, &sc->sc_dsp);
2933 cpu_wrdsp(dsp, 0x3ff, regs);
2936 for (i = 0; i < 32; ++i) {
2937 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2942 * Determine which stack to use.
2944 static inline abi_ulong
2945 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2947 unsigned long sp;
2949 /* Default to using normal stack */
2950 sp = regs->active_tc.gpr[29];
2953 * The FPU emulator may have its own trampoline active just
2954 * above the user stack, 16 bytes before the next lowest
2955 * 16-byte boundary. Try to avoid trashing it.
2957 sp -= 32;
2959 /* This is the X/Open sanctioned signal stack switching. */
2960 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2961 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2964 return (sp - frame_size) & ~7;
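/* MIPS16 and microMIPS encode the ISA mode in bit 0 of a code address;
 * whenever PC is pointed at a handler or restored from a frame, that bit
 * is folded into hflags and cleared from PC. */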
2967 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2969 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2970 env->hflags &= ~MIPS_HFLAG_M16;
2971 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2972 env->active_tc.PC &= ~(target_ulong) 1;
2976 # if defined(TARGET_ABI_MIPSO32)
2977 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2978 static void setup_frame(int sig, struct target_sigaction * ka,
2979 target_sigset_t *set, CPUMIPSState *regs)
2981 struct sigframe *frame;
2982 abi_ulong frame_addr;
2983 int i;
2985 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2986 trace_user_setup_frame(regs, frame_addr);
2987 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2988 goto give_sigsegv;
2991 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2993 setup_sigcontext(regs, &frame->sf_sc);
2995 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2996 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3000 * Arguments to signal handler:
3002 * a0 = signal number
3003 * a1 = 0 (should be cause)
3004 * a2 = pointer to struct sigcontext
3006 * $25 and PC point to the signal handler, $29 points to the
3007 * struct sigframe.
3009 regs->active_tc.gpr[ 4] = sig;
3010 regs->active_tc.gpr[ 5] = 0;
3011 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3012 regs->active_tc.gpr[29] = frame_addr;
3013 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3014 /* The original kernel code sets CP0_EPC to the handler
3015 * since it returns to userland using eret
3016 * we cannot do this here, and we must set PC directly */
3017 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3018 mips_set_hflags_isa_mode_from_pc(regs);
3019 unlock_user_struct(frame, frame_addr, 1);
3020 return;
3022 give_sigsegv:
3023 force_sig(TARGET_SIGSEGV/*, current*/);
3026 long do_sigreturn(CPUMIPSState *regs)
3028 struct sigframe *frame;
3029 abi_ulong frame_addr;
3030 sigset_t blocked;
3031 target_sigset_t target_set;
3032 int i;
3034 frame_addr = regs->active_tc.gpr[29];
3035 trace_user_do_sigreturn(regs, frame_addr);
3036 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3037 goto badframe;
3039 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3040 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3043 target_to_host_sigset_internal(&blocked, &target_set);
3044 set_sigmask(&blocked);
3046 restore_sigcontext(regs, &frame->sf_sc);
3048 #if 0
3050 * Don't let your children do this ...
3052 __asm__ __volatile__(
3053 "move\t$29, %0\n\t"
3054 "j\tsyscall_exit"
3055 :/* no outputs */
3056 :"r" (&regs));
3057 /* Unreached */
3058 #endif
3060 regs->active_tc.PC = regs->CP0_EPC;
3061 mips_set_hflags_isa_mode_from_pc(regs);
3062 /* I am not sure this is right, but it seems to work;
3063 * maybe a problem with nested signals? */
3064 regs->CP0_EPC = 0;
3065 return -TARGET_QEMU_ESIGRETURN;
3067 badframe:
3068 force_sig(TARGET_SIGSEGV/*, current*/);
3069 return 0;
3071 # endif /* O32 */
3073 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3074 target_siginfo_t *info,
3075 target_sigset_t *set, CPUMIPSState *env)
3077 struct target_rt_sigframe *frame;
3078 abi_ulong frame_addr;
3079 int i;
3081 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3082 trace_user_setup_rt_frame(env, frame_addr);
3083 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3084 goto give_sigsegv;
3087 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3089 tswap_siginfo(&frame->rs_info, info);
3091 __put_user(0, &frame->rs_uc.tuc_flags);
3092 __put_user(0, &frame->rs_uc.tuc_link);
3093 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3094 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3095 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3096 &frame->rs_uc.tuc_stack.ss_flags);
3098 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3100 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3101 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3105 * Arguments to signal handler:
3107 * a0 = signal number
3108 * a1 = pointer to siginfo_t
3109 * a2 = pointer to struct ucontext
3111 * $25 and PC point to the signal handler, $29 points to the
3112 * struct sigframe.
3114 env->active_tc.gpr[ 4] = sig;
3115 env->active_tc.gpr[ 5] = frame_addr
3116 + offsetof(struct target_rt_sigframe, rs_info);
3117 env->active_tc.gpr[ 6] = frame_addr
3118 + offsetof(struct target_rt_sigframe, rs_uc);
3119 env->active_tc.gpr[29] = frame_addr;
3120 env->active_tc.gpr[31] = frame_addr
3121 + offsetof(struct target_rt_sigframe, rs_code);
3122 /* The original kernel code sets CP0_EPC to the handler
3123 * since it returns to userland using eret
3124 * we cannot do this here, and we must set PC directly */
3125 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3126 mips_set_hflags_isa_mode_from_pc(env);
3127 unlock_user_struct(frame, frame_addr, 1);
3128 return;
3130 give_sigsegv:
3131 unlock_user_struct(frame, frame_addr, 1);
3132 force_sig(TARGET_SIGSEGV/*, current*/);
3135 long do_rt_sigreturn(CPUMIPSState *env)
3137 struct target_rt_sigframe *frame;
3138 abi_ulong frame_addr;
3139 sigset_t blocked;
3141 frame_addr = env->active_tc.gpr[29];
3142 trace_user_do_rt_sigreturn(env, frame_addr);
3143 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3144 goto badframe;
3147 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3148 set_sigmask(&blocked);
3150 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3152 if (do_sigaltstack(frame_addr +
3153 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3154 0, get_sp_from_cpustate(env)) == -EFAULT)
3155 goto badframe;
3157 env->active_tc.PC = env->CP0_EPC;
3158 mips_set_hflags_isa_mode_from_pc(env);
3159 /* I am not sure this is right, but it seems to work;
3160 * maybe a problem with nested signals? */
3161 env->CP0_EPC = 0;
3162 return -TARGET_QEMU_ESIGRETURN;
3164 badframe:
3165 force_sig(TARGET_SIGSEGV/*, current*/);
3166 return 0;
3169 #elif defined(TARGET_SH4)
3172 * code and data structures from linux kernel:
3173 * include/asm-sh/sigcontext.h
3174 * arch/sh/kernel/signal.c
3177 struct target_sigcontext {
3178 target_ulong oldmask;
3180 /* CPU registers */
3181 target_ulong sc_gregs[16];
3182 target_ulong sc_pc;
3183 target_ulong sc_pr;
3184 target_ulong sc_sr;
3185 target_ulong sc_gbr;
3186 target_ulong sc_mach;
3187 target_ulong sc_macl;
3189 /* FPU registers */
3190 target_ulong sc_fpregs[16];
3191 target_ulong sc_xfpregs[16];
3192 unsigned int sc_fpscr;
3193 unsigned int sc_fpul;
3194 unsigned int sc_ownedfp;
3197 struct target_sigframe
3199 struct target_sigcontext sc;
3200 target_ulong extramask[TARGET_NSIG_WORDS-1];
3201 uint16_t retcode[3];
3205 struct target_ucontext {
3206 target_ulong tuc_flags;
3207 struct target_ucontext *tuc_link;
3208 target_stack_t tuc_stack;
3209 struct target_sigcontext tuc_mcontext;
3210 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3213 struct target_rt_sigframe
3215 struct target_siginfo info;
3216 struct target_ucontext uc;
3217 uint16_t retcode[3];
3221 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3222 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
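/* The generated return stub below is three 16-bit words: a PC-relative
 * mov.w that loads the syscall number stored in retcode[2] into r3, the
 * trapa #0x10 that enters the kernel, and the number itself. */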
3224 static abi_ulong get_sigframe(struct target_sigaction *ka,
3225 unsigned long sp, size_t frame_size)
3227 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3228 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3231 return (sp - frame_size) & -8ul;
3234 static void setup_sigcontext(struct target_sigcontext *sc,
3235 CPUSH4State *regs, unsigned long mask)
3237 int i;
3239 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3240 COPY(gregs[0]); COPY(gregs[1]);
3241 COPY(gregs[2]); COPY(gregs[3]);
3242 COPY(gregs[4]); COPY(gregs[5]);
3243 COPY(gregs[6]); COPY(gregs[7]);
3244 COPY(gregs[8]); COPY(gregs[9]);
3245 COPY(gregs[10]); COPY(gregs[11]);
3246 COPY(gregs[12]); COPY(gregs[13]);
3247 COPY(gregs[14]); COPY(gregs[15]);
3248 COPY(gbr); COPY(mach);
3249 COPY(macl); COPY(pr);
3250 COPY(sr); COPY(pc);
3251 #undef COPY
3253 for (i=0; i<16; i++) {
3254 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3256 __put_user(regs->fpscr, &sc->sc_fpscr);
3257 __put_user(regs->fpul, &sc->sc_fpul);
3259 /* non-iBCS2 extensions.. */
3260 __put_user(mask, &sc->oldmask);
3263 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3265 int i;
3267 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3268 COPY(gregs[0]); COPY(gregs[1]);
3269 COPY(gregs[2]); COPY(gregs[3]);
3270 COPY(gregs[4]); COPY(gregs[5]);
3271 COPY(gregs[6]); COPY(gregs[7]);
3272 COPY(gregs[8]); COPY(gregs[9]);
3273 COPY(gregs[10]); COPY(gregs[11]);
3274 COPY(gregs[12]); COPY(gregs[13]);
3275 COPY(gregs[14]); COPY(gregs[15]);
3276 COPY(gbr); COPY(mach);
3277 COPY(macl); COPY(pr);
3278 COPY(sr); COPY(pc);
3279 #undef COPY
3281 for (i=0; i<16; i++) {
3282 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3284 __get_user(regs->fpscr, &sc->sc_fpscr);
3285 __get_user(regs->fpul, &sc->sc_fpul);
3287 regs->tra = -1; /* disable syscall checks */
3290 static void setup_frame(int sig, struct target_sigaction *ka,
3291 target_sigset_t *set, CPUSH4State *regs)
3293 struct target_sigframe *frame;
3294 abi_ulong frame_addr;
3295 int i;
3297 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3298 trace_user_setup_frame(regs, frame_addr);
3299 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3300 goto give_sigsegv;
3303 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3305 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3306 __put_user(set->sig[i + 1], &frame->extramask[i]);
3309 /* Set up to return from userspace. If provided, use a stub
3310 already in userspace. */
3311 if (ka->sa_flags & TARGET_SA_RESTORER) {
3312 regs->pr = (unsigned long) ka->sa_restorer;
3313 } else {
3314 /* Generate return code (system call to sigreturn) */
3315 abi_ulong retcode_addr = frame_addr +
3316 offsetof(struct target_sigframe, retcode);
3317 __put_user(MOVW(2), &frame->retcode[0]);
3318 __put_user(TRAP_NOARG, &frame->retcode[1]);
3319 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3320 regs->pr = (unsigned long) retcode_addr;
3323 /* Set up registers for signal handler */
3324 regs->gregs[15] = frame_addr;
3325 regs->gregs[4] = sig; /* Arg for signal handler */
3326 regs->gregs[5] = 0;
3327 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3328 regs->pc = (unsigned long) ka->_sa_handler;
3330 unlock_user_struct(frame, frame_addr, 1);
3331 return;
3333 give_sigsegv:
3334 unlock_user_struct(frame, frame_addr, 1);
3335 force_sig(TARGET_SIGSEGV);
3338 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3339 target_siginfo_t *info,
3340 target_sigset_t *set, CPUSH4State *regs)
3342 struct target_rt_sigframe *frame;
3343 abi_ulong frame_addr;
3344 int i;
3346 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3347 trace_user_setup_rt_frame(regs, frame_addr);
3348 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3349 goto give_sigsegv;
3352 tswap_siginfo(&frame->info, info);
3354 /* Create the ucontext. */
3355 __put_user(0, &frame->uc.tuc_flags);
3356 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3357 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3358 &frame->uc.tuc_stack.ss_sp);
3359 __put_user(sas_ss_flags(regs->gregs[15]),
3360 &frame->uc.tuc_stack.ss_flags);
3361 __put_user(target_sigaltstack_used.ss_size,
3362 &frame->uc.tuc_stack.ss_size);
3363 setup_sigcontext(&frame->uc.tuc_mcontext,
3364 regs, set->sig[0]);
3365 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3366 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3369 /* Set up to return from userspace. If provided, use a stub
3370 already in userspace. */
3371 if (ka->sa_flags & TARGET_SA_RESTORER) {
3372 regs->pr = (unsigned long) ka->sa_restorer;
3373 } else {
3374 /* Generate return code (system call to sigreturn) */
3375 abi_ulong retcode_addr = frame_addr +
3376 offsetof(struct target_rt_sigframe, retcode);
3377 __put_user(MOVW(2), &frame->retcode[0]);
3378 __put_user(TRAP_NOARG, &frame->retcode[1]);
3379 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3380 regs->pr = (unsigned long) retcode_addr;
3383 /* Set up registers for signal handler */
3384 regs->gregs[15] = frame_addr;
3385 regs->gregs[4] = sig; /* Arg for signal handler */
3386 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3387 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3388 regs->pc = (unsigned long) ka->_sa_handler;
3390 unlock_user_struct(frame, frame_addr, 1);
3391 return;
3393 give_sigsegv:
3394 unlock_user_struct(frame, frame_addr, 1);
3395 force_sig(TARGET_SIGSEGV);
3398 long do_sigreturn(CPUSH4State *regs)
3400 struct target_sigframe *frame;
3401 abi_ulong frame_addr;
3402 sigset_t blocked;
3403 target_sigset_t target_set;
3404 int i;
3405 int err = 0;
3407 frame_addr = regs->gregs[15];
3408 trace_user_do_sigreturn(regs, frame_addr);
3409 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3410 goto badframe;
3413 __get_user(target_set.sig[0], &frame->sc.oldmask);
3414 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3415 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3418 if (err)
3419 goto badframe;
3421 target_to_host_sigset_internal(&blocked, &target_set);
3422 set_sigmask(&blocked);
3424 restore_sigcontext(regs, &frame->sc);
3426 unlock_user_struct(frame, frame_addr, 0);
3427 return -TARGET_QEMU_ESIGRETURN;
3429 badframe:
3430 unlock_user_struct(frame, frame_addr, 0);
3431 force_sig(TARGET_SIGSEGV);
3432 return 0;
3435 long do_rt_sigreturn(CPUSH4State *regs)
3437 struct target_rt_sigframe *frame;
3438 abi_ulong frame_addr;
3439 sigset_t blocked;
3441 frame_addr = regs->gregs[15];
3442 trace_user_do_rt_sigreturn(regs, frame_addr);
3443 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3444 goto badframe;
3447 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3448 set_sigmask(&blocked);
3450 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3452 if (do_sigaltstack(frame_addr +
3453 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3454 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3455 goto badframe;
3458 unlock_user_struct(frame, frame_addr, 0);
3459 return -TARGET_QEMU_ESIGRETURN;
3461 badframe:
3462 unlock_user_struct(frame, frame_addr, 0);
3463 force_sig(TARGET_SIGSEGV);
3464 return 0;
3466 #elif defined(TARGET_MICROBLAZE)
3468 struct target_sigcontext {
3469 struct target_pt_regs regs; /* needs to be first */
3470 uint32_t oldmask;
3473 struct target_stack_t {
3474 abi_ulong ss_sp;
3475 int ss_flags;
3476 unsigned int ss_size;
3479 struct target_ucontext {
3480 abi_ulong tuc_flags;
3481 abi_ulong tuc_link;
3482 struct target_stack_t tuc_stack;
3483 struct target_sigcontext tuc_mcontext;
3484 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3487 /* Signal frames. */
3488 struct target_signal_frame {
3489 struct target_ucontext uc;
3490 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3491 uint32_t tramp[2];
3494 struct rt_signal_frame {
3495 siginfo_t info;
3496 struct ucontext uc;
3497 uint32_t tramp[2];
3500 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3502 __put_user(env->regs[0], &sc->regs.r0);
3503 __put_user(env->regs[1], &sc->regs.r1);
3504 __put_user(env->regs[2], &sc->regs.r2);
3505 __put_user(env->regs[3], &sc->regs.r3);
3506 __put_user(env->regs[4], &sc->regs.r4);
3507 __put_user(env->regs[5], &sc->regs.r5);
3508 __put_user(env->regs[6], &sc->regs.r6);
3509 __put_user(env->regs[7], &sc->regs.r7);
3510 __put_user(env->regs[8], &sc->regs.r8);
3511 __put_user(env->regs[9], &sc->regs.r9);
3512 __put_user(env->regs[10], &sc->regs.r10);
3513 __put_user(env->regs[11], &sc->regs.r11);
3514 __put_user(env->regs[12], &sc->regs.r12);
3515 __put_user(env->regs[13], &sc->regs.r13);
3516 __put_user(env->regs[14], &sc->regs.r14);
3517 __put_user(env->regs[15], &sc->regs.r15);
3518 __put_user(env->regs[16], &sc->regs.r16);
3519 __put_user(env->regs[17], &sc->regs.r17);
3520 __put_user(env->regs[18], &sc->regs.r18);
3521 __put_user(env->regs[19], &sc->regs.r19);
3522 __put_user(env->regs[20], &sc->regs.r20);
3523 __put_user(env->regs[21], &sc->regs.r21);
3524 __put_user(env->regs[22], &sc->regs.r22);
3525 __put_user(env->regs[23], &sc->regs.r23);
3526 __put_user(env->regs[24], &sc->regs.r24);
3527 __put_user(env->regs[25], &sc->regs.r25);
3528 __put_user(env->regs[26], &sc->regs.r26);
3529 __put_user(env->regs[27], &sc->regs.r27);
3530 __put_user(env->regs[28], &sc->regs.r28);
3531 __put_user(env->regs[29], &sc->regs.r29);
3532 __put_user(env->regs[30], &sc->regs.r30);
3533 __put_user(env->regs[31], &sc->regs.r31);
3534 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3537 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3539 __get_user(env->regs[0], &sc->regs.r0);
3540 __get_user(env->regs[1], &sc->regs.r1);
3541 __get_user(env->regs[2], &sc->regs.r2);
3542 __get_user(env->regs[3], &sc->regs.r3);
3543 __get_user(env->regs[4], &sc->regs.r4);
3544 __get_user(env->regs[5], &sc->regs.r5);
3545 __get_user(env->regs[6], &sc->regs.r6);
3546 __get_user(env->regs[7], &sc->regs.r7);
3547 __get_user(env->regs[8], &sc->regs.r8);
3548 __get_user(env->regs[9], &sc->regs.r9);
3549 __get_user(env->regs[10], &sc->regs.r10);
3550 __get_user(env->regs[11], &sc->regs.r11);
3551 __get_user(env->regs[12], &sc->regs.r12);
3552 __get_user(env->regs[13], &sc->regs.r13);
3553 __get_user(env->regs[14], &sc->regs.r14);
3554 __get_user(env->regs[15], &sc->regs.r15);
3555 __get_user(env->regs[16], &sc->regs.r16);
3556 __get_user(env->regs[17], &sc->regs.r17);
3557 __get_user(env->regs[18], &sc->regs.r18);
3558 __get_user(env->regs[19], &sc->regs.r19);
3559 __get_user(env->regs[20], &sc->regs.r20);
3560 __get_user(env->regs[21], &sc->regs.r21);
3561 __get_user(env->regs[22], &sc->regs.r22);
3562 __get_user(env->regs[23], &sc->regs.r23);
3563 __get_user(env->regs[24], &sc->regs.r24);
3564 __get_user(env->regs[25], &sc->regs.r25);
3565 __get_user(env->regs[26], &sc->regs.r26);
3566 __get_user(env->regs[27], &sc->regs.r27);
3567 __get_user(env->regs[28], &sc->regs.r28);
3568 __get_user(env->regs[29], &sc->regs.r29);
3569 __get_user(env->regs[30], &sc->regs.r30);
3570 __get_user(env->regs[31], &sc->regs.r31);
3571 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3574 static abi_ulong get_sigframe(struct target_sigaction *ka,
3575 CPUMBState *env, int frame_size)
3577 abi_ulong sp = env->regs[1];
3579 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3580 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3583 return ((sp - frame_size) & -8UL);
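/* The handler is entered with the signal number in r5 and a pointer to the
 * ucontext in r7; r15 is set 8 bytes before the return target (trampoline
 * or caller-supplied restorer) because the handler returns with
 * "rtsd r15, 8". */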
3586 static void setup_frame(int sig, struct target_sigaction *ka,
3587 target_sigset_t *set, CPUMBState *env)
3589 struct target_signal_frame *frame;
3590 abi_ulong frame_addr;
3591 int i;
3593 frame_addr = get_sigframe(ka, env, sizeof *frame);
3594 trace_user_setup_frame(env, frame_addr);
3595 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3596 goto badframe;
3598 /* Save the mask. */
3599 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3601 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3602 __put_user(set->sig[i], &frame->extramask[i - 1]);
3605 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3607 /* Set up to return from userspace. If provided, use a stub
3608 already in userspace. */
3609 /* minus 8 is offset to cater for "rtsd r15,8" offset */
3610 if (ka->sa_flags & TARGET_SA_RESTORER) {
3611 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3612 } else {
3613 uint32_t t;
3614 /* Note, these encodings are _big endian_! */
3615 /* addi r12, r0, __NR_sigreturn */
3616 t = 0x31800000UL | TARGET_NR_sigreturn;
3617 __put_user(t, frame->tramp + 0);
3618 /* brki r14, 0x8 */
3619 t = 0xb9cc0008UL;
3620 __put_user(t, frame->tramp + 1);
3622 /* Return from sighandler will jump to the tramp.
3623 Negative 8 offset because return is rtsd r15, 8 */
3624 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3625 - 8;
3628 /* Set up registers for signal handler */
3629 env->regs[1] = frame_addr;
3630 /* Signal handler args: */
3631 env->regs[5] = sig; /* Arg 0: signum */
3632 env->regs[6] = 0;
3633 /* arg 1: sigcontext */
3634 env->regs[7] = frame_addr + offsetof(typeof(*frame), uc);
3636 /* Offset of 4 to handle microblaze rtid r14, 0 */
3637 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3639 unlock_user_struct(frame, frame_addr, 1);
3640 return;
3641 badframe:
3642 force_sig(TARGET_SIGSEGV);
3645 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3646 target_siginfo_t *info,
3647 target_sigset_t *set, CPUMBState *env)
3649 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3652 long do_sigreturn(CPUMBState *env)
3654 struct target_signal_frame *frame;
3655 abi_ulong frame_addr;
3656 target_sigset_t target_set;
3657 sigset_t set;
3658 int i;
3660 frame_addr = env->regs[R_SP];
3661 trace_user_do_sigreturn(env, frame_addr);
3662 /* Make sure the guest isn't playing games. */
3663 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3664 goto badframe;
3666 /* Restore blocked signals */
3667 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3668 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3669 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3671 target_to_host_sigset_internal(&set, &target_set);
3672 set_sigmask(&set);
3674 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3675 /* We got here through a sigreturn syscall, our path back is via an
3676 rtb insn so set up r14 for that. */
3677 env->regs[14] = env->sregs[SR_PC];
3679 unlock_user_struct(frame, frame_addr, 0);
3680 return -TARGET_QEMU_ESIGRETURN;
3681 badframe:
3682 force_sig(TARGET_SIGSEGV);
3685 long do_rt_sigreturn(CPUMBState *env)
3687 trace_user_do_rt_sigreturn(env, 0);
3688 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3689 return -TARGET_ENOSYS;
3692 #elif defined(TARGET_CRIS)
3694 struct target_sigcontext {
3695 struct target_pt_regs regs; /* needs to be first */
3696 uint32_t oldmask;
3697 uint32_t usp; /* usp before stacking this gunk on it */
3700 /* Signal frames. */
3701 struct target_signal_frame {
3702 struct target_sigcontext sc;
3703 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3704 uint16_t retcode[4]; /* Trampoline code. */
3707 struct rt_signal_frame {
3708 siginfo_t *pinfo;
3709 void *puc;
3710 siginfo_t info;
3711 struct ucontext uc;
3712 uint16_t retcode[4]; /* Trampoline code. */
3715 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3717 __put_user(env->regs[0], &sc->regs.r0);
3718 __put_user(env->regs[1], &sc->regs.r1);
3719 __put_user(env->regs[2], &sc->regs.r2);
3720 __put_user(env->regs[3], &sc->regs.r3);
3721 __put_user(env->regs[4], &sc->regs.r4);
3722 __put_user(env->regs[5], &sc->regs.r5);
3723 __put_user(env->regs[6], &sc->regs.r6);
3724 __put_user(env->regs[7], &sc->regs.r7);
3725 __put_user(env->regs[8], &sc->regs.r8);
3726 __put_user(env->regs[9], &sc->regs.r9);
3727 __put_user(env->regs[10], &sc->regs.r10);
3728 __put_user(env->regs[11], &sc->regs.r11);
3729 __put_user(env->regs[12], &sc->regs.r12);
3730 __put_user(env->regs[13], &sc->regs.r13);
3731 __put_user(env->regs[14], &sc->usp);
3732 __put_user(env->regs[15], &sc->regs.acr);
3733 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3734 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3735 __put_user(env->pc, &sc->regs.erp);
3738 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3740 __get_user(env->regs[0], &sc->regs.r0);
3741 __get_user(env->regs[1], &sc->regs.r1);
3742 __get_user(env->regs[2], &sc->regs.r2);
3743 __get_user(env->regs[3], &sc->regs.r3);
3744 __get_user(env->regs[4], &sc->regs.r4);
3745 __get_user(env->regs[5], &sc->regs.r5);
3746 __get_user(env->regs[6], &sc->regs.r6);
3747 __get_user(env->regs[7], &sc->regs.r7);
3748 __get_user(env->regs[8], &sc->regs.r8);
3749 __get_user(env->regs[9], &sc->regs.r9);
3750 __get_user(env->regs[10], &sc->regs.r10);
3751 __get_user(env->regs[11], &sc->regs.r11);
3752 __get_user(env->regs[12], &sc->regs.r12);
3753 __get_user(env->regs[13], &sc->regs.r13);
3754 __get_user(env->regs[14], &sc->usp);
3755 __get_user(env->regs[15], &sc->regs.acr);
3756 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3757 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3758 __get_user(env->pc, &sc->regs.erp);
3761 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3763 abi_ulong sp;
3764 /* Align the stack downwards to 4. */
3765 sp = (env->regs[R_SP] & ~3);
3766 return sp - framesize;
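/* CRIS handlers receive the signal number in r10 and return through SRP,
 * which is pointed at the movu.w/break 13 trampoline stored in the
 * frame's retcode[]. */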
3769 static void setup_frame(int sig, struct target_sigaction *ka,
3770 target_sigset_t *set, CPUCRISState *env)
3772 struct target_signal_frame *frame;
3773 abi_ulong frame_addr;
3774 int i;
3776 frame_addr = get_sigframe(env, sizeof *frame);
3777 trace_user_setup_frame(env, frame_addr);
3778 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3779 goto badframe;
3782 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3783 * use this trampoline anymore but it sets it up for GDB.
3784 * In QEMU, using the trampoline simplifies things a bit so we use it.
3786 * This is movu.w __NR_sigreturn, r9; break 13;
3788 __put_user(0x9c5f, frame->retcode+0);
3789 __put_user(TARGET_NR_sigreturn,
3790 frame->retcode + 1);
3791 __put_user(0xe93d, frame->retcode + 2);
3793 /* Save the mask. */
3794 __put_user(set->sig[0], &frame->sc.oldmask);
3796 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3797 __put_user(set->sig[i], &frame->extramask[i - 1]);
3800 setup_sigcontext(&frame->sc, env);
3802 /* Move the stack and setup the arguments for the handler. */
3803 env->regs[R_SP] = frame_addr;
3804 env->regs[10] = sig;
3805 env->pc = (unsigned long) ka->_sa_handler;
3806 /* Link SRP so the guest returns through the trampoline. */
3807 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3809 unlock_user_struct(frame, frame_addr, 1);
3810 return;
3811 badframe:
3812 force_sig(TARGET_SIGSEGV);
3815 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3816 target_siginfo_t *info,
3817 target_sigset_t *set, CPUCRISState *env)
3819 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3822 long do_sigreturn(CPUCRISState *env)
3824 struct target_signal_frame *frame;
3825 abi_ulong frame_addr;
3826 target_sigset_t target_set;
3827 sigset_t set;
3828 int i;
3830 frame_addr = env->regs[R_SP];
3831 trace_user_do_sigreturn(env, frame_addr);
3832 /* Make sure the guest isn't playing games. */
3833 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3834 goto badframe;
3837 /* Restore blocked signals */
3838 __get_user(target_set.sig[0], &frame->sc.oldmask);
3839 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3840 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3842 target_to_host_sigset_internal(&set, &target_set);
3843 set_sigmask(&set);
3845 restore_sigcontext(&frame->sc, env);
3846 unlock_user_struct(frame, frame_addr, 0);
3847 return -TARGET_QEMU_ESIGRETURN;
3848 badframe:
3849 force_sig(TARGET_SIGSEGV);
3852 long do_rt_sigreturn(CPUCRISState *env)
3854 trace_user_do_rt_sigreturn(env, 0);
3855 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3856 return -TARGET_ENOSYS;
3859 #elif defined(TARGET_OPENRISC)
3861 struct target_sigcontext {
3862 struct target_pt_regs regs;
3863 abi_ulong oldmask;
3864 abi_ulong usp;
3867 struct target_ucontext {
3868 abi_ulong tuc_flags;
3869 abi_ulong tuc_link;
3870 target_stack_t tuc_stack;
3871 struct target_sigcontext tuc_mcontext;
3872 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3875 struct target_rt_sigframe {
3876 abi_ulong pinfo;
3877 uint64_t puc;
3878 struct target_siginfo info;
3879 struct target_sigcontext sc;
3880 struct target_ucontext uc;
3881 unsigned char retcode[16]; /* trampoline code */
3884 /* This is the asm-generic/ucontext.h version */
3885 #if 0
3886 static int restore_sigcontext(CPUOpenRISCState *regs,
3887 struct target_sigcontext *sc)
3889 unsigned int err = 0;
3890 unsigned long old_usp;
3892 /* Always make any pending restarted system call return -EINTR */
3893 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3895 /* restore the regs from &sc->regs (same as sc, since regs is first)
3896 * (sc is already checked for VERIFY_READ since the sigframe was
3897 * checked in sys_sigreturn previously)
3900 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3901 goto badframe;
3904 /* make sure the U-flag is set so user-mode cannot fool us */
3906 regs->sr &= ~SR_SM;
3908 /* restore the old USP as it was before we stacked the sc etc.
3909 * (we cannot just pop the sigcontext since we aligned the sp and
3910 * stuff after pushing it)
3913 __get_user(old_usp, &sc->usp);
3914 phx_signal("old_usp 0x%lx", old_usp);
3916 __PHX__ REALLY /* ??? */
3917 wrusp(old_usp);
3918 regs->gpr[1] = old_usp;
3920 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3921 * after this completes, but we don't use that mechanism. maybe we can
3922 * use it now ?
3925 return err;
3927 badframe:
3928 return 1;
3930 #endif
3932 /* Set up a signal frame. */
3934 static void setup_sigcontext(struct target_sigcontext *sc,
3935 CPUOpenRISCState *regs,
3936 unsigned long mask)
3938 unsigned long usp = regs->gpr[1];
3940 /* copy the regs. they are first in sc so we can use sc directly */
3942 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3944 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3945 the signal handler. The frametype will be restored to its previous
3946 value in restore_sigcontext. */
3947 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3949 /* then save the old signal mask and the user stack pointer */
3950 __put_user(mask, &sc->oldmask);
3951 __put_user(usp, &sc->usp);
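/* Keep the signal frame word aligned: round the stack pointer down to a
   4-byte boundary. */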
3954 static inline unsigned long align_sigframe(unsigned long sp)
3956 return sp & ~3UL;
3959 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3960 CPUOpenRISCState *regs,
3961 size_t frame_size)
3963 unsigned long sp = regs->gpr[1];
3964 int onsigstack = on_sig_stack(sp);
3966 /* redzone */
3967 /* This is the X/Open sanctioned signal stack switching. */
3968 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3969 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3972 sp = align_sigframe(sp - frame_size);
3975 * If we are on the alternate signal stack and would overflow it, don't.
3976 * Return an always-bogus address instead so we will die with SIGSEGV.
3979 if (onsigstack && !likely(on_sig_stack(sp))) {
3980 return -1L;
3983 return sp;
3986 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3987 target_siginfo_t *info,
3988 target_sigset_t *set, CPUOpenRISCState *env)
3990 int err = 0;
3991 abi_ulong frame_addr;
3992 unsigned long return_ip;
3993 struct target_rt_sigframe *frame;
3994 abi_ulong info_addr, uc_addr;
3996 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3997 trace_user_setup_rt_frame(env, frame_addr);
3998 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3999 goto give_sigsegv;
4002 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4003 __put_user(info_addr, &frame->pinfo);
4004 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4005 __put_user(uc_addr, &frame->puc);
4007 if (ka->sa_flags & TARGET_SA_SIGINFO) {
4008 tswap_siginfo(&frame->info, info);
4011 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
4012 __put_user(0, &frame->uc.tuc_flags);
4013 __put_user(0, &frame->uc.tuc_link);
4014 __put_user(target_sigaltstack_used.ss_sp,
4015 &frame->uc.tuc_stack.ss_sp);
4016 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
4017 __put_user(target_sigaltstack_used.ss_size,
4018 &frame->uc.tuc_stack.ss_size);
4019 setup_sigcontext(&frame->sc, env, set->sig[0]);
4021 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4023 /* trampoline - the desired return ip is the retcode itself */
4024 return_ip = frame_addr + offsetof(struct target_rt_sigframe, retcode);
4025 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4026 __put_user(0xa960, (short *)(frame->retcode + 0));
4027 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4028 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4029 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4031 if (err) {
4032 goto give_sigsegv;
4035 /* TODO what is the current->exec_domain stuff and invmap ? */
4037 /* Set up registers for signal handler */
4038 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4039 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
4040 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
4041 env->gpr[4] = info_addr; /* arg 2: (siginfo_t*) */
4042 env->gpr[5] = uc_addr; /* arg 3: ucontext */
4044 /* actually move the usp to reflect the stacked frame */
4045 env->gpr[1] = frame_addr;
4047 return;
4049 give_sigsegv:
4050 unlock_user_struct(frame, frame_addr, 1);
4051 if (sig == TARGET_SIGSEGV) {
4052 ka->_sa_handler = TARGET_SIG_DFL;
4054 force_sig(TARGET_SIGSEGV);
4057 long do_sigreturn(CPUOpenRISCState *env)
4059 trace_user_do_sigreturn(env, 0);
4060 fprintf(stderr, "do_sigreturn: not implemented\n");
4061 return -TARGET_ENOSYS;
4064 long do_rt_sigreturn(CPUOpenRISCState *env)
4066 trace_user_do_rt_sigreturn(env, 0);
4067 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4068 return -TARGET_ENOSYS;
4070 /* TARGET_OPENRISC */
4072 #elif defined(TARGET_S390X)
4074 #define __NUM_GPRS 16
4075 #define __NUM_FPRS 16
4076 #define __NUM_ACRS 16
4078 #define S390_SYSCALL_SIZE 2
4079 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4081 #define _SIGCONTEXT_NSIG 64
4082 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4083 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4084 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4085 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4086 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4088 typedef struct {
4089 target_psw_t psw;
4090 target_ulong gprs[__NUM_GPRS];
4091 unsigned int acrs[__NUM_ACRS];
4092 } target_s390_regs_common;
4094 typedef struct {
4095 unsigned int fpc;
4096 double fprs[__NUM_FPRS];
4097 } target_s390_fp_regs;
4099 typedef struct {
4100 target_s390_regs_common regs;
4101 target_s390_fp_regs fpregs;
4102 } target_sigregs;
4104 struct target_sigcontext {
4105 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4106 target_sigregs *sregs;
4109 typedef struct {
4110 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4111 struct target_sigcontext sc;
4112 target_sigregs sregs;
4113 int signo;
4114 uint8_t retcode[S390_SYSCALL_SIZE];
4115 } sigframe;
4117 struct target_ucontext {
4118 target_ulong tuc_flags;
4119 struct target_ucontext *tuc_link;
4120 target_stack_t tuc_stack;
4121 target_sigregs tuc_mcontext;
4122 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4125 typedef struct {
4126 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4127 uint8_t retcode[S390_SYSCALL_SIZE];
4128 struct target_siginfo info;
4129 struct target_ucontext uc;
4130 } rt_sigframe;
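/* Pick the stack for the signal frame: switch to the alternate signal stack
   if SA_ONSTACK requests it and we are not already on it, then round the
   result down to an 8-byte boundary. */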
4132 static inline abi_ulong
4133 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4135 abi_ulong sp;
4137 /* Default to using normal stack */
4138 sp = env->regs[15];
4140 /* This is the X/Open sanctioned signal stack switching. */
4141 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4142 if (!sas_ss_flags(sp)) {
4143 sp = target_sigaltstack_used.ss_sp +
4144 target_sigaltstack_used.ss_size;
4148 /* This is the legacy signal stack switching. */
4149 else if (/* FIXME !user_mode(regs) */ 0 &&
4150 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4151 ka->sa_restorer) {
4152 sp = (abi_ulong) ka->sa_restorer;
4155 return (sp - frame_size) & -8ul;
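/* Dump the guest CPU state (PSW, general purpose, access and floating point
   registers) into the sigregs area of the frame. */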
4158 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4160 int i;
4161 //save_access_regs(current->thread.acrs); FIXME
4163 /* Copy a 'clean' PSW mask to the user to avoid leaking
4164 information about whether PER is currently on. */
4165 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4166 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4167 for (i = 0; i < 16; i++) {
4168 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4170 for (i = 0; i < 16; i++) {
4171 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4174 * We have to store the fp registers to current->thread.fp_regs
4175 * to merge them with the emulated registers.
4177 //save_fp_regs(&current->thread.fp_regs); FIXME
4178 for (i = 0; i < 16; i++) {
4179 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4183 static void setup_frame(int sig, struct target_sigaction *ka,
4184 target_sigset_t *set, CPUS390XState *env)
4186 sigframe *frame;
4187 abi_ulong frame_addr;
4189 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4190 trace_user_setup_frame(env, frame_addr);
4191 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4192 goto give_sigsegv;
4195 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4197 save_sigregs(env, &frame->sregs);
4199 __put_user((abi_ulong)(frame_addr + offsetof(sigframe, sregs)),
4200 (abi_ulong *)&frame->sc.sregs);
4202 /* Set up to return from userspace. If provided, use a stub
4203 already in userspace. */
4204 if (ka->sa_flags & TARGET_SA_RESTORER) {
4205 env->regs[14] = (unsigned long)
4206 ka->sa_restorer | PSW_ADDR_AMODE;
4207 } else {
4208 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4209 | PSW_ADDR_AMODE;
4210 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4211 (uint16_t *)(frame->retcode));
4214 /* Set up backchain. */
4215 __put_user(env->regs[15], (abi_ulong *) frame);
4217 /* Set up registers for signal handler */
4218 env->regs[15] = frame_addr;
4219 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4221 env->regs[2] = sig; //map_signal(sig);
4222 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4224 /* We forgot to include these in the sigcontext.
4225 To avoid breaking binary compatibility, they are passed as args. */
4226 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4227 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4229 /* Place signal number on stack to allow backtrace from handler. */
4230 __put_user(env->regs[2], (int *) &frame->signo);
4231 unlock_user_struct(frame, frame_addr, 1);
4232 return;
4234 give_sigsegv:
4235 force_sig(TARGET_SIGSEGV);
4238 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4239 target_siginfo_t *info,
4240 target_sigset_t *set, CPUS390XState *env)
4242 int i;
4243 rt_sigframe *frame;
4244 abi_ulong frame_addr;
4246 frame_addr = get_sigframe(ka, env, sizeof *frame);
4247 trace_user_setup_rt_frame(env, frame_addr);
4248 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4249 goto give_sigsegv;
4252 tswap_siginfo(&frame->info, info);
4254 /* Create the ucontext. */
4255 __put_user(0, &frame->uc.tuc_flags);
4256 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4257 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4258 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4259 &frame->uc.tuc_stack.ss_flags);
4260 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4261 save_sigregs(env, &frame->uc.tuc_mcontext);
4262 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4263 __put_user((abi_ulong)set->sig[i],
4264 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4267 /* Set up to return from userspace. If provided, use a stub
4268 already in userspace. */
4269 if (ka->sa_flags & TARGET_SA_RESTORER) {
4270 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4271 } else {
4272 env->regs[14] = (frame_addr + offsetof(rt_sigframe, retcode))
| PSW_ADDR_AMODE;
4273 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4274 (uint16_t *)(frame->retcode));
4277 /* Set up backchain. */
4278 __put_user(env->regs[15], (abi_ulong *) frame);
4280 /* Set up registers for signal handler */
4281 env->regs[15] = frame_addr;
4282 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4284 env->regs[2] = sig; //map_signal(sig);
4285 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4286 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4287 return;
4289 give_sigsegv:
4290 force_sig(TARGET_SIGSEGV);
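/* Reload PSW, general purpose, access and floating point registers from the
   sigregs area of the frame. Always returns 0. */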
4293 static int
4294 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4296 int err = 0;
4297 int i;
4299 for (i = 0; i < 16; i++) {
4300 __get_user(env->regs[i], &sc->regs.gprs[i]);
4303 __get_user(env->psw.mask, &sc->regs.psw.mask);
4304 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4305 (unsigned long long)env->psw.addr);
4306 __get_user(env->psw.addr, &sc->regs.psw.addr);
4307 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4309 for (i = 0; i < 16; i++) {
4310 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4312 for (i = 0; i < 16; i++) {
4313 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4316 return err;
4319 long do_sigreturn(CPUS390XState *env)
4321 sigframe *frame;
4322 abi_ulong frame_addr = env->regs[15];
4323 target_sigset_t target_set;
4324 sigset_t set;
4326 trace_user_do_sigreturn(env, frame_addr);
4327 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4328 goto badframe;
4330 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4332 target_to_host_sigset_internal(&set, &target_set);
4333 set_sigmask(&set); /* ~_BLOCKABLE? */
4335 if (restore_sigregs(env, &frame->sregs)) {
4336 goto badframe;
4339 unlock_user_struct(frame, frame_addr, 0);
4340 return -TARGET_QEMU_ESIGRETURN;
4342 badframe:
4343 force_sig(TARGET_SIGSEGV);
4344 return 0;
4347 long do_rt_sigreturn(CPUS390XState *env)
4349 rt_sigframe *frame;
4350 abi_ulong frame_addr = env->regs[15];
4351 sigset_t set;
4353 trace_user_do_rt_sigreturn(env, frame_addr);
4354 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4355 goto badframe;
4357 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4359 set_sigmask(&set); /* ~_BLOCKABLE? */
4361 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4362 goto badframe;
4365 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4366 get_sp_from_cpustate(env)) == -EFAULT) {
4367 goto badframe;
4369 unlock_user_struct(frame, frame_addr, 0);
4370 return -TARGET_QEMU_ESIGRETURN;
4372 badframe:
4373 unlock_user_struct(frame, frame_addr, 0);
4374 force_sig(TARGET_SIGSEGV);
4375 return 0;
4378 #elif defined(TARGET_PPC)
4380 /* Size of dummy stack frame allocated when calling signal handler.
4381 See arch/powerpc/include/asm/ptrace.h. */
4382 #if defined(TARGET_PPC64)
4383 #define SIGNAL_FRAMESIZE 128
4384 #else
4385 #define SIGNAL_FRAMESIZE 64
4386 #endif
4388 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4389 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4390 struct target_mcontext {
4391 target_ulong mc_gregs[48];
4392 /* Includes fpscr. */
4393 uint64_t mc_fregs[33];
4394 target_ulong mc_pad[2];
4395 /* We need to handle Altivec and SPE at the same time, which no
4396 kernel needs to do. Fortunately, the kernel defines this bit to
4397 be Altivec-register-large all the time, rather than trying to
4398 twiddle it based on the specific platform. */
4399 union {
4400 /* SPE vector registers. One extra for SPEFSCR. */
4401 uint32_t spe[33];
4402 /* Altivec vector registers. The packing of VSCR and VRSAVE
4403 varies depending on whether we're PPC64 or not: PPC64 splits
4404 them apart; PPC32 stuffs them together. */
4405 #if defined(TARGET_PPC64)
4406 #define QEMU_NVRREG 34
4407 #else
4408 #define QEMU_NVRREG 33
4409 #endif
4410 ppc_avr_t altivec[QEMU_NVRREG];
4411 #undef QEMU_NVRREG
4412 } mc_vregs __attribute__((__aligned__(16)));
4415 /* See arch/powerpc/include/asm/sigcontext.h. */
4416 struct target_sigcontext {
4417 target_ulong _unused[4];
4418 int32_t signal;
4419 #if defined(TARGET_PPC64)
4420 int32_t pad0;
4421 #endif
4422 target_ulong handler;
4423 target_ulong oldmask;
4424 target_ulong regs; /* struct pt_regs __user * */
4425 #if defined(TARGET_PPC64)
4426 struct target_mcontext mcontext;
4427 #endif
4430 /* Indices for target_mcontext.mc_gregs, below.
4431 See arch/powerpc/include/asm/ptrace.h for details. */
4432 enum {
4433 TARGET_PT_R0 = 0,
4434 TARGET_PT_R1 = 1,
4435 TARGET_PT_R2 = 2,
4436 TARGET_PT_R3 = 3,
4437 TARGET_PT_R4 = 4,
4438 TARGET_PT_R5 = 5,
4439 TARGET_PT_R6 = 6,
4440 TARGET_PT_R7 = 7,
4441 TARGET_PT_R8 = 8,
4442 TARGET_PT_R9 = 9,
4443 TARGET_PT_R10 = 10,
4444 TARGET_PT_R11 = 11,
4445 TARGET_PT_R12 = 12,
4446 TARGET_PT_R13 = 13,
4447 TARGET_PT_R14 = 14,
4448 TARGET_PT_R15 = 15,
4449 TARGET_PT_R16 = 16,
4450 TARGET_PT_R17 = 17,
4451 TARGET_PT_R18 = 18,
4452 TARGET_PT_R19 = 19,
4453 TARGET_PT_R20 = 20,
4454 TARGET_PT_R21 = 21,
4455 TARGET_PT_R22 = 22,
4456 TARGET_PT_R23 = 23,
4457 TARGET_PT_R24 = 24,
4458 TARGET_PT_R25 = 25,
4459 TARGET_PT_R26 = 26,
4460 TARGET_PT_R27 = 27,
4461 TARGET_PT_R28 = 28,
4462 TARGET_PT_R29 = 29,
4463 TARGET_PT_R30 = 30,
4464 TARGET_PT_R31 = 31,
4465 TARGET_PT_NIP = 32,
4466 TARGET_PT_MSR = 33,
4467 TARGET_PT_ORIG_R3 = 34,
4468 TARGET_PT_CTR = 35,
4469 TARGET_PT_LNK = 36,
4470 TARGET_PT_XER = 37,
4471 TARGET_PT_CCR = 38,
4472 /* Yes, there are two registers with #39. One is 64-bit only. */
4473 TARGET_PT_MQ = 39,
4474 TARGET_PT_SOFTE = 39,
4475 TARGET_PT_TRAP = 40,
4476 TARGET_PT_DAR = 41,
4477 TARGET_PT_DSISR = 42,
4478 TARGET_PT_RESULT = 43,
4479 TARGET_PT_REGS_COUNT = 44
4483 struct target_ucontext {
4484 target_ulong tuc_flags;
4485 target_ulong tuc_link; /* struct ucontext __user * */
4486 struct target_sigaltstack tuc_stack;
4487 #if !defined(TARGET_PPC64)
4488 int32_t tuc_pad[7];
4489 target_ulong tuc_regs; /* struct mcontext __user *
4490 points to uc_mcontext field */
4491 #endif
4492 target_sigset_t tuc_sigmask;
4493 #if defined(TARGET_PPC64)
4494 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4495 struct target_sigcontext tuc_sigcontext;
4496 #else
4497 int32_t tuc_maskext[30];
4498 int32_t tuc_pad2[3];
4499 struct target_mcontext tuc_mcontext;
4500 #endif
4503 /* See arch/powerpc/kernel/signal_32.c. */
4504 struct target_sigframe {
4505 struct target_sigcontext sctx;
4506 struct target_mcontext mctx;
4507 int32_t abigap[56];
4510 #if defined(TARGET_PPC64)
4512 #define TARGET_TRAMP_SIZE 6
4514 struct target_rt_sigframe {
4515 /* sys_rt_sigreturn requires the ucontext be the first field */
4516 struct target_ucontext uc;
4517 target_ulong _unused[2];
4518 uint32_t trampoline[TARGET_TRAMP_SIZE];
4519 target_ulong pinfo; /* struct siginfo __user * */
4520 target_ulong puc; /* void __user * */
4521 struct target_siginfo info;
4522 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4523 char abigap[288];
4524 } __attribute__((aligned(16)));
4526 #else
4528 struct target_rt_sigframe {
4529 struct target_siginfo info;
4530 struct target_ucontext uc;
4531 int32_t abigap[56];
4534 #endif
4536 #if defined(TARGET_PPC64)
4538 struct target_func_ptr {
4539 target_ulong entry;
4540 target_ulong toc;
4543 #endif
4545 /* We use the mc_pad field for the signal return trampoline. */
4546 #define tramp mc_pad
4548 /* See arch/powerpc/kernel/signal.c. */
4549 static target_ulong get_sigframe(struct target_sigaction *ka,
4550 CPUPPCState *env,
4551 int frame_size)
4553 target_ulong oldsp;
4555 oldsp = env->gpr[1];
4557 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4558 (sas_ss_flags(oldsp) == 0)) {
4559 oldsp = (target_sigaltstack_used.ss_sp
4560 + target_sigaltstack_used.ss_size);
4563 return (oldsp - frame_size) & ~0xFUL;
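/* Write the guest register state into the mcontext: GPRs, NIP, CTR, LR, XER
   and CR always; Altivec, FP and SPE state when the CPU has them, with the
   corresponding MSR bits set to mark the data as valid. */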
4566 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4568 target_ulong msr = env->msr;
4569 int i;
4570 target_ulong ccr = 0;
4572 /* In general, the kernel attempts to be intelligent about what it
4573 needs to save for Altivec/FP/SPE registers. We don't care that
4574 much, so we just go ahead and save everything. */
4576 /* Save general registers. */
4577 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4578 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4580 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4581 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4582 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4583 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4585 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4586 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4588 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4590 /* Save Altivec registers if necessary. */
4591 if (env->insns_flags & PPC_ALTIVEC) {
4592 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4593 ppc_avr_t *avr = &env->avr[i];
4594 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4596 __put_user(avr->u64[0], &vreg->u64[0]);
4597 __put_user(avr->u64[1], &vreg->u64[1]);
4599 /* Set MSR_VR in the saved MSR value to indicate that
4600 frame->mc_vregs contains valid data. */
4601 msr |= MSR_VR;
4602 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4603 &frame->mc_vregs.altivec[32].u32[3]);
4606 /* Save floating point registers. */
4607 if (env->insns_flags & PPC_FLOAT) {
4608 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4609 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4611 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4614 /* Save SPE registers. The kernel only saves the high half. */
4615 if (env->insns_flags & PPC_SPE) {
4616 #if defined(TARGET_PPC64)
4617 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4618 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4620 #else
4621 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4622 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4624 #endif
4625 /* Set MSR_SPE in the saved MSR value to indicate that
4626 frame->mc_vregs contains valid data. */
4627 msr |= MSR_SPE;
4628 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4631 /* Store MSR. */
4632 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4635 static void encode_trampoline(int sigret, uint32_t *tramp)
4637 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4638 if (sigret) {
4639 __put_user(0x38000000 | sigret, &tramp[0]);
4640 __put_user(0x44000002, &tramp[1]);
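/* Inverse of save_user_regs: reload GPRs, NIP, CTR, LR, XER and CR from the
   mcontext, plus Altivec, FP and SPE state when present. For a sigreturn
   (sig != 0) the saved MSR_LE bit is also restored; otherwise r2 is
   preserved. */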
4644 static void restore_user_regs(CPUPPCState *env,
4645 struct target_mcontext *frame, int sig)
4647 target_ulong save_r2 = 0;
4648 target_ulong msr;
4649 target_ulong ccr;
4651 int i;
4653 if (!sig) {
4654 save_r2 = env->gpr[2];
4657 /* Restore general registers. */
4658 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4659 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4661 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4662 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4663 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4664 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4665 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4667 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4668 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4671 if (!sig) {
4672 env->gpr[2] = save_r2;
4674 /* Restore MSR. */
4675 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4677 /* If doing signal return, restore the previous little-endian mode. */
4678 if (sig)
4679 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4681 /* Restore Altivec registers if necessary. */
4682 if (env->insns_flags & PPC_ALTIVEC) {
4683 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4684 ppc_avr_t *avr = &env->avr[i];
4685 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4687 __get_user(avr->u64[0], &vreg->u64[0]);
4688 __get_user(avr->u64[1], &vreg->u64[1]);
4690 /* Restore VRSAVE, which is stored alongside the Altivec
4691 registers in frame->mc_vregs. */
4692 __get_user(env->spr[SPR_VRSAVE],
4693 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4696 /* Restore floating point registers. */
4697 if (env->insns_flags & PPC_FLOAT) {
4698 uint64_t fpscr;
4699 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4700 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4702 __get_user(fpscr, &frame->mc_fregs[32]);
4703 env->fpscr = (uint32_t) fpscr;
4706 /* Restore SPE registers. The kernel only restores the high half. */
4707 if (env->insns_flags & PPC_SPE) {
4708 #if defined(TARGET_PPC64)
4709 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4710 uint32_t hi;
4712 __get_user(hi, &frame->mc_vregs.spe[i]);
4713 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4715 #else
4716 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4717 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4719 #endif
4720 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4724 static void setup_frame(int sig, struct target_sigaction *ka,
4725 target_sigset_t *set, CPUPPCState *env)
4727 struct target_sigframe *frame;
4728 struct target_sigcontext *sc;
4729 target_ulong frame_addr, newsp;
4730 int err = 0;
4731 #if defined(TARGET_PPC64)
4732 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4733 #endif
4735 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4736 trace_user_setup_frame(env, frame_addr);
4737 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4738 goto sigsegv;
4739 sc = &frame->sctx;
4741 __put_user(ka->_sa_handler, &sc->handler);
4742 __put_user(set->sig[0], &sc->oldmask);
4743 #if TARGET_ABI_BITS == 64
4744 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4745 #else
4746 __put_user(set->sig[1], &sc->_unused[3]);
4747 #endif
4748 __put_user(h2g(&frame->mctx), &sc->regs);
4749 __put_user(sig, &sc->signal);
4751 /* Save user regs. */
4752 save_user_regs(env, &frame->mctx);
4754 /* Construct the trampoline code on the stack. */
4755 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4757 /* The kernel checks for the presence of a VDSO here. We don't
4758 emulate a vdso, so use a sigreturn system call. */
4759 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4761 /* Turn off all fp exceptions. */
4762 env->fpscr = 0;
4764 /* Create a stack frame for the caller of the handler. */
4765 newsp = frame_addr - SIGNAL_FRAMESIZE;
4766 err |= put_user(env->gpr[1], newsp, target_ulong);
4768 if (err)
4769 goto sigsegv;
4771 /* Set up registers for signal handler. */
4772 env->gpr[1] = newsp;
4773 env->gpr[3] = sig;
4774 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4776 #if defined(TARGET_PPC64)
4777 if (get_ppc64_abi(image) < 2) {
4778 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4779 struct target_func_ptr *handler =
4780 (struct target_func_ptr *)g2h(ka->_sa_handler);
4781 env->nip = tswapl(handler->entry);
4782 env->gpr[2] = tswapl(handler->toc);
4783 } else {
4784 /* ELFv2 PPC64 function pointers are entry points, but R12
4785 * must also be set */
4786 env->nip = tswapl((target_ulong) ka->_sa_handler);
4787 env->gpr[12] = env->nip;
4789 #else
4790 env->nip = (target_ulong) ka->_sa_handler;
4791 #endif
4793 /* Signal handlers are entered in big-endian mode. */
4794 env->msr &= ~(1ull << MSR_LE);
4796 unlock_user_struct(frame, frame_addr, 1);
4797 return;
4799 sigsegv:
4800 unlock_user_struct(frame, frame_addr, 1);
4801 force_sig(TARGET_SIGSEGV);
4804 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4805 target_siginfo_t *info,
4806 target_sigset_t *set, CPUPPCState *env)
4808 struct target_rt_sigframe *rt_sf;
4809 uint32_t *trampptr = 0;
4810 struct target_mcontext *mctx = 0;
4811 target_ulong rt_sf_addr, newsp = 0;
4812 int i, err = 0;
4813 #if defined(TARGET_PPC64)
4814 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4815 #endif
4817 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4818 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4819 goto sigsegv;
4821 tswap_siginfo(&rt_sf->info, info);
4823 __put_user(0, &rt_sf->uc.tuc_flags);
4824 __put_user(0, &rt_sf->uc.tuc_link);
4825 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4826 &rt_sf->uc.tuc_stack.ss_sp);
4827 __put_user(sas_ss_flags(env->gpr[1]),
4828 &rt_sf->uc.tuc_stack.ss_flags);
4829 __put_user(target_sigaltstack_used.ss_size,
4830 &rt_sf->uc.tuc_stack.ss_size);
4831 #if !defined(TARGET_PPC64)
4832 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4833 &rt_sf->uc.tuc_regs);
4834 #endif
4835 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4836 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4839 #if defined(TARGET_PPC64)
4840 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4841 trampptr = &rt_sf->trampoline[0];
4842 #else
4843 mctx = &rt_sf->uc.tuc_mcontext;
4844 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4845 #endif
4847 save_user_regs(env, mctx);
4848 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4850 /* The kernel checks for the presence of a VDSO here. We don't
4851 emulate a vdso, so use a sigreturn system call. */
4852 env->lr = (target_ulong) h2g(trampptr);
4854 /* Turn off all fp exceptions. */
4855 env->fpscr = 0;
4857 /* Create a stack frame for the caller of the handler. */
4858 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4859 err |= put_user(env->gpr[1], newsp, target_ulong);
4861 if (err)
4862 goto sigsegv;
4864 /* Set up registers for signal handler. */
4865 env->gpr[1] = newsp;
4866 env->gpr[3] = (target_ulong) sig;
4867 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4868 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4869 env->gpr[6] = (target_ulong) h2g(rt_sf);
4871 #if defined(TARGET_PPC64)
4872 if (get_ppc64_abi(image) < 2) {
4873 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4874 struct target_func_ptr *handler =
4875 (struct target_func_ptr *)g2h(ka->_sa_handler);
4876 env->nip = tswapl(handler->entry);
4877 env->gpr[2] = tswapl(handler->toc);
4878 } else {
4879 /* ELFv2 PPC64 function pointers are entry points, but R12
4880 * must also be set */
4881 env->nip = tswapl((target_ulong) ka->_sa_handler);
4882 env->gpr[12] = env->nip;
4884 #else
4885 env->nip = (target_ulong) ka->_sa_handler;
4886 #endif
4888 /* Signal handlers are entered in big-endian mode. */
4889 env->msr &= ~(1ull << MSR_LE);
4891 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4892 return;
4894 sigsegv:
4895 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4896 force_sig(TARGET_SIGSEGV);
4900 long do_sigreturn(CPUPPCState *env)
4902 struct target_sigcontext *sc = NULL;
4903 struct target_mcontext *sr = NULL;
4904 target_ulong sr_addr = 0, sc_addr;
4905 sigset_t blocked;
4906 target_sigset_t set;
4908 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4909 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4910 goto sigsegv;
4912 #if defined(TARGET_PPC64)
4913 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4914 #else
4915 __get_user(set.sig[0], &sc->oldmask);
4916 __get_user(set.sig[1], &sc->_unused[3]);
4917 #endif
4918 target_to_host_sigset_internal(&blocked, &set);
4919 set_sigmask(&blocked);
4921 __get_user(sr_addr, &sc->regs);
4922 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4923 goto sigsegv;
4924 restore_user_regs(env, sr, 1);
4926 unlock_user_struct(sr, sr_addr, 1);
4927 unlock_user_struct(sc, sc_addr, 1);
4928 return -TARGET_QEMU_ESIGRETURN;
4930 sigsegv:
4931 unlock_user_struct(sr, sr_addr, 1);
4932 unlock_user_struct(sc, sc_addr, 1);
4933 force_sig(TARGET_SIGSEGV);
4934 return 0;
4937 /* See arch/powerpc/kernel/signal_32.c. */
4938 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4940 struct target_mcontext *mcp;
4941 target_ulong mcp_addr;
4942 sigset_t blocked;
4943 target_sigset_t set;
4945 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4946 sizeof (set)))
4947 return 1;
4949 #if defined(TARGET_PPC64)
4950 mcp_addr = h2g(ucp) +
4951 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4952 #else
4953 __get_user(mcp_addr, &ucp->tuc_regs);
4954 #endif
4956 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4957 return 1;
4959 target_to_host_sigset_internal(&blocked, &set);
4960 set_sigmask(&blocked);
4961 restore_user_regs(env, mcp, sig);
4963 unlock_user_struct(mcp, mcp_addr, 1);
4964 return 0;
4967 long do_rt_sigreturn(CPUPPCState *env)
4969 struct target_rt_sigframe *rt_sf = NULL;
4970 target_ulong rt_sf_addr;
4972 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4973 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4974 goto sigsegv;
4976 if (do_setcontext(&rt_sf->uc, env, 1))
4977 goto sigsegv;
4979 do_sigaltstack(rt_sf_addr
4980 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4981 0, env->gpr[1]);
4983 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4984 return -TARGET_QEMU_ESIGRETURN;
4986 sigsegv:
4987 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4988 force_sig(TARGET_SIGSEGV);
4989 return 0;
4992 #elif defined(TARGET_M68K)
4994 struct target_sigcontext {
4995 abi_ulong sc_mask;
4996 abi_ulong sc_usp;
4997 abi_ulong sc_d0;
4998 abi_ulong sc_d1;
4999 abi_ulong sc_a0;
5000 abi_ulong sc_a1;
5001 unsigned short sc_sr;
5002 abi_ulong sc_pc;
5005 struct target_sigframe
5007 abi_ulong pretcode;
5008 int sig;
5009 int code;
5010 abi_ulong psc;
5011 char retcode[8];
5012 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5013 struct target_sigcontext sc;
5016 typedef int target_greg_t;
5017 #define TARGET_NGREG 18
5018 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5020 typedef struct target_fpregset {
5021 int f_fpcntl[3];
5022 int f_fpregs[8*3];
5023 } target_fpregset_t;
5025 struct target_mcontext {
5026 int version;
5027 target_gregset_t gregs;
5028 target_fpregset_t fpregs;
5031 #define TARGET_MCONTEXT_VERSION 2
5033 struct target_ucontext {
5034 abi_ulong tuc_flags;
5035 abi_ulong tuc_link;
5036 target_stack_t tuc_stack;
5037 struct target_mcontext tuc_mcontext;
5038 abi_long tuc_filler[80];
5039 target_sigset_t tuc_sigmask;
5042 struct target_rt_sigframe
5044 abi_ulong pretcode;
5045 int sig;
5046 abi_ulong pinfo;
5047 abi_ulong puc;
5048 char retcode[8];
5049 struct target_siginfo info;
5050 struct target_ucontext uc;
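/* Fill the legacy sigcontext: blocked-signal mask, user stack pointer,
   d0/d1/a0/a1, SR and PC. */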
5053 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5054 abi_ulong mask)
5056 __put_user(mask, &sc->sc_mask);
5057 __put_user(env->aregs[7], &sc->sc_usp);
5058 __put_user(env->dregs[0], &sc->sc_d0);
5059 __put_user(env->dregs[1], &sc->sc_d1);
5060 __put_user(env->aregs[0], &sc->sc_a0);
5061 __put_user(env->aregs[1], &sc->sc_a1);
5062 __put_user(env->sr, &sc->sc_sr);
5063 __put_user(env->pc, &sc->sc_pc);
5066 static void
5067 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5069 int temp;
5071 __get_user(env->aregs[7], &sc->sc_usp);
5072 __get_user(env->dregs[0], &sc->sc_d0);
5073 __get_user(env->dregs[1], &sc->sc_d1);
5074 __get_user(env->aregs[0], &sc->sc_a0);
5075 __get_user(env->aregs[1], &sc->sc_a1);
5076 __get_user(env->pc, &sc->sc_pc);
5077 __get_user(temp, &sc->sc_sr);
5078 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5082 * Determine which stack to use.
5084 static inline abi_ulong
5085 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5086 size_t frame_size)
5088 unsigned long sp;
5090 sp = regs->aregs[7];
5092 /* This is the X/Open sanctioned signal stack switching. */
5093 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5094 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5097 return ((sp - frame_size) & -8UL);
5100 static void setup_frame(int sig, struct target_sigaction *ka,
5101 target_sigset_t *set, CPUM68KState *env)
5103 struct target_sigframe *frame;
5104 abi_ulong frame_addr;
5105 abi_ulong retcode_addr;
5106 abi_ulong sc_addr;
5107 int i;
5109 frame_addr = get_sigframe(ka, env, sizeof *frame);
5110 trace_user_setup_frame(env, frame_addr);
5111 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5112 goto give_sigsegv;
5115 __put_user(sig, &frame->sig);
5117 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5118 __put_user(sc_addr, &frame->psc);
5120 setup_sigcontext(&frame->sc, env, set->sig[0]);
5122 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5123 __put_user(set->sig[i], &frame->extramask[i - 1]);
5126 /* Set up to return from userspace. */
5128 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5129 __put_user(retcode_addr, &frame->pretcode);
5131 /* moveq #,d0; trap #0 */
5133 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5134 (uint32_t *)(frame->retcode));
5136 /* Set up to return from userspace */
5138 env->aregs[7] = frame_addr;
5139 env->pc = ka->_sa_handler;
5141 unlock_user_struct(frame, frame_addr, 1);
5142 return;
5144 give_sigsegv:
5145 force_sig(TARGET_SIGSEGV);
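/* Write the full register set (d0-d7, a0-a7, PC and SR) and the mcontext
   version into the guest ucontext. */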
5148 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5149 CPUM68KState *env)
5151 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5153 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5154 __put_user(env->dregs[0], &gregs[0]);
5155 __put_user(env->dregs[1], &gregs[1]);
5156 __put_user(env->dregs[2], &gregs[2]);
5157 __put_user(env->dregs[3], &gregs[3]);
5158 __put_user(env->dregs[4], &gregs[4]);
5159 __put_user(env->dregs[5], &gregs[5]);
5160 __put_user(env->dregs[6], &gregs[6]);
5161 __put_user(env->dregs[7], &gregs[7]);
5162 __put_user(env->aregs[0], &gregs[8]);
5163 __put_user(env->aregs[1], &gregs[9]);
5164 __put_user(env->aregs[2], &gregs[10]);
5165 __put_user(env->aregs[3], &gregs[11]);
5166 __put_user(env->aregs[4], &gregs[12]);
5167 __put_user(env->aregs[5], &gregs[13]);
5168 __put_user(env->aregs[6], &gregs[14]);
5169 __put_user(env->aregs[7], &gregs[15]);
5170 __put_user(env->pc, &gregs[16]);
5171 __put_user(env->sr, &gregs[17]);
5173 return 0;
5176 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5177 struct target_ucontext *uc)
5179 int temp;
5180 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5182 __get_user(temp, &uc->tuc_mcontext.version);
5183 if (temp != TARGET_MCONTEXT_VERSION)
5184 goto badframe;
5186 /* restore passed registers */
5187 __get_user(env->dregs[0], &gregs[0]);
5188 __get_user(env->dregs[1], &gregs[1]);
5189 __get_user(env->dregs[2], &gregs[2]);
5190 __get_user(env->dregs[3], &gregs[3]);
5191 __get_user(env->dregs[4], &gregs[4]);
5192 __get_user(env->dregs[5], &gregs[5]);
5193 __get_user(env->dregs[6], &gregs[6]);
5194 __get_user(env->dregs[7], &gregs[7]);
5195 __get_user(env->aregs[0], &gregs[8]);
5196 __get_user(env->aregs[1], &gregs[9]);
5197 __get_user(env->aregs[2], &gregs[10]);
5198 __get_user(env->aregs[3], &gregs[11]);
5199 __get_user(env->aregs[4], &gregs[12]);
5200 __get_user(env->aregs[5], &gregs[13]);
5201 __get_user(env->aregs[6], &gregs[14]);
5202 __get_user(env->aregs[7], &gregs[15]);
5203 __get_user(env->pc, &gregs[16]);
5204 __get_user(temp, &gregs[17]);
5205 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5207 return 0;
5209 badframe:
5210 return 1;
5213 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5214 target_siginfo_t *info,
5215 target_sigset_t *set, CPUM68KState *env)
5217 struct target_rt_sigframe *frame;
5218 abi_ulong frame_addr;
5219 abi_ulong retcode_addr;
5220 abi_ulong info_addr;
5221 abi_ulong uc_addr;
5222 int err = 0;
5223 int i;
5225 frame_addr = get_sigframe(ka, env, sizeof *frame);
5226 trace_user_setup_rt_frame(env, frame_addr);
5227 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5228 goto give_sigsegv;
5231 __put_user(sig, &frame->sig);
5233 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5234 __put_user(info_addr, &frame->pinfo);
5236 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5237 __put_user(uc_addr, &frame->puc);
5239 tswap_siginfo(&frame->info, info);
5241 /* Create the ucontext */
5243 __put_user(0, &frame->uc.tuc_flags);
5244 __put_user(0, &frame->uc.tuc_link);
5245 __put_user(target_sigaltstack_used.ss_sp,
5246 &frame->uc.tuc_stack.ss_sp);
5247 __put_user(sas_ss_flags(env->aregs[7]),
5248 &frame->uc.tuc_stack.ss_flags);
5249 __put_user(target_sigaltstack_used.ss_size,
5250 &frame->uc.tuc_stack.ss_size);
5251 err |= target_rt_setup_ucontext(&frame->uc, env);
5253 if (err)
5254 goto give_sigsegv;
5256 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5257 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5260 /* Set up to return from userspace. */
5262 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5263 __put_user(retcode_addr, &frame->pretcode);
5265 /* moveq #,d0; notb d0; trap #0 */
5267 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5268 (uint32_t *)(frame->retcode + 0));
5269 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5271 if (err)
5272 goto give_sigsegv;
5274 /* Set up to return from userspace */
5276 env->aregs[7] = frame_addr;
5277 env->pc = ka->_sa_handler;
5279 unlock_user_struct(frame, frame_addr, 1);
5280 return;
5282 give_sigsegv:
5283 unlock_user_struct(frame, frame_addr, 1);
5284 force_sig(TARGET_SIGSEGV);
5287 long do_sigreturn(CPUM68KState *env)
5289 struct target_sigframe *frame;
5290 abi_ulong frame_addr = env->aregs[7] - 4;
5291 target_sigset_t target_set;
5292 sigset_t set;
5293 int i;
5295 trace_user_do_sigreturn(env, frame_addr);
5296 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5297 goto badframe;
5299 /* set blocked signals */
5301 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5303 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5304 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5307 target_to_host_sigset_internal(&set, &target_set);
5308 set_sigmask(&set);
5310 /* restore registers */
5312 restore_sigcontext(env, &frame->sc);
5314 unlock_user_struct(frame, frame_addr, 0);
5315 return -TARGET_QEMU_ESIGRETURN;
5317 badframe:
5318 force_sig(TARGET_SIGSEGV);
5319 return 0;
5322 long do_rt_sigreturn(CPUM68KState *env)
5324 struct target_rt_sigframe *frame;
5325 abi_ulong frame_addr = env->aregs[7] - 4;
5327 sigset_t set;
5329 trace_user_do_rt_sigreturn(env, frame_addr);
5330 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5331 goto badframe;
5333 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5334 set_sigmask(&set);
5336 /* restore registers */
5338 if (target_rt_restore_ucontext(env, &frame->uc))
5339 goto badframe;
5341 if (do_sigaltstack(frame_addr +
5342 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5343 0, get_sp_from_cpustate(env)) == -EFAULT)
5344 goto badframe;
5346 unlock_user_struct(frame, frame_addr, 0);
5347 return -TARGET_QEMU_ESIGRETURN;
5349 badframe:
5350 unlock_user_struct(frame, frame_addr, 0);
5351 force_sig(TARGET_SIGSEGV);
5352 return 0;
5355 #elif defined(TARGET_ALPHA)
5357 struct target_sigcontext {
5358 abi_long sc_onstack;
5359 abi_long sc_mask;
5360 abi_long sc_pc;
5361 abi_long sc_ps;
5362 abi_long sc_regs[32];
5363 abi_long sc_ownedfp;
5364 abi_long sc_fpregs[32];
5365 abi_ulong sc_fpcr;
5366 abi_ulong sc_fp_control;
5367 abi_ulong sc_reserved1;
5368 abi_ulong sc_reserved2;
5369 abi_ulong sc_ssize;
5370 abi_ulong sc_sbase;
5371 abi_ulong sc_traparg_a0;
5372 abi_ulong sc_traparg_a1;
5373 abi_ulong sc_traparg_a2;
5374 abi_ulong sc_fp_trap_pc;
5375 abi_ulong sc_fp_trigger_sum;
5376 abi_ulong sc_fp_trigger_inst;
5379 struct target_ucontext {
5380 abi_ulong tuc_flags;
5381 abi_ulong tuc_link;
5382 abi_ulong tuc_osf_sigmask;
5383 target_stack_t tuc_stack;
5384 struct target_sigcontext tuc_mcontext;
5385 target_sigset_t tuc_sigmask;
5388 struct target_sigframe {
5389 struct target_sigcontext sc;
5390 unsigned int retcode[3];
5393 struct target_rt_sigframe {
5394 target_siginfo_t info;
5395 struct target_ucontext uc;
5396 unsigned int retcode[3];
5399 #define INSN_MOV_R30_R16 0x47fe0410
5400 #define INSN_LDI_R0 0x201f0000
5401 #define INSN_CALLSYS 0x00000083
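/* Fill the OSF/1-style sigcontext: on-stack flag, old mask, PC, PS and the
   integer/FP register files plus the FPCR. The traparg fields are not
   emulated and are left as zero. */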
5403 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5404 abi_ulong frame_addr, target_sigset_t *set)
5406 int i;
5408 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5409 __put_user(set->sig[0], &sc->sc_mask);
5410 __put_user(env->pc, &sc->sc_pc);
5411 __put_user(8, &sc->sc_ps);
5413 for (i = 0; i < 31; ++i) {
5414 __put_user(env->ir[i], &sc->sc_regs[i]);
5416 __put_user(0, &sc->sc_regs[31]);
5418 for (i = 0; i < 31; ++i) {
5419 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5421 __put_user(0, &sc->sc_fpregs[31]);
5422 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5424 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5425 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5426 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5429 static void restore_sigcontext(CPUAlphaState *env,
5430 struct target_sigcontext *sc)
5432 uint64_t fpcr;
5433 int i;
5435 __get_user(env->pc, &sc->sc_pc);
5437 for (i = 0; i < 31; ++i) {
5438 __get_user(env->ir[i], &sc->sc_regs[i]);
5440 for (i = 0; i < 31; ++i) {
5441 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5444 __get_user(fpcr, &sc->sc_fpcr);
5445 cpu_alpha_store_fpcr(env, fpcr);
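/* Pick the stack for the signal frame and round it down to a 32-byte
   boundary. */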
5448 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5449 CPUAlphaState *env,
5450 unsigned long framesize)
5452 abi_ulong sp = env->ir[IR_SP];
5454 /* This is the X/Open sanctioned signal stack switching. */
5455 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5456 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5458 return (sp - framesize) & -32;
5461 static void setup_frame(int sig, struct target_sigaction *ka,
5462 target_sigset_t *set, CPUAlphaState *env)
5464 abi_ulong frame_addr, r26;
5465 struct target_sigframe *frame;
5466 int err = 0;
5468 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5469 trace_user_setup_frame(env, frame_addr);
5470 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5471 goto give_sigsegv;
5474 setup_sigcontext(&frame->sc, env, frame_addr, set);
5476 if (ka->sa_restorer) {
5477 r26 = ka->sa_restorer;
5478 } else {
5479 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5480 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5481 &frame->retcode[1]);
5482 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5483 /* imb() */
5484 r26 = frame_addr;
5487 unlock_user_struct(frame, frame_addr, 1);
5489 if (err) {
5490 give_sigsegv:
5491 if (sig == TARGET_SIGSEGV) {
5492 ka->_sa_handler = TARGET_SIG_DFL;
5494 force_sig(TARGET_SIGSEGV);
5497 env->ir[IR_RA] = r26;
5498 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5499 env->ir[IR_A0] = sig;
5500 env->ir[IR_A1] = 0;
5501 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5502 env->ir[IR_SP] = frame_addr;
5505 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5506 target_siginfo_t *info,
5507 target_sigset_t *set, CPUAlphaState *env)
5509 abi_ulong frame_addr, r26;
5510 struct target_rt_sigframe *frame;
5511 int i, err = 0;
5513 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5514 trace_user_setup_rt_frame(env, frame_addr);
5515 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5516 goto give_sigsegv;
5519 tswap_siginfo(&frame->info, info);
5521 __put_user(0, &frame->uc.tuc_flags);
5522 __put_user(0, &frame->uc.tuc_link);
5523 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5524 __put_user(target_sigaltstack_used.ss_sp,
5525 &frame->uc.tuc_stack.ss_sp);
5526 __put_user(sas_ss_flags(env->ir[IR_SP]),
5527 &frame->uc.tuc_stack.ss_flags);
5528 __put_user(target_sigaltstack_used.ss_size,
5529 &frame->uc.tuc_stack.ss_size);
5530 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5531 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5532 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5535 if (ka->sa_restorer) {
5536 r26 = ka->sa_restorer;
5537 } else {
5538 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5539 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5540 &frame->retcode[1]);
5541 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5542 /* imb(); */
5543 r26 = frame_addr;
5546 if (err) {
5547 give_sigsegv:
5548 if (sig == TARGET_SIGSEGV) {
5549 ka->_sa_handler = TARGET_SIG_DFL;
5551 force_sig(TARGET_SIGSEGV);
5554 env->ir[IR_RA] = r26;
5555 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5556 env->ir[IR_A0] = sig;
5557 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5558 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5559 env->ir[IR_SP] = frame_addr;
5562 long do_sigreturn(CPUAlphaState *env)
5564 struct target_sigcontext *sc;
5565 abi_ulong sc_addr = env->ir[IR_A0];
5566 target_sigset_t target_set;
5567 sigset_t set;
5569 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5570 goto badframe;
5573 target_sigemptyset(&target_set);
5574 __get_user(target_set.sig[0], &sc->sc_mask);
5576 target_to_host_sigset_internal(&set, &target_set);
5577 set_sigmask(&set);
5579 restore_sigcontext(env, sc);
5580 unlock_user_struct(sc, sc_addr, 0);
5581 return -TARGET_QEMU_ESIGRETURN;
5583 badframe:
5584 force_sig(TARGET_SIGSEGV);
5587 long do_rt_sigreturn(CPUAlphaState *env)
5589 abi_ulong frame_addr = env->ir[IR_A0];
5590 struct target_rt_sigframe *frame;
5591 sigset_t set;
5593 trace_user_do_rt_sigreturn(env, frame_addr);
5594 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5595 goto badframe;
5597 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5598 set_sigmask(&set);
5600 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5601 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5602 uc.tuc_stack),
5603 0, env->ir[IR_SP]) == -EFAULT) {
5604 goto badframe;
5607 unlock_user_struct(frame, frame_addr, 0);
5608 return -TARGET_QEMU_ESIGRETURN;
5611 badframe:
5612 unlock_user_struct(frame, frame_addr, 0);
5613 force_sig(TARGET_SIGSEGV);
5616 #elif defined(TARGET_TILEGX)
5618 struct target_sigcontext {
5619 union {
5620 /* General-purpose registers. */
5621 abi_ulong gregs[56];
5622 struct {
5623 abi_ulong __gregs[53];
5624 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5625 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5626 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5629 abi_ulong pc; /* Program counter. */
5630 abi_ulong ics; /* In Interrupt Critical Section? */
5631 abi_ulong faultnum; /* Fault number. */
5632 abi_ulong pad[5];
5635 struct target_ucontext {
5636 abi_ulong tuc_flags;
5637 abi_ulong tuc_link;
5638 target_stack_t tuc_stack;
5639 struct target_sigcontext tuc_mcontext;
5640 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5643 struct target_rt_sigframe {
5644 unsigned char save_area[16]; /* caller save area */
5645 struct target_siginfo info;
5646 struct target_ucontext uc;
5647 abi_ulong retcode[2];
5650 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5651 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
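/* Save the general registers, PC, a zero ICS flag and the signal number
   (as faultnum) into the guest sigcontext. */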
5654 static void setup_sigcontext(struct target_sigcontext *sc,
5655 CPUArchState *env, int signo)
5657 int i;
5659 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5660 __put_user(env->regs[i], &sc->gregs[i]);
5663 __put_user(env->pc, &sc->pc);
5664 __put_user(0, &sc->ics);
5665 __put_user(signo, &sc->faultnum);
5668 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5670 int i;
5672 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5673 __get_user(env->regs[i], &sc->gregs[i]);
5676 __get_user(env->pc, &sc->pc);
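/* Pick the stack for the frame: refuse (return -1) if the alternate stack
   would overflow, switch to it when SA_ONSTACK asks for it, and align the
   result down to 16 bytes. */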
5679 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5680 size_t frame_size)
5682 unsigned long sp = env->regs[TILEGX_R_SP];
5684 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5685 return -1UL;
5688 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5689 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5692 sp -= frame_size;
5693 sp &= -16UL;
5694 return sp;
5697 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5698 target_siginfo_t *info,
5699 target_sigset_t *set, CPUArchState *env)
5701 abi_ulong frame_addr;
5702 struct target_rt_sigframe *frame;
5703 unsigned long restorer;
5705 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5706 trace_user_setup_rt_frame(env, frame_addr);
5707 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5708 goto give_sigsegv;
5711 /* Always write at least the signal number for the stack backtracer. */
5712 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5713 /* At sigreturn time, restore the callee-save registers too. */
5714 tswap_siginfo(&frame->info, info);
5715 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
5716 } else {
5717 __put_user(info->si_signo, &frame->info.si_signo);
5720 /* Create the ucontext. */
5721 __put_user(0, &frame->uc.tuc_flags);
5722 __put_user(0, &frame->uc.tuc_link);
5723 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5724 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5725 &frame->uc.tuc_stack.ss_flags);
5726 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5727 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5729 if (ka->sa_flags & TARGET_SA_RESTORER) {
5730 restorer = (unsigned long) ka->sa_restorer;
5731 } else {
5732 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5733 __put_user(INSN_SWINT1, &frame->retcode[1]);
5734 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5736 env->pc = (unsigned long) ka->_sa_handler;
5737 env->regs[TILEGX_R_SP] = frame_addr;
5738 env->regs[TILEGX_R_LR] = restorer;
5739 env->regs[0] = (unsigned long) sig;
5740 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5741 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5742 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
5744 unlock_user_struct(frame, frame_addr, 1);
5745 return;
5747 give_sigsegv:
5748 if (sig == TARGET_SIGSEGV) {
5749 ka->_sa_handler = TARGET_SIG_DFL;
5751 force_sig(TARGET_SIGSEGV /* , current */);
5754 long do_rt_sigreturn(CPUTLGState *env)
5756 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5757 struct target_rt_sigframe *frame;
5758 sigset_t set;
5760 trace_user_do_rt_sigreturn(env, frame_addr);
5761 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5762 goto badframe;
5764 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5765 set_sigmask(&set);
5767 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5768 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5769 uc.tuc_stack),
5770 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5771 goto badframe;
5774 unlock_user_struct(frame, frame_addr, 0);
5775 return -TARGET_QEMU_ESIGRETURN;
5778 badframe:
5779 unlock_user_struct(frame, frame_addr, 0);
5780 force_sig(TARGET_SIGSEGV);
5783 #else
5785 static void setup_frame(int sig, struct target_sigaction *ka,
5786 target_sigset_t *set, CPUArchState *env)
5788 fprintf(stderr, "setup_frame: not implemented\n");
5791 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5792 target_siginfo_t *info,
5793 target_sigset_t *set, CPUArchState *env)
5795 fprintf(stderr, "setup_rt_frame: not implemented\n");
5798 long do_sigreturn(CPUArchState *env)
5800 fprintf(stderr, "do_sigreturn: not implemented\n");
5801 return -TARGET_ENOSYS;
5804 long do_rt_sigreturn(CPUArchState *env)
5806 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5807 return -TARGET_ENOSYS;
5810 #endif
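/* Deliver a single pending guest signal: give gdb a chance to intercept it,
   apply the default or ignore action where appropriate, or install the
   handler's blocked mask and build a (rt_)signal frame so the guest resumes
   in its handler. */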
5812 static void handle_pending_signal(CPUArchState *cpu_env, int sig)
5814 CPUState *cpu = ENV_GET_CPU(cpu_env);
5815 abi_ulong handler;
5816 sigset_t set;
5817 target_sigset_t target_old_set;
5818 struct target_sigaction *sa;
5819 TaskState *ts = cpu->opaque;
5820 struct emulated_sigtable *k = &ts->sigtab[sig - 1];
5822 trace_user_handle_signal(cpu_env, sig);
5823 /* dequeue signal */
5824 k->pending = 0;
5826 sig = gdb_handlesig(cpu, sig);
5827 if (!sig) {
5828 sa = NULL;
5829 handler = TARGET_SIG_IGN;
5830 } else {
5831 sa = &sigact_table[sig - 1];
5832 handler = sa->_sa_handler;
5835 if (handler == TARGET_SIG_DFL) {
5836 /* default handler: ignore some signals; the others are job control or fatal */
5837 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5838 kill(getpid(),SIGSTOP);
5839 } else if (sig != TARGET_SIGCHLD &&
5840 sig != TARGET_SIGURG &&
5841 sig != TARGET_SIGWINCH &&
5842 sig != TARGET_SIGCONT) {
5843 force_sig(sig);
5845 } else if (handler == TARGET_SIG_IGN) {
5846 /* ignore sig */
5847 } else if (handler == TARGET_SIG_ERR) {
5848 force_sig(sig);
5849 } else {
5850 /* compute the blocked signals during the handler execution */
5851 sigset_t *blocked_set;
5853 target_to_host_sigset(&set, &sa->sa_mask);
5854 /* SA_NODEFER indicates that the current signal should not be
5855 blocked during the handler */
5856 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5857 sigaddset(&set, target_to_host_signal(sig));
5859 /* save the previous blocked signal state to restore it at the
5860 end of the signal execution (see do_sigreturn) */
5861 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
5863 /* block signals in the handler */
5864 blocked_set = ts->in_sigsuspend ?
5865 &ts->sigsuspend_mask : &ts->signal_mask;
5866 sigorset(&ts->signal_mask, blocked_set, &set);
5867 ts->in_sigsuspend = 0;
5869 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5870 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5872 CPUX86State *env = cpu_env;
5873 if (env->eflags & VM_MASK)
5874 save_v86_state(env);
5876 #endif
5877 /* prepare the stack frame of the virtual CPU */
5878 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5879 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5880 /* These targets do not have traditional signals. */
5881 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5882 #else
5883 if (sa->sa_flags & TARGET_SA_SIGINFO)
5884 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5885 else
5886 setup_frame(sig, sa, &target_old_set, cpu_env);
5887 #endif
5888 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5889 sa->_sa_handler = TARGET_SIG_DFL;
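/* Drain pending guest signals with host signals blocked: a pending
   synchronous signal is delivered first (and is forcibly unblocked and
   un-ignored), then any other pending, unblocked signal in signal-number
   order; finally the host signal mask is restored with SIGSEGV and SIGBUS
   kept unblocked. */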
5894 void process_pending_signals(CPUArchState *cpu_env)
5896 CPUState *cpu = ENV_GET_CPU(cpu_env);
5897 int sig;
5898 TaskState *ts = cpu->opaque;
5899 sigset_t set;
5900 sigset_t *blocked_set;
5902 while (atomic_read(&ts->signal_pending)) {
5903 /* FIXME: This is not threadsafe. */
5904 sigfillset(&set);
5905 sigprocmask(SIG_SETMASK, &set, 0);
5907 sig = ts->sync_signal.pending;
5908 if (sig) {
5909 /* Synchronous signals are forced,
5910 * see force_sig_info() and callers in Linux.
5911 * Note that not all of our queue_signal() calls in QEMU correspond
5912 * to force_sig_info() calls in Linux (some are send_sig_info()).
5913 * However it seems like a kernel bug to me to allow the process
5914 * to block a synchronous signal since it could then just end up
5915 * looping round and round indefinitely.
5917 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
5918 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
5919 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
5920 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
5923 handle_pending_signal(cpu_env, sig);
5926 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5927 blocked_set = ts->in_sigsuspend ?
5928 &ts->sigsuspend_mask : &ts->signal_mask;
5930 if (ts->sigtab[sig - 1].pending &&
5931 (!sigismember(blocked_set,
5932 target_to_host_signal_table[sig]))) {
5933 handle_pending_signal(cpu_env, sig);
5934 /* Restart scan from the beginning */
5935 sig = 1;
5939 /* if no signal is pending, unblock signals and recheck (the act
5940 * of unblocking might cause us to take another host signal which
5941 * will set signal_pending again).
5943 atomic_set(&ts->signal_pending, 0);
5944 ts->in_sigsuspend = 0;
5945 set = ts->signal_mask;
5946 sigdelset(&set, SIGSEGV);
5947 sigdelset(&set, SIGBUS);
5948 sigprocmask(SIG_SETMASK, &set, 0);
5950 ts->in_sigsuspend = 0;