linux-user/signal.c
1 /*
2 * Emulation of Linux signals
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
35 static struct target_sigaction sigact_table[TARGET_NSIG];
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
83 static uint8_t target_to_host_signal_table[_NSIG];
85 static inline int on_sig_stack(unsigned long sp)
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
91 static inline int sas_ss_flags(unsigned long sp)
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
97 int host_to_target_signal(int sig)
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
104 int target_to_host_signal(int sig)
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
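#if 0
/* Illustrative sketch only (not part of the original file): the two
 * conversion tables are built in signal_init() below so that each is
 * intended to be the inverse of the other, including the deliberately
 * swapped __SIGRTMIN/__SIGRTMAX entries.  A debug-time consistency
 * check could therefore look like this (helper name is made up):
 */
static int example_signal_tables_roundtrip(int host_sig)
{
    int guest_sig = host_to_target_signal(host_sig);

    /* normally the identity for any validly mapped host signal */
    return target_to_host_signal(guest_sig) == host_sig;
}
#endif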
111 static inline void target_sigemptyset(target_sigset_t *set)
113 memset(set, 0, sizeof(*set));
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
144 target_sigset_t d1;
145 int i;
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
166 target_sigset_t s1;
167 int i;
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
185 target_sigset_t d;
186 int i;
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
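#if 0
/* Illustrative sketch only (not part of the original file): converting a
 * single-word "old" guest signal mask into a host sigset_t with the
 * helper above, then testing membership on the host side.  The helper
 * name below is made up for illustration.
 */
static int example_old_mask_has_usr1(abi_ulong guest_mask)
{
    sigset_t host_set;

    target_to_host_old_sigset(&host_set, &guest_mask);
    return sigismember(&host_set, target_to_host_signal(TARGET_SIGUSR1));
}
#endif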
194 int block_signals(void)
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
198 int pending;
200 /* It's OK to block everything including SIGSEGV, because we won't
201 * run any further guest code before unblocking signals in
202 * process_pending_signals().
204 sigfillset(&set);
205 sigprocmask(SIG_SETMASK, &set, 0);
207 pending = atomic_xchg(&ts->signal_pending, 1);
209 return pending;
212 /* Wrapper for sigprocmask function
213 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
214  * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
215 * a signal was already pending and the syscall must be restarted, or
216 * 0 on success.
217 * If set is NULL, this is guaranteed not to fail.
219 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
221 TaskState *ts = (TaskState *)thread_cpu->opaque;
223 if (oldset) {
224 *oldset = ts->signal_mask;
227 if (set) {
228 int i;
230 if (block_signals()) {
231 return -TARGET_ERESTARTSYS;
234 switch (how) {
235 case SIG_BLOCK:
236 sigorset(&ts->signal_mask, &ts->signal_mask, set);
237 break;
238 case SIG_UNBLOCK:
239 for (i = 1; i <= NSIG; ++i) {
240 if (sigismember(set, i)) {
241 sigdelset(&ts->signal_mask, i);
244 break;
245 case SIG_SETMASK:
246 ts->signal_mask = *set;
247 break;
248 default:
249 g_assert_not_reached();
252 /* Silently ignore attempts to change blocking status of KILL or STOP */
253 sigdelset(&ts->signal_mask, SIGKILL);
254 sigdelset(&ts->signal_mask, SIGSTOP);
256 return 0;
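#if 0
/* Illustrative sketch only (not part of the original file): a typical
 * caller of do_sigprocmask().  The sets are host sigset_t values, and a
 * -TARGET_ERESTARTSYS result means a signal was already pending and the
 * emulated syscall should be restarted.  The helper name is made up.
 */
static int example_block_host_sigusr1(void)
{
    sigset_t set, oldset;

    sigemptyset(&set);
    sigaddset(&set, target_to_host_signal(TARGET_SIGUSR1));
    return do_sigprocmask(SIG_BLOCK, &set, &oldset);
}
#endif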
259 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
260 !defined(TARGET_X86_64)
261 /* Just set the guest's signal mask to the specified value; the
262 * caller is assumed to have called block_signals() already.
264 static void set_sigmask(const sigset_t *set)
266 TaskState *ts = (TaskState *)thread_cpu->opaque;
268 ts->signal_mask = *set;
270 #endif
272 /* siginfo conversion */
274 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
275 const siginfo_t *info)
277 int sig = host_to_target_signal(info->si_signo);
278 int si_code = info->si_code;
279 int si_type;
280 tinfo->si_signo = sig;
281 tinfo->si_errno = 0;
282 tinfo->si_code = info->si_code;
284 /* This is awkward, because we have to use a combination of
285 * the si_code and si_signo to figure out which of the union's
286 * members are valid. (Within the host kernel it is always possible
287 * to tell, but the kernel carefully avoids giving userspace the
288 * high 16 bits of si_code, so we don't have the information to
289 * do this the easy way...) We therefore make our best guess,
290 * bearing in mind that a guest can spoof most of the si_codes
291 * via rt_sigqueueinfo() if it likes.
293 * Once we have made our guess, we record it in the top 16 bits of
294 * the si_code, so that tswap_siginfo() later can use it.
295 * tswap_siginfo() will strip these top bits out before writing
296 * si_code to the guest (sign-extending the lower bits).
299 switch (si_code) {
300 case SI_USER:
301 case SI_TKILL:
302 case SI_KERNEL:
303 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
304 * These are the only unspoofable si_code values.
306 tinfo->_sifields._kill._pid = info->si_pid;
307 tinfo->_sifields._kill._uid = info->si_uid;
308 si_type = QEMU_SI_KILL;
309 break;
310 default:
311 /* Everything else is spoofable. Make best guess based on signal */
312 switch (sig) {
313 case TARGET_SIGCHLD:
314 tinfo->_sifields._sigchld._pid = info->si_pid;
315 tinfo->_sifields._sigchld._uid = info->si_uid;
316 tinfo->_sifields._sigchld._status
317 = host_to_target_waitstatus(info->si_status);
318 tinfo->_sifields._sigchld._utime = info->si_utime;
319 tinfo->_sifields._sigchld._stime = info->si_stime;
320 si_type = QEMU_SI_CHLD;
321 break;
322 case TARGET_SIGIO:
323 tinfo->_sifields._sigpoll._band = info->si_band;
324 tinfo->_sifields._sigpoll._fd = info->si_fd;
325 si_type = QEMU_SI_POLL;
326 break;
327 default:
328 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
329 tinfo->_sifields._rt._pid = info->si_pid;
330 tinfo->_sifields._rt._uid = info->si_uid;
331 /* XXX: potential problem if 64 bit */
332 tinfo->_sifields._rt._sigval.sival_ptr
333 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
334 si_type = QEMU_SI_RT;
335 break;
337 break;
340 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
343 static void tswap_siginfo(target_siginfo_t *tinfo,
344 const target_siginfo_t *info)
346 int si_type = extract32(info->si_code, 16, 16);
347 int si_code = sextract32(info->si_code, 0, 16);
349 __put_user(info->si_signo, &tinfo->si_signo);
350 __put_user(info->si_errno, &tinfo->si_errno);
351 __put_user(si_code, &tinfo->si_code);
353 /* We can use our internal marker of which fields in the structure
354 * are valid, rather than duplicating the guesswork of
355 * host_to_target_siginfo_noswap() here.
357 switch (si_type) {
358 case QEMU_SI_KILL:
359 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
360 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
361 break;
362 case QEMU_SI_TIMER:
363 __put_user(info->_sifields._timer._timer1,
364 &tinfo->_sifields._timer._timer1);
365 __put_user(info->_sifields._timer._timer2,
366 &tinfo->_sifields._timer._timer2);
367 break;
368 case QEMU_SI_POLL:
369 __put_user(info->_sifields._sigpoll._band,
370 &tinfo->_sifields._sigpoll._band);
371 __put_user(info->_sifields._sigpoll._fd,
372 &tinfo->_sifields._sigpoll._fd);
373 break;
374 case QEMU_SI_FAULT:
375 __put_user(info->_sifields._sigfault._addr,
376 &tinfo->_sifields._sigfault._addr);
377 break;
378 case QEMU_SI_CHLD:
379 __put_user(info->_sifields._sigchld._pid,
380 &tinfo->_sifields._sigchld._pid);
381 __put_user(info->_sifields._sigchld._uid,
382 &tinfo->_sifields._sigchld._uid);
383 __put_user(info->_sifields._sigchld._status,
384 &tinfo->_sifields._sigchld._status);
385 __put_user(info->_sifields._sigchld._utime,
386 &tinfo->_sifields._sigchld._utime);
387 __put_user(info->_sifields._sigchld._stime,
388 &tinfo->_sifields._sigchld._stime);
389 break;
390 case QEMU_SI_RT:
391 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
392 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
393 __put_user(info->_sifields._rt._sigval.sival_ptr,
394 &tinfo->_sifields._rt._sigval.sival_ptr);
395 break;
396 default:
397 g_assert_not_reached();
401 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
403 host_to_target_siginfo_noswap(tinfo, info);
404 tswap_siginfo(tinfo, tinfo);
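#if 0
/* Illustrative sketch only (not part of the original file): how the
 * internal si_type marker travels in the top 16 bits of si_code.
 * host_to_target_siginfo_noswap() packs it with deposit32() and
 * tswap_siginfo() recovers it with extract32()/sextract32().
 */
static void example_si_code_packing(int si_code)
{
    int packed = deposit32(si_code, 16, 16, QEMU_SI_KILL);
    int si_type = extract32(packed, 16, 16);    /* QEMU_SI_KILL again */
    int guest_code = sextract32(packed, 0, 16); /* original si_code   */

    (void)si_type;
    (void)guest_code;
}
#endif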
407 /* XXX: we support only POSIX RT signals here. */
408 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
409 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
411 /* This conversion is used only for the rt_sigqueueinfo syscall,
412 * and so we know that the _rt fields are the valid ones.
414 abi_ulong sival_ptr;
416 __get_user(info->si_signo, &tinfo->si_signo);
417 __get_user(info->si_errno, &tinfo->si_errno);
418 __get_user(info->si_code, &tinfo->si_code);
419 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
420 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
421 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
422 info->si_value.sival_ptr = (void *)(long)sival_ptr;
425 static int fatal_signal (int sig)
427 switch (sig) {
428 case TARGET_SIGCHLD:
429 case TARGET_SIGURG:
430 case TARGET_SIGWINCH:
431 /* Ignored by default. */
432 return 0;
433 case TARGET_SIGCONT:
434 case TARGET_SIGSTOP:
435 case TARGET_SIGTSTP:
436 case TARGET_SIGTTIN:
437 case TARGET_SIGTTOU:
438 /* Job control signals. */
439 return 0;
440 default:
441 return 1;
445 /* returns 1 if the given signal should dump core when not handled */
446 static int core_dump_signal(int sig)
448 switch (sig) {
449 case TARGET_SIGABRT:
450 case TARGET_SIGFPE:
451 case TARGET_SIGILL:
452 case TARGET_SIGQUIT:
453 case TARGET_SIGSEGV:
454 case TARGET_SIGTRAP:
455 case TARGET_SIGBUS:
456 return (1);
457 default:
458 return (0);
462 void signal_init(void)
464 TaskState *ts = (TaskState *)thread_cpu->opaque;
465 struct sigaction act;
466 struct sigaction oact;
467 int i, j;
468 int host_sig;
470 /* generate signal conversion tables */
471 for(i = 1; i < _NSIG; i++) {
472 if (host_to_target_signal_table[i] == 0)
473 host_to_target_signal_table[i] = i;
475 for(i = 1; i < _NSIG; i++) {
476 j = host_to_target_signal_table[i];
477 target_to_host_signal_table[j] = i;
480 /* Set the signal mask from the host mask. */
481 sigprocmask(0, 0, &ts->signal_mask);
483 /* set all host signal handlers. ALL signals are blocked during
484 the handlers to serialize them. */
485 memset(sigact_table, 0, sizeof(sigact_table));
487 sigfillset(&act.sa_mask);
488 act.sa_flags = SA_SIGINFO;
489 act.sa_sigaction = host_signal_handler;
490 for(i = 1; i <= TARGET_NSIG; i++) {
491 host_sig = target_to_host_signal(i);
492 sigaction(host_sig, NULL, &oact);
493 if (oact.sa_sigaction == (void *)SIG_IGN) {
494 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
495 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
496 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
498 /* If there's already a handler installed then something has
499 gone horribly wrong, so don't even try to handle that case. */
500 /* Install some handlers for our own use. We need at least
501        SIGSEGV and SIGBUS, to detect exceptions. We cannot just
502 trap all signals because it affects syscall interrupt
503 behavior. But do trap all default-fatal signals. */
504 if (fatal_signal (i))
505 sigaction(host_sig, &act, NULL);
510 /* abort execution with signal */
511 static void QEMU_NORETURN force_sig(int target_sig)
513 CPUState *cpu = thread_cpu;
514 CPUArchState *env = cpu->env_ptr;
515 TaskState *ts = (TaskState *)cpu->opaque;
516 int host_sig, core_dumped = 0;
517 struct sigaction act;
519 host_sig = target_to_host_signal(target_sig);
520 trace_user_force_sig(env, target_sig, host_sig);
521 gdb_signalled(env, target_sig);
523 /* dump core if supported by target binary format */
524 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
525 stop_all_tasks();
526 core_dumped =
527 ((*ts->bprm->core_dump)(target_sig, env) == 0);
529 if (core_dumped) {
530 /* we already dumped the core of target process, we don't want
531 * a coredump of qemu itself */
532 struct rlimit nodump;
533 getrlimit(RLIMIT_CORE, &nodump);
534 nodump.rlim_cur=0;
535 setrlimit(RLIMIT_CORE, &nodump);
536 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
537 target_sig, strsignal(host_sig), "core dumped" );
540 /* The proper exit code for dying from an uncaught signal is
541 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
542 * a negative value. To get the proper exit code we need to
543 * actually die from an uncaught signal. Here the default signal
544      * handler is installed; we send ourselves a signal and wait for
545 * it to arrive. */
546 sigfillset(&act.sa_mask);
547 act.sa_handler = SIG_DFL;
548 act.sa_flags = 0;
549 sigaction(host_sig, &act, NULL);
551 /* For some reason raise(host_sig) doesn't send the signal when
552 * statically linked on x86-64. */
553 kill(getpid(), host_sig);
555 /* Make sure the signal isn't masked (just reuse the mask inside
556 of act) */
557 sigdelset(&act.sa_mask, host_sig);
558 sigsuspend(&act.sa_mask);
560 /* unreachable */
561 abort();
564 /* queue a signal so that it will be sent to the virtual CPU as soon
565 as possible */
566 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
568 CPUState *cpu = ENV_GET_CPU(env);
569 TaskState *ts = cpu->opaque;
571 trace_user_queue_signal(env, sig);
573 /* Currently all callers define siginfo structures which
574 * use the _sifields._sigfault union member, so we can
575 * set the type here. If that changes we should push this
576 * out so the si_type is passed in by callers.
578 info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT);
580 ts->sync_signal.info = *info;
581 ts->sync_signal.pending = sig;
582 /* signal that a new signal is pending */
583 atomic_set(&ts->signal_pending, 1);
584 return 1; /* indicates that the signal was queued */
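#if 0
/* Illustrative sketch only (not part of the original file): how a
 * target fault handler might queue a synchronous SIGSEGV for the guest.
 * The helper name and fault address are made up for illustration, and
 * TARGET_SEGV_MAPERR is assumed to come from the target headers.
 */
static void example_queue_segv(CPUArchState *env, abi_ulong fault_addr)
{
    target_siginfo_t info;

    info.si_signo = TARGET_SIGSEGV;
    info.si_errno = 0;
    info.si_code = TARGET_SEGV_MAPERR;
    info._sifields._sigfault._addr = fault_addr;
    queue_signal(env, info.si_signo, &info);
}
#endif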
587 #ifndef HAVE_SAFE_SYSCALL
588 static inline void rewind_if_in_safe_syscall(void *puc)
590 /* Default version: never rewind */
592 #endif
594 static void host_signal_handler(int host_signum, siginfo_t *info,
595 void *puc)
597 CPUArchState *env = thread_cpu->env_ptr;
598 CPUState *cpu = ENV_GET_CPU(env);
599 TaskState *ts = cpu->opaque;
601 int sig;
602 target_siginfo_t tinfo;
603 ucontext_t *uc = puc;
604 struct emulated_sigtable *k;
606 /* the CPU emulator uses some host signals to detect exceptions,
607        so we forward some signals to it */
608 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
609 && info->si_code > 0) {
610 if (cpu_signal_handler(host_signum, info, puc))
611 return;
614 /* get target signal number */
615 sig = host_to_target_signal(host_signum);
616 if (sig < 1 || sig > TARGET_NSIG)
617 return;
618 trace_user_host_signal(env, host_signum, sig);
620 rewind_if_in_safe_syscall(puc);
622 host_to_target_siginfo_noswap(&tinfo, info);
623 k = &ts->sigtab[sig - 1];
624 k->info = tinfo;
625 k->pending = sig;
626 ts->signal_pending = 1;
628 /* Block host signals until target signal handler entered. We
629 * can't block SIGSEGV or SIGBUS while we're executing guest
630 * code in case the guest code provokes one in the window between
631 * now and it getting out to the main loop. Signals will be
632 * unblocked again in process_pending_signals().
634 sigfillset(&uc->uc_sigmask);
635 sigdelset(&uc->uc_sigmask, SIGSEGV);
636 sigdelset(&uc->uc_sigmask, SIGBUS);
638 /* interrupt the virtual CPU as soon as possible */
639 cpu_exit(thread_cpu);
642 /* do_sigaltstack() returns target values and errnos. */
643 /* compare linux/kernel/signal.c:do_sigaltstack() */
644 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
646 int ret;
647 struct target_sigaltstack oss;
649 /* XXX: test errors */
650 if(uoss_addr)
652 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
653 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
654 __put_user(sas_ss_flags(sp), &oss.ss_flags);
657 if(uss_addr)
659 struct target_sigaltstack *uss;
660 struct target_sigaltstack ss;
661 size_t minstacksize = TARGET_MINSIGSTKSZ;
663 #if defined(TARGET_PPC64)
664 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
665 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
666 if (get_ppc64_abi(image) > 1) {
667 minstacksize = 4096;
669 #endif
671 ret = -TARGET_EFAULT;
672 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
673 goto out;
675 __get_user(ss.ss_sp, &uss->ss_sp);
676 __get_user(ss.ss_size, &uss->ss_size);
677 __get_user(ss.ss_flags, &uss->ss_flags);
678 unlock_user_struct(uss, uss_addr, 0);
680 ret = -TARGET_EPERM;
681 if (on_sig_stack(sp))
682 goto out;
684 ret = -TARGET_EINVAL;
685 if (ss.ss_flags != TARGET_SS_DISABLE
686 && ss.ss_flags != TARGET_SS_ONSTACK
687 && ss.ss_flags != 0)
688 goto out;
690 if (ss.ss_flags == TARGET_SS_DISABLE) {
691 ss.ss_size = 0;
692 ss.ss_sp = 0;
693 } else {
694 ret = -TARGET_ENOMEM;
695 if (ss.ss_size < minstacksize) {
696 goto out;
700 target_sigaltstack_used.ss_sp = ss.ss_sp;
701 target_sigaltstack_used.ss_size = ss.ss_size;
704 if (uoss_addr) {
705 ret = -TARGET_EFAULT;
706 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
707 goto out;
710 ret = 0;
711 out:
712 return ret;
715 /* do_sigaction() returns target values and host errnos */
716 int do_sigaction(int sig, const struct target_sigaction *act,
717 struct target_sigaction *oact)
719 struct target_sigaction *k;
720 struct sigaction act1;
721 int host_sig;
722 int ret = 0;
724 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
725 return -TARGET_EINVAL;
728 if (block_signals()) {
729 return -TARGET_ERESTARTSYS;
732 k = &sigact_table[sig - 1];
733 if (oact) {
734 __put_user(k->_sa_handler, &oact->_sa_handler);
735 __put_user(k->sa_flags, &oact->sa_flags);
736 #if !defined(TARGET_MIPS)
737 __put_user(k->sa_restorer, &oact->sa_restorer);
738 #endif
739 /* Not swapped. */
740 oact->sa_mask = k->sa_mask;
742 if (act) {
743 /* FIXME: This is not threadsafe. */
744 __get_user(k->_sa_handler, &act->_sa_handler);
745 __get_user(k->sa_flags, &act->sa_flags);
746 #if !defined(TARGET_MIPS)
747 __get_user(k->sa_restorer, &act->sa_restorer);
748 #endif
749 /* To be swapped in target_to_host_sigset. */
750 k->sa_mask = act->sa_mask;
752 /* we update the host linux signal state */
753 host_sig = target_to_host_signal(sig);
754 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
755 sigfillset(&act1.sa_mask);
756 act1.sa_flags = SA_SIGINFO;
757 if (k->sa_flags & TARGET_SA_RESTART)
758 act1.sa_flags |= SA_RESTART;
759 /* NOTE: it is important to update the host kernel signal
760 ignore state to avoid getting unexpected interrupted
761 syscalls */
762 if (k->_sa_handler == TARGET_SIG_IGN) {
763 act1.sa_sigaction = (void *)SIG_IGN;
764 } else if (k->_sa_handler == TARGET_SIG_DFL) {
765 if (fatal_signal (sig))
766 act1.sa_sigaction = host_signal_handler;
767 else
768 act1.sa_sigaction = (void *)SIG_DFL;
769 } else {
770 act1.sa_sigaction = host_signal_handler;
772 ret = sigaction(host_sig, &act1, NULL);
775 return ret;
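#if 0
/* Illustrative sketch only (not part of the original file): the shape
 * of a call into do_sigaction(), e.g. from the rt_sigaction emulation.
 * The target_sigaction fields are expected in guest byte order, because
 * do_sigaction() reads them with __get_user(); hence the tswapal().
 * A -TARGET_ERESTARTSYS return means the guest syscall should be
 * restarted.  The helper name is made up for illustration.
 */
static int example_ignore_sigpipe(void)
{
    struct target_sigaction act;

    memset(&act, 0, sizeof(act));
    act._sa_handler = tswapal(TARGET_SIG_IGN);
    return do_sigaction(TARGET_SIGPIPE, &act, NULL);
}
#endif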
778 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
780 /* from the Linux kernel */
782 struct target_fpreg {
783 uint16_t significand[4];
784 uint16_t exponent;
787 struct target_fpxreg {
788 uint16_t significand[4];
789 uint16_t exponent;
790 uint16_t padding[3];
793 struct target_xmmreg {
794 abi_ulong element[4];
797 struct target_fpstate {
798 /* Regular FPU environment */
799 abi_ulong cw;
800 abi_ulong sw;
801 abi_ulong tag;
802 abi_ulong ipoff;
803 abi_ulong cssel;
804 abi_ulong dataoff;
805 abi_ulong datasel;
806 struct target_fpreg _st[8];
807 uint16_t status;
808 uint16_t magic; /* 0xffff = regular FPU data only */
810 /* FXSR FPU environment */
811 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
812 abi_ulong mxcsr;
813 abi_ulong reserved;
814 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
815 struct target_xmmreg _xmm[8];
816 abi_ulong padding[56];
819 #define X86_FXSR_MAGIC 0x0000
821 struct target_sigcontext {
822 uint16_t gs, __gsh;
823 uint16_t fs, __fsh;
824 uint16_t es, __esh;
825 uint16_t ds, __dsh;
826 abi_ulong edi;
827 abi_ulong esi;
828 abi_ulong ebp;
829 abi_ulong esp;
830 abi_ulong ebx;
831 abi_ulong edx;
832 abi_ulong ecx;
833 abi_ulong eax;
834 abi_ulong trapno;
835 abi_ulong err;
836 abi_ulong eip;
837 uint16_t cs, __csh;
838 abi_ulong eflags;
839 abi_ulong esp_at_signal;
840 uint16_t ss, __ssh;
841 abi_ulong fpstate; /* pointer */
842 abi_ulong oldmask;
843 abi_ulong cr2;
846 struct target_ucontext {
847 abi_ulong tuc_flags;
848 abi_ulong tuc_link;
849 target_stack_t tuc_stack;
850 struct target_sigcontext tuc_mcontext;
851 target_sigset_t tuc_sigmask; /* mask last for extensibility */
854 struct sigframe
856 abi_ulong pretcode;
857 int sig;
858 struct target_sigcontext sc;
859 struct target_fpstate fpstate;
860 abi_ulong extramask[TARGET_NSIG_WORDS-1];
861 char retcode[8];
864 struct rt_sigframe
866 abi_ulong pretcode;
867 int sig;
868 abi_ulong pinfo;
869 abi_ulong puc;
870 struct target_siginfo info;
871 struct target_ucontext uc;
872 struct target_fpstate fpstate;
873 char retcode[8];
877 * Set up a signal frame.
880 /* XXX: save x87 state */
881 static void setup_sigcontext(struct target_sigcontext *sc,
882 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
883 abi_ulong fpstate_addr)
885 CPUState *cs = CPU(x86_env_get_cpu(env));
886 uint16_t magic;
888 /* already locked in setup_frame() */
889 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
890 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
891 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
892 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
893 __put_user(env->regs[R_EDI], &sc->edi);
894 __put_user(env->regs[R_ESI], &sc->esi);
895 __put_user(env->regs[R_EBP], &sc->ebp);
896 __put_user(env->regs[R_ESP], &sc->esp);
897 __put_user(env->regs[R_EBX], &sc->ebx);
898 __put_user(env->regs[R_EDX], &sc->edx);
899 __put_user(env->regs[R_ECX], &sc->ecx);
900 __put_user(env->regs[R_EAX], &sc->eax);
901 __put_user(cs->exception_index, &sc->trapno);
902 __put_user(env->error_code, &sc->err);
903 __put_user(env->eip, &sc->eip);
904 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
905 __put_user(env->eflags, &sc->eflags);
906 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
907 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
909 cpu_x86_fsave(env, fpstate_addr, 1);
910 fpstate->status = fpstate->sw;
911 magic = 0xffff;
912 __put_user(magic, &fpstate->magic);
913 __put_user(fpstate_addr, &sc->fpstate);
915 /* non-iBCS2 extensions.. */
916 __put_user(mask, &sc->oldmask);
917 __put_user(env->cr[2], &sc->cr2);
921 * Determine which stack to use..
924 static inline abi_ulong
925 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
927 unsigned long esp;
929 /* Default to using normal stack */
930 esp = env->regs[R_ESP];
931 /* This is the X/Open sanctioned signal stack switching. */
932 if (ka->sa_flags & TARGET_SA_ONSTACK) {
933 if (sas_ss_flags(esp) == 0) {
934 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
936 } else {
938 /* This is the legacy signal stack switching. */
939 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
940 !(ka->sa_flags & TARGET_SA_RESTORER) &&
941 ka->sa_restorer) {
942 esp = (unsigned long) ka->sa_restorer;
945 return (esp - frame_size) & -8ul;
948 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
949 static void setup_frame(int sig, struct target_sigaction *ka,
950 target_sigset_t *set, CPUX86State *env)
952 abi_ulong frame_addr;
953 struct sigframe *frame;
954 int i;
956 frame_addr = get_sigframe(ka, env, sizeof(*frame));
957 trace_user_setup_frame(env, frame_addr);
959 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
960 goto give_sigsegv;
962 __put_user(sig, &frame->sig);
964 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
965 frame_addr + offsetof(struct sigframe, fpstate));
967 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
968 __put_user(set->sig[i], &frame->extramask[i - 1]);
971 /* Set up to return from userspace. If provided, use a stub
972 already in userspace. */
973 if (ka->sa_flags & TARGET_SA_RESTORER) {
974 __put_user(ka->sa_restorer, &frame->pretcode);
975 } else {
976 uint16_t val16;
977 abi_ulong retcode_addr;
978 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
979 __put_user(retcode_addr, &frame->pretcode);
980 /* This is popl %eax ; movl $,%eax ; int $0x80 */
981 val16 = 0xb858;
982 __put_user(val16, (uint16_t *)(frame->retcode+0));
983 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
984 val16 = 0x80cd;
985 __put_user(val16, (uint16_t *)(frame->retcode+6));
989 /* Set up registers for signal handler */
990 env->regs[R_ESP] = frame_addr;
991 env->eip = ka->_sa_handler;
993 cpu_x86_load_seg(env, R_DS, __USER_DS);
994 cpu_x86_load_seg(env, R_ES, __USER_DS);
995 cpu_x86_load_seg(env, R_SS, __USER_DS);
996 cpu_x86_load_seg(env, R_CS, __USER_CS);
997 env->eflags &= ~TF_MASK;
999 unlock_user_struct(frame, frame_addr, 1);
1001 return;
1003 give_sigsegv:
1004 if (sig == TARGET_SIGSEGV) {
1005 ka->_sa_handler = TARGET_SIG_DFL;
1007 force_sig(TARGET_SIGSEGV /* , current */);
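#if 0
/* Illustrative sketch only (not part of the original file): the retcode
 * trampoline written by setup_frame() above, seen as a flat byte
 * sequence.  The 16-bit stores are little-endian, which is where the
 * 0xb858 and 0x80cd constants come from; the 0x77 (119) immediate is
 * assumed to be TARGET_NR_sigreturn on 32-bit x86.
 */
static const uint8_t example_sigreturn_retcode[8] = {
    0x58,                           /* popl %eax                      */
    0xb8, 0x77, 0x00, 0x00, 0x00,   /* movl $TARGET_NR_sigreturn,%eax */
    0xcd, 0x80,                     /* int  $0x80                     */
};
#endif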
1010 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
1011 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1012 target_siginfo_t *info,
1013 target_sigset_t *set, CPUX86State *env)
1015 abi_ulong frame_addr, addr;
1016 struct rt_sigframe *frame;
1017 int i;
1019 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1020 trace_user_setup_rt_frame(env, frame_addr);
1022 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1023 goto give_sigsegv;
1025 __put_user(sig, &frame->sig);
1026 addr = frame_addr + offsetof(struct rt_sigframe, info);
1027 __put_user(addr, &frame->pinfo);
1028 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1029 __put_user(addr, &frame->puc);
1030 tswap_siginfo(&frame->info, info);
1032 /* Create the ucontext. */
1033 __put_user(0, &frame->uc.tuc_flags);
1034 __put_user(0, &frame->uc.tuc_link);
1035 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1036 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1037 &frame->uc.tuc_stack.ss_flags);
1038 __put_user(target_sigaltstack_used.ss_size,
1039 &frame->uc.tuc_stack.ss_size);
1040 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1041 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1043 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1044 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1047 /* Set up to return from userspace. If provided, use a stub
1048 already in userspace. */
1049 if (ka->sa_flags & TARGET_SA_RESTORER) {
1050 __put_user(ka->sa_restorer, &frame->pretcode);
1051 } else {
1052 uint16_t val16;
1053 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1054 __put_user(addr, &frame->pretcode);
1055 /* This is movl $,%eax ; int $0x80 */
1056 __put_user(0xb8, (char *)(frame->retcode+0));
1057 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1058 val16 = 0x80cd;
1059 __put_user(val16, (uint16_t *)(frame->retcode+5));
1062 /* Set up registers for signal handler */
1063 env->regs[R_ESP] = frame_addr;
1064 env->eip = ka->_sa_handler;
1066 cpu_x86_load_seg(env, R_DS, __USER_DS);
1067 cpu_x86_load_seg(env, R_ES, __USER_DS);
1068 cpu_x86_load_seg(env, R_SS, __USER_DS);
1069 cpu_x86_load_seg(env, R_CS, __USER_CS);
1070 env->eflags &= ~TF_MASK;
1072 unlock_user_struct(frame, frame_addr, 1);
1074 return;
1076 give_sigsegv:
1077 if (sig == TARGET_SIGSEGV) {
1078 ka->_sa_handler = TARGET_SIG_DFL;
1080 force_sig(TARGET_SIGSEGV /* , current */);
1083 static int
1084 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1086 unsigned int err = 0;
1087 abi_ulong fpstate_addr;
1088 unsigned int tmpflags;
1090 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1091 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1092 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1093 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1095 env->regs[R_EDI] = tswapl(sc->edi);
1096 env->regs[R_ESI] = tswapl(sc->esi);
1097 env->regs[R_EBP] = tswapl(sc->ebp);
1098 env->regs[R_ESP] = tswapl(sc->esp);
1099 env->regs[R_EBX] = tswapl(sc->ebx);
1100 env->regs[R_EDX] = tswapl(sc->edx);
1101 env->regs[R_ECX] = tswapl(sc->ecx);
1102 env->regs[R_EAX] = tswapl(sc->eax);
1103 env->eip = tswapl(sc->eip);
1105 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1106 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1108 tmpflags = tswapl(sc->eflags);
1109 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1110 // regs->orig_eax = -1; /* disable syscall checks */
1112 fpstate_addr = tswapl(sc->fpstate);
1113 if (fpstate_addr != 0) {
1114 if (!access_ok(VERIFY_READ, fpstate_addr,
1115 sizeof(struct target_fpstate)))
1116 goto badframe;
1117 cpu_x86_frstor(env, fpstate_addr, 1);
1120 return err;
1121 badframe:
1122 return 1;
1125 long do_sigreturn(CPUX86State *env)
1127 struct sigframe *frame;
1128 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1129 target_sigset_t target_set;
1130 sigset_t set;
1131 int i;
1133 trace_user_do_sigreturn(env, frame_addr);
1134 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1135 goto badframe;
1136 /* set blocked signals */
1137 __get_user(target_set.sig[0], &frame->sc.oldmask);
1138 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1139 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1142 target_to_host_sigset_internal(&set, &target_set);
1143 set_sigmask(&set);
1145 /* restore registers */
1146 if (restore_sigcontext(env, &frame->sc))
1147 goto badframe;
1148 unlock_user_struct(frame, frame_addr, 0);
1149 return -TARGET_QEMU_ESIGRETURN;
1151 badframe:
1152 unlock_user_struct(frame, frame_addr, 0);
1153 force_sig(TARGET_SIGSEGV);
1154 return 0;
1157 long do_rt_sigreturn(CPUX86State *env)
1159 abi_ulong frame_addr;
1160 struct rt_sigframe *frame;
1161 sigset_t set;
1163 frame_addr = env->regs[R_ESP] - 4;
1164 trace_user_do_rt_sigreturn(env, frame_addr);
1165 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1166 goto badframe;
1167 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1168 set_sigmask(&set);
1170 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1171 goto badframe;
1174 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1175 get_sp_from_cpustate(env)) == -EFAULT) {
1176 goto badframe;
1179 unlock_user_struct(frame, frame_addr, 0);
1180 return -TARGET_QEMU_ESIGRETURN;
1182 badframe:
1183 unlock_user_struct(frame, frame_addr, 0);
1184 force_sig(TARGET_SIGSEGV);
1185 return 0;
1188 #elif defined(TARGET_AARCH64)
1190 struct target_sigcontext {
1191 uint64_t fault_address;
1192 /* AArch64 registers */
1193 uint64_t regs[31];
1194 uint64_t sp;
1195 uint64_t pc;
1196 uint64_t pstate;
1197 /* 4K reserved for FP/SIMD state and future expansion */
1198 char __reserved[4096] __attribute__((__aligned__(16)));
1201 struct target_ucontext {
1202 abi_ulong tuc_flags;
1203 abi_ulong tuc_link;
1204 target_stack_t tuc_stack;
1205 target_sigset_t tuc_sigmask;
1206 /* glibc uses a 1024-bit sigset_t */
1207 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1208 /* last for future expansion */
1209 struct target_sigcontext tuc_mcontext;
1213 * Header to be used at the beginning of structures extending the user
1214 * context. Such structures must be placed after the rt_sigframe on the stack
1215 * and be 16-byte aligned. The last structure must be a dummy one with the
1216 * magic and size set to 0.
1218 struct target_aarch64_ctx {
1219 uint32_t magic;
1220 uint32_t size;
1223 #define TARGET_FPSIMD_MAGIC 0x46508001
1225 struct target_fpsimd_context {
1226 struct target_aarch64_ctx head;
1227 uint32_t fpsr;
1228 uint32_t fpcr;
1229 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1233 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1234 * user space as it will change with the addition of new context. User space
1235 * should check the magic/size information.
1237 struct target_aux_context {
1238 struct target_fpsimd_context fpsimd;
1239 /* additional context to be added before "end" */
1240 struct target_aarch64_ctx end;
1243 struct target_rt_sigframe {
1244 struct target_siginfo info;
1245 struct target_ucontext uc;
1246 uint64_t fp;
1247 uint64_t lr;
1248 uint32_t tramp[2];
1251 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1252 CPUARMState *env, target_sigset_t *set)
1254 int i;
1255 struct target_aux_context *aux =
1256 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1258 /* set up the stack frame for unwinding */
1259 __put_user(env->xregs[29], &sf->fp);
1260 __put_user(env->xregs[30], &sf->lr);
1262 for (i = 0; i < 31; i++) {
1263 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1265 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1266 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1267 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1269 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1271 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1272 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1275 for (i = 0; i < 32; i++) {
1276 #ifdef TARGET_WORDS_BIGENDIAN
1277 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1278 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1279 #else
1280 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1281 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1282 #endif
1284 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1285 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1286 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1287 __put_user(sizeof(struct target_fpsimd_context),
1288 &aux->fpsimd.head.size);
1290 /* set the "end" magic */
1291 __put_user(0, &aux->end.magic);
1292 __put_user(0, &aux->end.size);
1294 return 0;
1297 static int target_restore_sigframe(CPUARMState *env,
1298 struct target_rt_sigframe *sf)
1300 sigset_t set;
1301 int i;
1302 struct target_aux_context *aux =
1303 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1304 uint32_t magic, size, fpsr, fpcr;
1305 uint64_t pstate;
1307 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1308 set_sigmask(&set);
1310 for (i = 0; i < 31; i++) {
1311 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1314 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1315 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1316 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1317 pstate_write(env, pstate);
1319 __get_user(magic, &aux->fpsimd.head.magic);
1320 __get_user(size, &aux->fpsimd.head.size);
1322 if (magic != TARGET_FPSIMD_MAGIC
1323 || size != sizeof(struct target_fpsimd_context)) {
1324 return 1;
1327 for (i = 0; i < 32; i++) {
1328 #ifdef TARGET_WORDS_BIGENDIAN
1329 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1330 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1331 #else
1332 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1333 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1334 #endif
1336 __get_user(fpsr, &aux->fpsimd.fpsr);
1337 vfp_set_fpsr(env, fpsr);
1338 __get_user(fpcr, &aux->fpsimd.fpcr);
1339 vfp_set_fpcr(env, fpcr);
1341 return 0;
1344 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1346 abi_ulong sp;
1348 sp = env->xregs[31];
1351 * This is the X/Open sanctioned signal stack switching.
1353 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1354 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1357 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1359 return sp;
1362 static void target_setup_frame(int usig, struct target_sigaction *ka,
1363 target_siginfo_t *info, target_sigset_t *set,
1364 CPUARMState *env)
1366 struct target_rt_sigframe *frame;
1367 abi_ulong frame_addr, return_addr;
1369 frame_addr = get_sigframe(ka, env);
1370 trace_user_setup_frame(env, frame_addr);
1371 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1372 goto give_sigsegv;
1375 __put_user(0, &frame->uc.tuc_flags);
1376 __put_user(0, &frame->uc.tuc_link);
1378 __put_user(target_sigaltstack_used.ss_sp,
1379 &frame->uc.tuc_stack.ss_sp);
1380 __put_user(sas_ss_flags(env->xregs[31]),
1381 &frame->uc.tuc_stack.ss_flags);
1382 __put_user(target_sigaltstack_used.ss_size,
1383 &frame->uc.tuc_stack.ss_size);
1384 target_setup_sigframe(frame, env, set);
1385 if (ka->sa_flags & TARGET_SA_RESTORER) {
1386 return_addr = ka->sa_restorer;
1387 } else {
1388 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1389 __put_user(0xd2801168, &frame->tramp[0]);
1390 __put_user(0xd4000001, &frame->tramp[1]);
1391 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1393 env->xregs[0] = usig;
1394 env->xregs[31] = frame_addr;
1395 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1396 env->pc = ka->_sa_handler;
1397 env->xregs[30] = return_addr;
1398 if (info) {
1399 tswap_siginfo(&frame->info, info);
1400 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1401 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1404 unlock_user_struct(frame, frame_addr, 1);
1405 return;
1407 give_sigsegv:
1408 unlock_user_struct(frame, frame_addr, 1);
1409 force_sig(TARGET_SIGSEGV);
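#if 0
/* Illustrative sketch only (not part of the original file): the two
 * trampoline words written in target_setup_frame() decode as
 * "movz x8, #139" (139 being __NR_rt_sigreturn on AArch64) and
 * "svc #0".  The movz encoding behind 0xd2801168 is:
 */
static inline uint32_t example_aarch64_movz_x8(uint16_t imm16)
{
    return 0xd2800000u | ((uint32_t)imm16 << 5) | 8;  /* movz x8, #imm16 */
}
#endif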
1412 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1413 target_siginfo_t *info, target_sigset_t *set,
1414 CPUARMState *env)
1416 target_setup_frame(sig, ka, info, set, env);
1419 static void setup_frame(int sig, struct target_sigaction *ka,
1420 target_sigset_t *set, CPUARMState *env)
1422 target_setup_frame(sig, ka, 0, set, env);
1425 long do_rt_sigreturn(CPUARMState *env)
1427 struct target_rt_sigframe *frame = NULL;
1428 abi_ulong frame_addr = env->xregs[31];
1430 trace_user_do_rt_sigreturn(env, frame_addr);
1431 if (frame_addr & 15) {
1432 goto badframe;
1435 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1436 goto badframe;
1439 if (target_restore_sigframe(env, frame)) {
1440 goto badframe;
1443 if (do_sigaltstack(frame_addr +
1444 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1445 0, get_sp_from_cpustate(env)) == -EFAULT) {
1446 goto badframe;
1449 unlock_user_struct(frame, frame_addr, 0);
1450 return -TARGET_QEMU_ESIGRETURN;
1452 badframe:
1453 unlock_user_struct(frame, frame_addr, 0);
1454 force_sig(TARGET_SIGSEGV);
1455 return 0;
1458 long do_sigreturn(CPUARMState *env)
1460 return do_rt_sigreturn(env);
1463 #elif defined(TARGET_ARM)
1465 struct target_sigcontext {
1466 abi_ulong trap_no;
1467 abi_ulong error_code;
1468 abi_ulong oldmask;
1469 abi_ulong arm_r0;
1470 abi_ulong arm_r1;
1471 abi_ulong arm_r2;
1472 abi_ulong arm_r3;
1473 abi_ulong arm_r4;
1474 abi_ulong arm_r5;
1475 abi_ulong arm_r6;
1476 abi_ulong arm_r7;
1477 abi_ulong arm_r8;
1478 abi_ulong arm_r9;
1479 abi_ulong arm_r10;
1480 abi_ulong arm_fp;
1481 abi_ulong arm_ip;
1482 abi_ulong arm_sp;
1483 abi_ulong arm_lr;
1484 abi_ulong arm_pc;
1485 abi_ulong arm_cpsr;
1486 abi_ulong fault_address;
1489 struct target_ucontext_v1 {
1490 abi_ulong tuc_flags;
1491 abi_ulong tuc_link;
1492 target_stack_t tuc_stack;
1493 struct target_sigcontext tuc_mcontext;
1494 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1497 struct target_ucontext_v2 {
1498 abi_ulong tuc_flags;
1499 abi_ulong tuc_link;
1500 target_stack_t tuc_stack;
1501 struct target_sigcontext tuc_mcontext;
1502 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1503 char __unused[128 - sizeof(target_sigset_t)];
1504 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1507 struct target_user_vfp {
1508 uint64_t fpregs[32];
1509 abi_ulong fpscr;
1512 struct target_user_vfp_exc {
1513 abi_ulong fpexc;
1514 abi_ulong fpinst;
1515 abi_ulong fpinst2;
1518 struct target_vfp_sigframe {
1519 abi_ulong magic;
1520 abi_ulong size;
1521 struct target_user_vfp ufp;
1522 struct target_user_vfp_exc ufp_exc;
1523 } __attribute__((__aligned__(8)));
1525 struct target_iwmmxt_sigframe {
1526 abi_ulong magic;
1527 abi_ulong size;
1528 uint64_t regs[16];
1529 /* Note that not all the coprocessor control registers are stored here */
1530 uint32_t wcssf;
1531 uint32_t wcasf;
1532 uint32_t wcgr0;
1533 uint32_t wcgr1;
1534 uint32_t wcgr2;
1535 uint32_t wcgr3;
1536 } __attribute__((__aligned__(8)));
1538 #define TARGET_VFP_MAGIC 0x56465001
1539 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1541 struct sigframe_v1
1543 struct target_sigcontext sc;
1544 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1545 abi_ulong retcode;
1548 struct sigframe_v2
1550 struct target_ucontext_v2 uc;
1551 abi_ulong retcode;
1554 struct rt_sigframe_v1
1556 abi_ulong pinfo;
1557 abi_ulong puc;
1558 struct target_siginfo info;
1559 struct target_ucontext_v1 uc;
1560 abi_ulong retcode;
1563 struct rt_sigframe_v2
1565 struct target_siginfo info;
1566 struct target_ucontext_v2 uc;
1567 abi_ulong retcode;
1570 #define TARGET_CONFIG_CPU_32 1
1573 * For ARM syscalls, we encode the syscall number into the instruction.
1575 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1576 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1579 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1580 * need two 16-bit instructions.
1582 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1583 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1585 static const abi_ulong retcodes[4] = {
1586 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1587 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
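#if 0
/* Illustrative sketch only (not part of the original file): how the
 * retcodes[] constants above decode.  The ARM-mode entries are a single
 * "swi" whose 24-bit immediate carries the OABI syscall number, while
 * the Thumb entries pack "movs r7, #NR" (0x27xx, low halfword) and
 * "svc #0" (0xdf00, high halfword) into one little-endian word.
 */
static inline abi_ulong example_arm_swi_retcode(unsigned int nr)
{
    return 0xef000000 | (nr + ARM_SYSCALL_BASE);   /* swi #(nr + base) */
}
#endif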
1591 static inline int valid_user_regs(CPUARMState *regs)
1593 return 1;
1596 static void
1597 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1598 CPUARMState *env, abi_ulong mask)
1600 __put_user(env->regs[0], &sc->arm_r0);
1601 __put_user(env->regs[1], &sc->arm_r1);
1602 __put_user(env->regs[2], &sc->arm_r2);
1603 __put_user(env->regs[3], &sc->arm_r3);
1604 __put_user(env->regs[4], &sc->arm_r4);
1605 __put_user(env->regs[5], &sc->arm_r5);
1606 __put_user(env->regs[6], &sc->arm_r6);
1607 __put_user(env->regs[7], &sc->arm_r7);
1608 __put_user(env->regs[8], &sc->arm_r8);
1609 __put_user(env->regs[9], &sc->arm_r9);
1610 __put_user(env->regs[10], &sc->arm_r10);
1611 __put_user(env->regs[11], &sc->arm_fp);
1612 __put_user(env->regs[12], &sc->arm_ip);
1613 __put_user(env->regs[13], &sc->arm_sp);
1614 __put_user(env->regs[14], &sc->arm_lr);
1615 __put_user(env->regs[15], &sc->arm_pc);
1616 #ifdef TARGET_CONFIG_CPU_32
1617 __put_user(cpsr_read(env), &sc->arm_cpsr);
1618 #endif
1620 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1621 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1622 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1623 __put_user(mask, &sc->oldmask);
1626 static inline abi_ulong
1627 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1629 unsigned long sp = regs->regs[13];
1632 * This is the X/Open sanctioned signal stack switching.
1634 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1635 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1638 * ATPCS B01 mandates 8-byte alignment
1640 return (sp - framesize) & ~7;
1643 static void
1644 setup_return(CPUARMState *env, struct target_sigaction *ka,
1645 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1647 abi_ulong handler = ka->_sa_handler;
1648 abi_ulong retcode;
1649 int thumb = handler & 1;
1650 uint32_t cpsr = cpsr_read(env);
1652 cpsr &= ~CPSR_IT;
1653 if (thumb) {
1654 cpsr |= CPSR_T;
1655 } else {
1656 cpsr &= ~CPSR_T;
1659 if (ka->sa_flags & TARGET_SA_RESTORER) {
1660 retcode = ka->sa_restorer;
1661 } else {
1662 unsigned int idx = thumb;
1664 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1665 idx += 2;
1668 __put_user(retcodes[idx], rc);
1670 retcode = rc_addr + thumb;
1673 env->regs[0] = usig;
1674 env->regs[13] = frame_addr;
1675 env->regs[14] = retcode;
1676 env->regs[15] = handler & (thumb ? ~1 : ~3);
1677 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1680 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1682 int i;
1683 struct target_vfp_sigframe *vfpframe;
1684 vfpframe = (struct target_vfp_sigframe *)regspace;
1685 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1686 __put_user(sizeof(*vfpframe), &vfpframe->size);
1687 for (i = 0; i < 32; i++) {
1688 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1690 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1691 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1692 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1693 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1694 return (abi_ulong*)(vfpframe+1);
1697 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1698 CPUARMState *env)
1700 int i;
1701 struct target_iwmmxt_sigframe *iwmmxtframe;
1702 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1703 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1704 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1705 for (i = 0; i < 16; i++) {
1706 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1708 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1710     __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1710 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1711 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1712 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1713 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1714 return (abi_ulong*)(iwmmxtframe+1);
1717 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1718 target_sigset_t *set, CPUARMState *env)
1720 struct target_sigaltstack stack;
1721 int i;
1722 abi_ulong *regspace;
1724 /* Clear all the bits of the ucontext we don't use. */
1725 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1727 memset(&stack, 0, sizeof(stack));
1728 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1729 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1730 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1731 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1733 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1734 /* Save coprocessor signal frame. */
1735 regspace = uc->tuc_regspace;
1736 if (arm_feature(env, ARM_FEATURE_VFP)) {
1737 regspace = setup_sigframe_v2_vfp(regspace, env);
1739 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1740 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1743 /* Write terminating magic word */
1744 __put_user(0, regspace);
1746 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1747 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1751 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1752 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1753 target_sigset_t *set, CPUARMState *regs)
1755 struct sigframe_v1 *frame;
1756 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1757 int i;
1759 trace_user_setup_frame(regs, frame_addr);
1760 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1761 return;
1764 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1766 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1767 __put_user(set->sig[i], &frame->extramask[i - 1]);
1770 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1771 frame_addr + offsetof(struct sigframe_v1, retcode));
1773 unlock_user_struct(frame, frame_addr, 1);
1776 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1777 target_sigset_t *set, CPUARMState *regs)
1779 struct sigframe_v2 *frame;
1780 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1782 trace_user_setup_frame(regs, frame_addr);
1783 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1784 return;
1787 setup_sigframe_v2(&frame->uc, set, regs);
1789 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1790 frame_addr + offsetof(struct sigframe_v2, retcode));
1792 unlock_user_struct(frame, frame_addr, 1);
1795 static void setup_frame(int usig, struct target_sigaction *ka,
1796 target_sigset_t *set, CPUARMState *regs)
1798 if (get_osversion() >= 0x020612) {
1799 setup_frame_v2(usig, ka, set, regs);
1800 } else {
1801 setup_frame_v1(usig, ka, set, regs);
1805 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1806 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1807 target_siginfo_t *info,
1808 target_sigset_t *set, CPUARMState *env)
1810 struct rt_sigframe_v1 *frame;
1811 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1812 struct target_sigaltstack stack;
1813 int i;
1814 abi_ulong info_addr, uc_addr;
1816 trace_user_setup_rt_frame(env, frame_addr);
1817 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1818 return /* 1 */;
1821 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1822 __put_user(info_addr, &frame->pinfo);
1823 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1824 __put_user(uc_addr, &frame->puc);
1825 tswap_siginfo(&frame->info, info);
1827 /* Clear all the bits of the ucontext we don't use. */
1828 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1830 memset(&stack, 0, sizeof(stack));
1831 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1832 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1833 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1834 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1836 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1837 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1838 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1841 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1842 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1844 env->regs[1] = info_addr;
1845 env->regs[2] = uc_addr;
1847 unlock_user_struct(frame, frame_addr, 1);
1850 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1851 target_siginfo_t *info,
1852 target_sigset_t *set, CPUARMState *env)
1854 struct rt_sigframe_v2 *frame;
1855 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1856 abi_ulong info_addr, uc_addr;
1858 trace_user_setup_rt_frame(env, frame_addr);
1859 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1860 return /* 1 */;
1863 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1864 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1865 tswap_siginfo(&frame->info, info);
1867 setup_sigframe_v2(&frame->uc, set, env);
1869 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1870 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1872 env->regs[1] = info_addr;
1873 env->regs[2] = uc_addr;
1875 unlock_user_struct(frame, frame_addr, 1);
1878 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1879 target_siginfo_t *info,
1880 target_sigset_t *set, CPUARMState *env)
1882 if (get_osversion() >= 0x020612) {
1883 setup_rt_frame_v2(usig, ka, info, set, env);
1884 } else {
1885 setup_rt_frame_v1(usig, ka, info, set, env);
1889 static int
1890 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1892 int err = 0;
1893 uint32_t cpsr;
1895 __get_user(env->regs[0], &sc->arm_r0);
1896 __get_user(env->regs[1], &sc->arm_r1);
1897 __get_user(env->regs[2], &sc->arm_r2);
1898 __get_user(env->regs[3], &sc->arm_r3);
1899 __get_user(env->regs[4], &sc->arm_r4);
1900 __get_user(env->regs[5], &sc->arm_r5);
1901 __get_user(env->regs[6], &sc->arm_r6);
1902 __get_user(env->regs[7], &sc->arm_r7);
1903 __get_user(env->regs[8], &sc->arm_r8);
1904 __get_user(env->regs[9], &sc->arm_r9);
1905 __get_user(env->regs[10], &sc->arm_r10);
1906 __get_user(env->regs[11], &sc->arm_fp);
1907 __get_user(env->regs[12], &sc->arm_ip);
1908 __get_user(env->regs[13], &sc->arm_sp);
1909 __get_user(env->regs[14], &sc->arm_lr);
1910 __get_user(env->regs[15], &sc->arm_pc);
1911 #ifdef TARGET_CONFIG_CPU_32
1912 __get_user(cpsr, &sc->arm_cpsr);
1913 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1914 #endif
1916 err |= !valid_user_regs(env);
1918 return err;
1921 static long do_sigreturn_v1(CPUARMState *env)
1923 abi_ulong frame_addr;
1924 struct sigframe_v1 *frame = NULL;
1925 target_sigset_t set;
1926 sigset_t host_set;
1927 int i;
1930 * Since we stacked the signal on a 64-bit boundary,
1931 * then 'sp' should be word aligned here. If it's
1932 * not, then the user is trying to mess with us.
1934 frame_addr = env->regs[13];
1935 trace_user_do_sigreturn(env, frame_addr);
1936 if (frame_addr & 7) {
1937 goto badframe;
1940 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1941 goto badframe;
1944 __get_user(set.sig[0], &frame->sc.oldmask);
1945 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1946 __get_user(set.sig[i], &frame->extramask[i - 1]);
1949 target_to_host_sigset_internal(&host_set, &set);
1950 set_sigmask(&host_set);
1952 if (restore_sigcontext(env, &frame->sc)) {
1953 goto badframe;
1956 #if 0
1957 /* Send SIGTRAP if we're single-stepping */
1958 if (ptrace_cancel_bpt(current))
1959 send_sig(SIGTRAP, current, 1);
1960 #endif
1961 unlock_user_struct(frame, frame_addr, 0);
1962 return -TARGET_QEMU_ESIGRETURN;
1964 badframe:
1965 force_sig(TARGET_SIGSEGV /* , current */);
1966 return 0;
1969 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1971 int i;
1972 abi_ulong magic, sz;
1973 uint32_t fpscr, fpexc;
1974 struct target_vfp_sigframe *vfpframe;
1975 vfpframe = (struct target_vfp_sigframe *)regspace;
1977 __get_user(magic, &vfpframe->magic);
1978 __get_user(sz, &vfpframe->size);
1979 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1980 return 0;
1982 for (i = 0; i < 32; i++) {
1983 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1985 __get_user(fpscr, &vfpframe->ufp.fpscr);
1986 vfp_set_fpscr(env, fpscr);
1987 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1988 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1989 * and the exception flag is cleared
1991 fpexc |= (1 << 30);
1992 fpexc &= ~((1 << 31) | (1 << 28));
1993 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1994 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1995 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1996 return (abi_ulong*)(vfpframe + 1);
1999 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2000 abi_ulong *regspace)
2002 int i;
2003 abi_ulong magic, sz;
2004 struct target_iwmmxt_sigframe *iwmmxtframe;
2005 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2007 __get_user(magic, &iwmmxtframe->magic);
2008 __get_user(sz, &iwmmxtframe->size);
2009 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2010 return 0;
2012 for (i = 0; i < 16; i++) {
2013 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2015 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2016 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2017 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2018 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2019 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2020 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2021 return (abi_ulong*)(iwmmxtframe + 1);
2024 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
2025 struct target_ucontext_v2 *uc)
2027 sigset_t host_set;
2028 abi_ulong *regspace;
2030 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2031 set_sigmask(&host_set);
2033 if (restore_sigcontext(env, &uc->tuc_mcontext))
2034 return 1;
2036 /* Restore coprocessor signal frame */
2037 regspace = uc->tuc_regspace;
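/* Each restore_sigframe_v2_* helper validates its magic/size header and
 * returns a pointer just past its block, or NULL on a malformed frame. */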
2038 if (arm_feature(env, ARM_FEATURE_VFP)) {
2039 regspace = restore_sigframe_v2_vfp(env, regspace);
2040 if (!regspace) {
2041 return 1;
2044 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2045 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2046 if (!regspace) {
2047 return 1;
2051 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2052 return 1;
2054 #if 0
2055 /* Send SIGTRAP if we're single-stepping */
2056 if (ptrace_cancel_bpt(current))
2057 send_sig(SIGTRAP, current, 1);
2058 #endif
2060 return 0;
2063 static long do_sigreturn_v2(CPUARMState *env)
2065 abi_ulong frame_addr;
2066 struct sigframe_v2 *frame = NULL;
2069 * Since we stacked the signal on a 64-bit boundary,
2070 * then 'sp' should be word aligned here. If it's
2071 * not, then the user is trying to mess with us.
2073 frame_addr = env->regs[13];
2074 trace_user_do_sigreturn(env, frame_addr);
2075 if (frame_addr & 7) {
2076 goto badframe;
2079 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2080 goto badframe;
2083 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2084 goto badframe;
2087 unlock_user_struct(frame, frame_addr, 0);
2088 return -TARGET_QEMU_ESIGRETURN;
2090 badframe:
2091 unlock_user_struct(frame, frame_addr, 0);
2092 force_sig(TARGET_SIGSEGV /* , current */);
2093 return 0;
2096 long do_sigreturn(CPUARMState *env)
2098 if (get_osversion() >= 0x020612) {
2099 return do_sigreturn_v2(env);
2100 } else {
2101 return do_sigreturn_v1(env);
2105 static long do_rt_sigreturn_v1(CPUARMState *env)
2107 abi_ulong frame_addr;
2108 struct rt_sigframe_v1 *frame = NULL;
2109 sigset_t host_set;
2112 * Since we stacked the signal on a 64-bit boundary,
2113 * then 'sp' should be word aligned here. If it's
2114 * not, then the user is trying to mess with us.
2116 frame_addr = env->regs[13];
2117 trace_user_do_rt_sigreturn(env, frame_addr);
2118 if (frame_addr & 7) {
2119 goto badframe;
2122 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2123 goto badframe;
2126 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2127 set_sigmask(&host_set);
2129 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2130 goto badframe;
2133 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2134 goto badframe;
2136 #if 0
2137 /* Send SIGTRAP if we're single-stepping */
2138 if (ptrace_cancel_bpt(current))
2139 send_sig(SIGTRAP, current, 1);
2140 #endif
2141 unlock_user_struct(frame, frame_addr, 0);
2142 return -TARGET_QEMU_ESIGRETURN;
2144 badframe:
2145 unlock_user_struct(frame, frame_addr, 0);
2146 force_sig(TARGET_SIGSEGV /* , current */);
2147 return 0;
2150 static long do_rt_sigreturn_v2(CPUARMState *env)
2152 abi_ulong frame_addr;
2153 struct rt_sigframe_v2 *frame = NULL;
2156 * Since we stacked the signal on a 64-bit boundary,
2157 * then 'sp' should be word aligned here. If it's
2158 * not, then the user is trying to mess with us.
2160 frame_addr = env->regs[13];
2161 trace_user_do_rt_sigreturn(env, frame_addr);
2162 if (frame_addr & 7) {
2163 goto badframe;
2166 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2167 goto badframe;
2170 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2171 goto badframe;
2174 unlock_user_struct(frame, frame_addr, 0);
2175 return -TARGET_QEMU_ESIGRETURN;
2177 badframe:
2178 unlock_user_struct(frame, frame_addr, 0);
2179 force_sig(TARGET_SIGSEGV /* , current */);
2180 return 0;
2183 long do_rt_sigreturn(CPUARMState *env)
2185 if (get_osversion() >= 0x020612) {
2186 return do_rt_sigreturn_v2(env);
2187 } else {
2188 return do_rt_sigreturn_v1(env);
2192 #elif defined(TARGET_SPARC)
2194 #define __SUNOS_MAXWIN 31
2196 /* This is what SunOS does, so shall I. */
2197 struct target_sigcontext {
2198 abi_ulong sigc_onstack; /* state to restore */
2200 abi_ulong sigc_mask; /* sigmask to restore */
2201 abi_ulong sigc_sp; /* stack pointer */
2202 abi_ulong sigc_pc; /* program counter */
2203 abi_ulong sigc_npc; /* next program counter */
2204 abi_ulong sigc_psr; /* for condition codes etc */
2205 abi_ulong sigc_g1; /* User uses these two registers */
2206 abi_ulong sigc_o0; /* within the trampoline code. */
2208 /* Now comes information regarding the user's window set
2209 * at the time of the signal.
2211 abi_ulong sigc_oswins; /* outstanding windows */
2213 /* stack ptrs for each regwin buf */
2214 char *sigc_spbuf[__SUNOS_MAXWIN];
2216 /* Windows to restore after signal */
2217 struct {
2218 abi_ulong locals[8];
2219 abi_ulong ins[8];
2220 } sigc_wbuf[__SUNOS_MAXWIN];
2222 /* A Sparc stack frame */
2223 struct sparc_stackf {
2224 abi_ulong locals[8];
2225 abi_ulong ins[8];
2226 /* It's simpler to treat fp and callers_pc as elements of ins[]
2227 * since we never need to access them ourselves.
2229 char *structptr;
2230 abi_ulong xargs[6];
2231 abi_ulong xxargs[1];
2234 typedef struct {
2235 struct {
2236 abi_ulong psr;
2237 abi_ulong pc;
2238 abi_ulong npc;
2239 abi_ulong y;
2240 abi_ulong u_regs[16]; /* globals and ins */
2241 } si_regs;
2242 int si_mask;
2243 } __siginfo_t;
2245 typedef struct {
2246 abi_ulong si_float_regs[32];
2247 unsigned long si_fsr;
2248 unsigned long si_fpqdepth;
2249 struct {
2250 unsigned long *insn_addr;
2251 unsigned long insn;
2252 } si_fpqueue [16];
2253 } qemu_siginfo_fpu_t;
2256 struct target_signal_frame {
2257 struct sparc_stackf ss;
2258 __siginfo_t info;
2259 abi_ulong fpu_save;
2260 abi_ulong insns[2] __attribute__ ((aligned (8)));
2261 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2262 abi_ulong extra_size; /* Should be 0 */
2263 qemu_siginfo_fpu_t fpu_state;
2265 struct target_rt_signal_frame {
2266 struct sparc_stackf ss;
2267 siginfo_t info;
2268 abi_ulong regs[20];
2269 sigset_t mask;
2270 abi_ulong fpu_save;
2271 unsigned int insns[2];
2272 stack_t stack;
2273 unsigned int extra_size; /* Should be 0 */
2274 qemu_siginfo_fpu_t fpu_state;
2277 #define UREG_O0 16
2278 #define UREG_O6 22
2279 #define UREG_I0 0
2280 #define UREG_I1 1
2281 #define UREG_I2 2
2282 #define UREG_I3 3
2283 #define UREG_I4 4
2284 #define UREG_I5 5
2285 #define UREG_I6 6
2286 #define UREG_I7 7
2287 #define UREG_L0 8
2288 #define UREG_FP UREG_I6
2289 #define UREG_SP UREG_O6
2291 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2292 CPUSPARCState *env,
2293 unsigned long framesize)
2295 abi_ulong sp;
2297 sp = env->regwptr[UREG_FP];
2299 /* This is the X/Open sanctioned signal stack switching. */
2300 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2301 if (!on_sig_stack(sp)
2302 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2303 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2306 return sp - framesize;
2309 static int
2310 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2312 int err = 0, i;
2314 __put_user(env->psr, &si->si_regs.psr);
2315 __put_user(env->pc, &si->si_regs.pc);
2316 __put_user(env->npc, &si->si_regs.npc);
2317 __put_user(env->y, &si->si_regs.y);
2318 for (i=0; i < 8; i++) {
2319 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2321 for (i=0; i < 8; i++) {
2322 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2324 __put_user(mask, &si->si_mask);
2325 return err;
2328 #if 0
2329 static int
2330 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2331 CPUSPARCState *env, unsigned long mask)
2333 int err = 0;
2335 __put_user(mask, &sc->sigc_mask);
2336 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2337 __put_user(env->pc, &sc->sigc_pc);
2338 __put_user(env->npc, &sc->sigc_npc);
2339 __put_user(env->psr, &sc->sigc_psr);
2340 __put_user(env->gregs[1], &sc->sigc_g1);
2341 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2343 return err;
2345 #endif
2346 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2348 static void setup_frame(int sig, struct target_sigaction *ka,
2349 target_sigset_t *set, CPUSPARCState *env)
2351 abi_ulong sf_addr;
2352 struct target_signal_frame *sf;
2353 int sigframe_size, err, i;
2355 /* 1. Make sure everything is clean */
2356 //synchronize_user_stack();
2358 sigframe_size = NF_ALIGNEDSZ;
2359 sf_addr = get_sigframe(ka, env, sigframe_size);
2360 trace_user_setup_frame(env, sf_addr);
2362 sf = lock_user(VERIFY_WRITE, sf_addr,
2363 sizeof(struct target_signal_frame), 0);
2364 if (!sf) {
2365 goto sigsegv;
2367 #if 0
2368 if (invalid_frame_pointer(sf, sigframe_size))
2369 goto sigill_and_return;
2370 #endif
2371 /* 2. Save the current process state */
2372 err = setup___siginfo(&sf->info, env, set->sig[0]);
2373 __put_user(0, &sf->extra_size);
2375 //save_fpu_state(regs, &sf->fpu_state);
2376 //__put_user(&sf->fpu_state, &sf->fpu_save);
2378 __put_user(set->sig[0], &sf->info.si_mask);
2379 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2380 __put_user(set->sig[i + 1], &sf->extramask[i]);
2383 for (i = 0; i < 8; i++) {
2384 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2386 for (i = 0; i < 8; i++) {
2387 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2389 if (err)
2390 goto sigsegv;
2392 /* 3. signal handler back-trampoline and parameters */
2393 env->regwptr[UREG_FP] = sf_addr;
2394 env->regwptr[UREG_I0] = sig;
2395 env->regwptr[UREG_I1] = sf_addr +
2396 offsetof(struct target_signal_frame, info);
2397 env->regwptr[UREG_I2] = sf_addr +
2398 offsetof(struct target_signal_frame, info);
2400 /* 4. signal handler */
2401 env->pc = ka->_sa_handler;
2402 env->npc = (env->pc + 4);
2403 /* 5. return to kernel instructions */
2404 if (ka->sa_restorer) {
2405 env->regwptr[UREG_I7] = ka->sa_restorer;
2406 } else {
2407 uint32_t val32;
2409 env->regwptr[UREG_I7] = sf_addr +
2410 offsetof(struct target_signal_frame, insns) - 2 * 4;
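/* A SPARC 'ret' returns to %i7 + 8, so biasing the saved return address
 * by two instructions makes the handler's return land on insns[0]. */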
2412 /* mov __NR_sigreturn, %g1 */
2413 val32 = 0x821020d8;
2414 __put_user(val32, &sf->insns[0]);
2416 /* t 0x10 */
2417 val32 = 0x91d02010;
2418 __put_user(val32, &sf->insns[1]);
2419 if (err)
2420 goto sigsegv;
2422 /* Flush instruction space. */
2423 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2424 // tb_flush(env);
2426 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2427 return;
2428 #if 0
2429 sigill_and_return:
2430 force_sig(TARGET_SIGILL);
2431 #endif
2432 sigsegv:
2433 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2434 force_sig(TARGET_SIGSEGV);
2437 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2438 target_siginfo_t *info,
2439 target_sigset_t *set, CPUSPARCState *env)
2441 fprintf(stderr, "setup_rt_frame: not implemented\n");
2444 long do_sigreturn(CPUSPARCState *env)
2446 abi_ulong sf_addr;
2447 struct target_signal_frame *sf;
2448 uint32_t up_psr, pc, npc;
2449 target_sigset_t set;
2450 sigset_t host_set;
2451 int err=0, i;
2453 sf_addr = env->regwptr[UREG_FP];
2454 trace_user_do_sigreturn(env, sf_addr);
2455 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2456 goto segv_and_exit;
2459 /* 1. Make sure we are not getting garbage from the user */
2461 if (sf_addr & 3)
2462 goto segv_and_exit;
2464 __get_user(pc, &sf->info.si_regs.pc);
2465 __get_user(npc, &sf->info.si_regs.npc);
2467 if ((pc | npc) & 3) {
2468 goto segv_and_exit;
2471 /* 2. Restore the state */
2472 __get_user(up_psr, &sf->info.si_regs.psr);
2474 /* User can only change condition codes and FPU enabling in %psr. */
2475 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2476 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2478 env->pc = pc;
2479 env->npc = npc;
2480 __get_user(env->y, &sf->info.si_regs.y);
2481 for (i=0; i < 8; i++) {
2482 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2484 for (i=0; i < 8; i++) {
2485 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2488 /* FIXME: implement FPU save/restore:
2489 * __get_user(fpu_save, &sf->fpu_save);
2490 * if (fpu_save)
2491 * err |= restore_fpu_state(env, fpu_save);
2494 /* This is pretty much atomic; no amount of locking would prevent
2495 * the races which exist anyway.
2497 __get_user(set.sig[0], &sf->info.si_mask);
2498 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2499 __get_user(set.sig[i], &sf->extramask[i - 1]);
2502 target_to_host_sigset_internal(&host_set, &set);
2503 set_sigmask(&host_set);
2505 if (err) {
2506 goto segv_and_exit;
2508 unlock_user_struct(sf, sf_addr, 0);
2509 return -TARGET_QEMU_ESIGRETURN;
2511 segv_and_exit:
2512 unlock_user_struct(sf, sf_addr, 0);
2513 force_sig(TARGET_SIGSEGV);
2516 long do_rt_sigreturn(CPUSPARCState *env)
2518 trace_user_do_rt_sigreturn(env, 0);
2519 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2520 return -TARGET_ENOSYS;
2523 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2524 #define MC_TSTATE 0
2525 #define MC_PC 1
2526 #define MC_NPC 2
2527 #define MC_Y 3
2528 #define MC_G1 4
2529 #define MC_G2 5
2530 #define MC_G3 6
2531 #define MC_G4 7
2532 #define MC_G5 8
2533 #define MC_G6 9
2534 #define MC_G7 10
2535 #define MC_O0 11
2536 #define MC_O1 12
2537 #define MC_O2 13
2538 #define MC_O3 14
2539 #define MC_O4 15
2540 #define MC_O5 16
2541 #define MC_O6 17
2542 #define MC_O7 18
2543 #define MC_NGREG 19
2545 typedef abi_ulong target_mc_greg_t;
2546 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2548 struct target_mc_fq {
2549 abi_ulong *mcfq_addr;
2550 uint32_t mcfq_insn;
2553 struct target_mc_fpu {
2554 union {
2555 uint32_t sregs[32];
2556 uint64_t dregs[32];
2557 //uint128_t qregs[16];
2558 } mcfpu_fregs;
2559 abi_ulong mcfpu_fsr;
2560 abi_ulong mcfpu_fprs;
2561 abi_ulong mcfpu_gsr;
2562 struct target_mc_fq *mcfpu_fq;
2563 unsigned char mcfpu_qcnt;
2564 unsigned char mcfpu_qentsz;
2565 unsigned char mcfpu_enab;
2567 typedef struct target_mc_fpu target_mc_fpu_t;
2569 typedef struct {
2570 target_mc_gregset_t mc_gregs;
2571 target_mc_greg_t mc_fp;
2572 target_mc_greg_t mc_i7;
2573 target_mc_fpu_t mc_fpregs;
2574 } target_mcontext_t;
2576 struct target_ucontext {
2577 struct target_ucontext *tuc_link;
2578 abi_ulong tuc_flags;
2579 target_sigset_t tuc_sigmask;
2580 target_mcontext_t tuc_mcontext;
2583 /* A V9 register window */
2584 struct target_reg_window {
2585 abi_ulong locals[8];
2586 abi_ulong ins[8];
2589 #define TARGET_STACK_BIAS 2047
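/* The 64-bit SPARC ABI biases %sp and %fp by 2047 bytes; the register
 * window save area lives at the register value plus this bias. */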
2591 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2592 void sparc64_set_context(CPUSPARCState *env)
2594 abi_ulong ucp_addr;
2595 struct target_ucontext *ucp;
2596 target_mc_gregset_t *grp;
2597 abi_ulong pc, npc, tstate;
2598 abi_ulong fp, i7, w_addr;
2599 unsigned int i;
2601 ucp_addr = env->regwptr[UREG_I0];
2602 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2603 goto do_sigsegv;
2605 grp = &ucp->tuc_mcontext.mc_gregs;
2606 __get_user(pc, &((*grp)[MC_PC]));
2607 __get_user(npc, &((*grp)[MC_NPC]));
2608 if ((pc | npc) & 3) {
2609 goto do_sigsegv;
2611 if (env->regwptr[UREG_I1]) {
2612 target_sigset_t target_set;
2613 sigset_t set;
2615 if (TARGET_NSIG_WORDS == 1) {
2616 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2617 } else {
2618 abi_ulong *src, *dst;
2619 src = ucp->tuc_sigmask.sig;
2620 dst = target_set.sig;
2621 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2622 __get_user(*dst, src);
2625 target_to_host_sigset_internal(&set, &target_set);
2626 set_sigmask(&set);
2628 env->pc = pc;
2629 env->npc = npc;
2630 __get_user(env->y, &((*grp)[MC_Y]));
2631 __get_user(tstate, &((*grp)[MC_TSTATE]));
2632 env->asi = (tstate >> 24) & 0xff;
2633 cpu_put_ccr(env, tstate >> 32);
2634 cpu_put_cwp64(env, tstate & 0x1f);
2635 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2636 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2637 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2638 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2639 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2640 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2641 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2642 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2643 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2644 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2645 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2646 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2647 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2648 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2649 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2651 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2652 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2654 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2655 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2656 abi_ulong) != 0) {
2657 goto do_sigsegv;
2659 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2660 abi_ulong) != 0) {
2661 goto do_sigsegv;
2663 /* FIXME this does not match how the kernel handles the FPU in
2664 * its sparc64_set_context implementation. In particular the FPU
2665 * is only restored if fenab is non-zero in:
2666 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2668 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
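/* The 64 saved single-precision words are packed two per 64-bit fpr:
 * even indices into the upper half, odd indices into the lower half. */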
2670 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2671 for (i = 0; i < 64; i++, src++) {
2672 if (i & 1) {
2673 __get_user(env->fpr[i/2].l.lower, src);
2674 } else {
2675 __get_user(env->fpr[i/2].l.upper, src);
2679 __get_user(env->fsr,
2680 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2681 __get_user(env->gsr,
2682 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2683 unlock_user_struct(ucp, ucp_addr, 0);
2684 return;
2685 do_sigsegv:
2686 unlock_user_struct(ucp, ucp_addr, 0);
2687 force_sig(TARGET_SIGSEGV);
2690 void sparc64_get_context(CPUSPARCState *env)
2692 abi_ulong ucp_addr;
2693 struct target_ucontext *ucp;
2694 target_mc_gregset_t *grp;
2695 target_mcontext_t *mcp;
2696 abi_ulong fp, i7, w_addr;
2697 int err;
2698 unsigned int i;
2699 target_sigset_t target_set;
2700 sigset_t set;
2702 ucp_addr = env->regwptr[UREG_I0];
2703 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2704 goto do_sigsegv;
2707 mcp = &ucp->tuc_mcontext;
2708 grp = &mcp->mc_gregs;
2710 /* Skip over the trap instruction, first. */
2711 env->pc = env->npc;
2712 env->npc += 4;
2714 /* If we're only reading the signal mask then do_sigprocmask()
2715 * is guaranteed not to fail, which is important because we don't
2716 * have any way to signal a failure or restart this operation since
2717 * this is not a normal syscall.
2719 err = do_sigprocmask(0, NULL, &set);
2720 assert(err == 0);
2721 host_to_target_sigset_internal(&target_set, &set);
2722 if (TARGET_NSIG_WORDS == 1) {
2723 __put_user(target_set.sig[0],
2724 (abi_ulong *)&ucp->tuc_sigmask);
2725 } else {
2726 abi_ulong *src, *dst;
2727 src = target_set.sig;
2728 dst = ucp->tuc_sigmask.sig;
2729 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2730 __put_user(*src, dst);
2732 if (err)
2733 goto do_sigsegv;
2736 /* XXX: tstate must be saved properly */
2737 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2738 __put_user(env->pc, &((*grp)[MC_PC]));
2739 __put_user(env->npc, &((*grp)[MC_NPC]));
2740 __put_user(env->y, &((*grp)[MC_Y]));
2741 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2742 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2743 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2744 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2745 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2746 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2747 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2748 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2749 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2750 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2751 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2752 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2753 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2754 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2755 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2757 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2758 fp = i7 = 0;
2759 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2760 abi_ulong) != 0) {
2761 goto do_sigsegv;
2763 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2764 abi_ulong) != 0) {
2765 goto do_sigsegv;
2767 __put_user(fp, &(mcp->mc_fp));
2768 __put_user(i7, &(mcp->mc_i7));
2771 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2772 for (i = 0; i < 64; i++, dst++) {
2773 if (i & 1) {
2774 __put_user(env->fpr[i/2].l.lower, dst);
2775 } else {
2776 __put_user(env->fpr[i/2].l.upper, dst);
2780 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2781 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2782 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2784 if (err)
2785 goto do_sigsegv;
2786 unlock_user_struct(ucp, ucp_addr, 1);
2787 return;
2788 do_sigsegv:
2789 unlock_user_struct(ucp, ucp_addr, 1);
2790 force_sig(TARGET_SIGSEGV);
2792 #endif
2793 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2795 # if defined(TARGET_ABI_MIPSO32)
2796 struct target_sigcontext {
2797 uint32_t sc_regmask; /* Unused */
2798 uint32_t sc_status;
2799 uint64_t sc_pc;
2800 uint64_t sc_regs[32];
2801 uint64_t sc_fpregs[32];
2802 uint32_t sc_ownedfp; /* Unused */
2803 uint32_t sc_fpc_csr;
2804 uint32_t sc_fpc_eir; /* Unused */
2805 uint32_t sc_used_math;
2806 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2807 uint32_t pad0;
2808 uint64_t sc_mdhi;
2809 uint64_t sc_mdlo;
2810 target_ulong sc_hi1; /* Was sc_cause */
2811 target_ulong sc_lo1; /* Was sc_badvaddr */
2812 target_ulong sc_hi2; /* Was sc_sigset[4] */
2813 target_ulong sc_lo2;
2814 target_ulong sc_hi3;
2815 target_ulong sc_lo3;
2817 # else /* N32 || N64 */
2818 struct target_sigcontext {
2819 uint64_t sc_regs[32];
2820 uint64_t sc_fpregs[32];
2821 uint64_t sc_mdhi;
2822 uint64_t sc_hi1;
2823 uint64_t sc_hi2;
2824 uint64_t sc_hi3;
2825 uint64_t sc_mdlo;
2826 uint64_t sc_lo1;
2827 uint64_t sc_lo2;
2828 uint64_t sc_lo3;
2829 uint64_t sc_pc;
2830 uint32_t sc_fpc_csr;
2831 uint32_t sc_used_math;
2832 uint32_t sc_dsp;
2833 uint32_t sc_reserved;
2835 # endif /* O32 */
2837 struct sigframe {
2838 uint32_t sf_ass[4]; /* argument save space for o32 */
2839 uint32_t sf_code[2]; /* signal trampoline */
2840 struct target_sigcontext sf_sc;
2841 target_sigset_t sf_mask;
2844 struct target_ucontext {
2845 target_ulong tuc_flags;
2846 target_ulong tuc_link;
2847 target_stack_t tuc_stack;
2848 target_ulong pad0;
2849 struct target_sigcontext tuc_mcontext;
2850 target_sigset_t tuc_sigmask;
2853 struct target_rt_sigframe {
2854 uint32_t rs_ass[4]; /* argument save space for o32 */
2855 uint32_t rs_code[2]; /* signal trampoline */
2856 struct target_siginfo rs_info;
2857 struct target_ucontext rs_uc;
2860 /* Install trampoline to jump back from signal handler */
2861 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2863 int err = 0;
2866 * Set up the return code ...
2868 * li v0, __NR__foo_sigreturn
2869 * syscall
2872 __put_user(0x24020000 + syscall, tramp + 0);
2873 __put_user(0x0000000c, tramp + 1);
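/* 0x24020000 is 'addiu v0, zero, <nr>' with the syscall number in the
 * immediate field; 0x0000000c is 'syscall'. */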
2874 return err;
2877 static inline void setup_sigcontext(CPUMIPSState *regs,
2878 struct target_sigcontext *sc)
2880 int i;
2882 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2883 regs->hflags &= ~MIPS_HFLAG_BMASK;
2885 __put_user(0, &sc->sc_regs[0]);
2886 for (i = 1; i < 32; ++i) {
2887 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2890 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2891 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2893 /* Rather than checking for dsp existence, always copy. The storage
2894 would just be garbage otherwise. */
2895 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2896 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2897 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2898 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2899 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2900 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2902 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2903 __put_user(dsp, &sc->sc_dsp);
2906 __put_user(1, &sc->sc_used_math);
2908 for (i = 0; i < 32; ++i) {
2909 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2913 static inline void
2914 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2916 int i;
2918 __get_user(regs->CP0_EPC, &sc->sc_pc);
2920 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2921 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2923 for (i = 1; i < 32; ++i) {
2924 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2927 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2928 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2929 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2930 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2931 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2932 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2934 uint32_t dsp;
2935 __get_user(dsp, &sc->sc_dsp);
2936 cpu_wrdsp(dsp, 0x3ff, regs);
2939 for (i = 0; i < 32; ++i) {
2940 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2945 * Determine which stack to use.
2947 static inline abi_ulong
2948 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2950 unsigned long sp;
2952 /* Default to using normal stack */
2953 sp = regs->active_tc.gpr[29];
2956 * FPU emulator may have its own trampoline active just
2957 * above the user stack, 16-bytes before the next lowest
2958 * 16 byte boundary. Try to avoid trashing it.
2960 sp -= 32;
2962 /* This is the X/Open sanctioned signal stack switching. */
2963 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2964 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2967 return (sp - frame_size) & ~7;
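/* Bit 0 of the PC selects MIPS16/microMIPS mode on CPUs that have it;
 * mirror it into hflags and strip it from the architectural PC. */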
2970 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2972 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2973 env->hflags &= ~MIPS_HFLAG_M16;
2974 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2975 env->active_tc.PC &= ~(target_ulong) 1;
2979 # if defined(TARGET_ABI_MIPSO32)
2980 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2981 static void setup_frame(int sig, struct target_sigaction * ka,
2982 target_sigset_t *set, CPUMIPSState *regs)
2984 struct sigframe *frame;
2985 abi_ulong frame_addr;
2986 int i;
2988 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2989 trace_user_setup_frame(regs, frame_addr);
2990 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2991 goto give_sigsegv;
2994 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2996 setup_sigcontext(regs, &frame->sf_sc);
2998 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2999 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3003 * Arguments to signal handler:
3005 * a0 = signal number
3006 * a1 = 0 (should be cause)
3007 * a2 = pointer to struct sigcontext
3009 * $25 and PC point to the signal handler, $29 points to the
3010 * struct sigframe.
3012 regs->active_tc.gpr[ 4] = sig;
3013 regs->active_tc.gpr[ 5] = 0;
3014 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3015 regs->active_tc.gpr[29] = frame_addr;
3016 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3017 /* The original kernel code sets CP0_EPC to the handler
3018 * since it returns to userland using eret;
3019 * we cannot do this here, and we must set PC directly */
3020 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3021 mips_set_hflags_isa_mode_from_pc(regs);
3022 unlock_user_struct(frame, frame_addr, 1);
3023 return;
3025 give_sigsegv:
3026 force_sig(TARGET_SIGSEGV/*, current*/);
3029 long do_sigreturn(CPUMIPSState *regs)
3031 struct sigframe *frame;
3032 abi_ulong frame_addr;
3033 sigset_t blocked;
3034 target_sigset_t target_set;
3035 int i;
3037 frame_addr = regs->active_tc.gpr[29];
3038 trace_user_do_sigreturn(regs, frame_addr);
3039 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3040 goto badframe;
3042 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3043 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3046 target_to_host_sigset_internal(&blocked, &target_set);
3047 set_sigmask(&blocked);
3049 restore_sigcontext(regs, &frame->sf_sc);
3051 #if 0
3053 * Don't let your children do this ...
3055 __asm__ __volatile__(
3056 "move\t$29, %0\n\t"
3057 "j\tsyscall_exit"
3058 :/* no outputs */
3059 :"r" (&regs));
3060 /* Unreached */
3061 #endif
3063 regs->active_tc.PC = regs->CP0_EPC;
3064 mips_set_hflags_isa_mode_from_pc(regs);
3065 /* I am not sure this is right, but it seems to work
3066 * maybe a problem with nested signals? */
3067 regs->CP0_EPC = 0;
3068 return -TARGET_QEMU_ESIGRETURN;
3070 badframe:
3071 force_sig(TARGET_SIGSEGV/*, current*/);
3072 return 0;
3074 # endif /* O32 */
3076 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3077 target_siginfo_t *info,
3078 target_sigset_t *set, CPUMIPSState *env)
3080 struct target_rt_sigframe *frame;
3081 abi_ulong frame_addr;
3082 int i;
3084 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3085 trace_user_setup_rt_frame(env, frame_addr);
3086 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3087 goto give_sigsegv;
3090 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3092 tswap_siginfo(&frame->rs_info, info);
3094 __put_user(0, &frame->rs_uc.tuc_flags);
3095 __put_user(0, &frame->rs_uc.tuc_link);
3096 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3097 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3098 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3099 &frame->rs_uc.tuc_stack.ss_flags);
3101 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3103 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3104 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3108 * Arguments to signal handler:
3110 * a0 = signal number
3111 * a1 = pointer to siginfo_t
3112 * a2 = pointer to struct ucontext
3114 * $25 and PC point to the signal handler, $29 points to the
3115 * struct sigframe.
3117 env->active_tc.gpr[ 4] = sig;
3118 env->active_tc.gpr[ 5] = frame_addr
3119 + offsetof(struct target_rt_sigframe, rs_info);
3120 env->active_tc.gpr[ 6] = frame_addr
3121 + offsetof(struct target_rt_sigframe, rs_uc);
3122 env->active_tc.gpr[29] = frame_addr;
3123 env->active_tc.gpr[31] = frame_addr
3124 + offsetof(struct target_rt_sigframe, rs_code);
3125 /* The original kernel code sets CP0_EPC to the handler
3126 * since it returns to userland using eret;
3127 * we cannot do this here, and we must set PC directly */
3128 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3129 mips_set_hflags_isa_mode_from_pc(env);
3130 unlock_user_struct(frame, frame_addr, 1);
3131 return;
3133 give_sigsegv:
3134 unlock_user_struct(frame, frame_addr, 1);
3135 force_sig(TARGET_SIGSEGV/*, current*/);
3138 long do_rt_sigreturn(CPUMIPSState *env)
3140 struct target_rt_sigframe *frame;
3141 abi_ulong frame_addr;
3142 sigset_t blocked;
3144 frame_addr = env->active_tc.gpr[29];
3145 trace_user_do_rt_sigreturn(env, frame_addr);
3146 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3147 goto badframe;
3150 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3151 set_sigmask(&blocked);
3153 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3155 if (do_sigaltstack(frame_addr +
3156 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3157 0, get_sp_from_cpustate(env)) == -EFAULT)
3158 goto badframe;
3160 env->active_tc.PC = env->CP0_EPC;
3161 mips_set_hflags_isa_mode_from_pc(env);
3162 /* I am not sure this is right, but it seems to work
3163 * maybe a problem with nested signals? */
3164 env->CP0_EPC = 0;
3165 return -TARGET_QEMU_ESIGRETURN;
3167 badframe:
3168 force_sig(TARGET_SIGSEGV/*, current*/);
3169 return 0;
3172 #elif defined(TARGET_SH4)
3175 * code and data structures from linux kernel:
3176 * include/asm-sh/sigcontext.h
3177 * arch/sh/kernel/signal.c
3180 struct target_sigcontext {
3181 target_ulong oldmask;
3183 /* CPU registers */
3184 target_ulong sc_gregs[16];
3185 target_ulong sc_pc;
3186 target_ulong sc_pr;
3187 target_ulong sc_sr;
3188 target_ulong sc_gbr;
3189 target_ulong sc_mach;
3190 target_ulong sc_macl;
3192 /* FPU registers */
3193 target_ulong sc_fpregs[16];
3194 target_ulong sc_xfpregs[16];
3195 unsigned int sc_fpscr;
3196 unsigned int sc_fpul;
3197 unsigned int sc_ownedfp;
3200 struct target_sigframe
3202 struct target_sigcontext sc;
3203 target_ulong extramask[TARGET_NSIG_WORDS-1];
3204 uint16_t retcode[3];
3208 struct target_ucontext {
3209 target_ulong tuc_flags;
3210 struct target_ucontext *tuc_link;
3211 target_stack_t tuc_stack;
3212 struct target_sigcontext tuc_mcontext;
3213 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3216 struct target_rt_sigframe
3218 struct target_siginfo info;
3219 struct target_ucontext uc;
3220 uint16_t retcode[3];
3224 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3225 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3227 static abi_ulong get_sigframe(struct target_sigaction *ka,
3228 unsigned long sp, size_t frame_size)
3230 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3231 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3234 return (sp - frame_size) & -8ul;
3237 static void setup_sigcontext(struct target_sigcontext *sc,
3238 CPUSH4State *regs, unsigned long mask)
3240 int i;
3242 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3243 COPY(gregs[0]); COPY(gregs[1]);
3244 COPY(gregs[2]); COPY(gregs[3]);
3245 COPY(gregs[4]); COPY(gregs[5]);
3246 COPY(gregs[6]); COPY(gregs[7]);
3247 COPY(gregs[8]); COPY(gregs[9]);
3248 COPY(gregs[10]); COPY(gregs[11]);
3249 COPY(gregs[12]); COPY(gregs[13]);
3250 COPY(gregs[14]); COPY(gregs[15]);
3251 COPY(gbr); COPY(mach);
3252 COPY(macl); COPY(pr);
3253 COPY(sr); COPY(pc);
3254 #undef COPY
3256 for (i=0; i<16; i++) {
3257 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3259 __put_user(regs->fpscr, &sc->sc_fpscr);
3260 __put_user(regs->fpul, &sc->sc_fpul);
3262 /* non-iBCS2 extensions.. */
3263 __put_user(mask, &sc->oldmask);
3266 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3268 int i;
3270 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3271 COPY(gregs[0]); COPY(gregs[1]);
3272 COPY(gregs[2]); COPY(gregs[3]);
3273 COPY(gregs[4]); COPY(gregs[5]);
3274 COPY(gregs[6]); COPY(gregs[7]);
3275 COPY(gregs[8]); COPY(gregs[9]);
3276 COPY(gregs[10]); COPY(gregs[11]);
3277 COPY(gregs[12]); COPY(gregs[13]);
3278 COPY(gregs[14]); COPY(gregs[15]);
3279 COPY(gbr); COPY(mach);
3280 COPY(macl); COPY(pr);
3281 COPY(sr); COPY(pc);
3282 #undef COPY
3284 for (i=0; i<16; i++) {
3285 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3287 __get_user(regs->fpscr, &sc->sc_fpscr);
3288 __get_user(regs->fpul, &sc->sc_fpul);
3290 regs->tra = -1; /* disable syscall checks */
3293 static void setup_frame(int sig, struct target_sigaction *ka,
3294 target_sigset_t *set, CPUSH4State *regs)
3296 struct target_sigframe *frame;
3297 abi_ulong frame_addr;
3298 int i;
3300 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3301 trace_user_setup_frame(regs, frame_addr);
3302 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3303 goto give_sigsegv;
3306 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3308 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3309 __put_user(set->sig[i + 1], &frame->extramask[i]);
3312 /* Set up to return from userspace. If provided, use a stub
3313 already in userspace. */
3314 if (ka->sa_flags & TARGET_SA_RESTORER) {
3315 regs->pr = (unsigned long) ka->sa_restorer;
3316 } else {
3317 /* Generate return code (system call to sigreturn) */
3318 abi_ulong retcode_addr = frame_addr +
3319 offsetof(struct target_sigframe, retcode);
3320 __put_user(MOVW(2), &frame->retcode[0]);
3321 __put_user(TRAP_NOARG, &frame->retcode[1]);
3322 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
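/* MOVW(2) loads the halfword two slots ahead (retcode[2], holding
 * TARGET_NR_sigreturn) into r3; TRAP_NOARG then traps into the kernel
 * with the syscall number in r3. */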
3323 regs->pr = (unsigned long) retcode_addr;
3326 /* Set up registers for signal handler */
3327 regs->gregs[15] = frame_addr;
3328 regs->gregs[4] = sig; /* Arg for signal handler */
3329 regs->gregs[5] = 0;
3330 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3331 regs->pc = (unsigned long) ka->_sa_handler;
3333 unlock_user_struct(frame, frame_addr, 1);
3334 return;
3336 give_sigsegv:
3337 unlock_user_struct(frame, frame_addr, 1);
3338 force_sig(TARGET_SIGSEGV);
3341 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3342 target_siginfo_t *info,
3343 target_sigset_t *set, CPUSH4State *regs)
3345 struct target_rt_sigframe *frame;
3346 abi_ulong frame_addr;
3347 int i;
3349 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3350 trace_user_setup_rt_frame(regs, frame_addr);
3351 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3352 goto give_sigsegv;
3355 tswap_siginfo(&frame->info, info);
3357 /* Create the ucontext. */
3358 __put_user(0, &frame->uc.tuc_flags);
3359 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3360 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3361 &frame->uc.tuc_stack.ss_sp);
3362 __put_user(sas_ss_flags(regs->gregs[15]),
3363 &frame->uc.tuc_stack.ss_flags);
3364 __put_user(target_sigaltstack_used.ss_size,
3365 &frame->uc.tuc_stack.ss_size);
3366 setup_sigcontext(&frame->uc.tuc_mcontext,
3367 regs, set->sig[0]);
3368 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3369 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3372 /* Set up to return from userspace. If provided, use a stub
3373 already in userspace. */
3374 if (ka->sa_flags & TARGET_SA_RESTORER) {
3375 regs->pr = (unsigned long) ka->sa_restorer;
3376 } else {
3377 /* Generate return code (system call to sigreturn) */
3378 abi_ulong retcode_addr = frame_addr +
3379 offsetof(struct target_rt_sigframe, retcode);
3380 __put_user(MOVW(2), &frame->retcode[0]);
3381 __put_user(TRAP_NOARG, &frame->retcode[1]);
3382 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3383 regs->pr = (unsigned long) retcode_addr;
3386 /* Set up registers for signal handler */
3387 regs->gregs[15] = frame_addr;
3388 regs->gregs[4] = sig; /* Arg for signal handler */
3389 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3390 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3391 regs->pc = (unsigned long) ka->_sa_handler;
3393 unlock_user_struct(frame, frame_addr, 1);
3394 return;
3396 give_sigsegv:
3397 unlock_user_struct(frame, frame_addr, 1);
3398 force_sig(TARGET_SIGSEGV);
3401 long do_sigreturn(CPUSH4State *regs)
3403 struct target_sigframe *frame;
3404 abi_ulong frame_addr;
3405 sigset_t blocked;
3406 target_sigset_t target_set;
3407 int i;
3408 int err = 0;
3410 frame_addr = regs->gregs[15];
3411 trace_user_do_sigreturn(regs, frame_addr);
3412 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3413 goto badframe;
3416 __get_user(target_set.sig[0], &frame->sc.oldmask);
3417 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3418 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3421 if (err)
3422 goto badframe;
3424 target_to_host_sigset_internal(&blocked, &target_set);
3425 set_sigmask(&blocked);
3427 restore_sigcontext(regs, &frame->sc);
3429 unlock_user_struct(frame, frame_addr, 0);
3430 return -TARGET_QEMU_ESIGRETURN;
3432 badframe:
3433 unlock_user_struct(frame, frame_addr, 0);
3434 force_sig(TARGET_SIGSEGV);
3435 return 0;
3438 long do_rt_sigreturn(CPUSH4State *regs)
3440 struct target_rt_sigframe *frame;
3441 abi_ulong frame_addr;
3442 sigset_t blocked;
3444 frame_addr = regs->gregs[15];
3445 trace_user_do_rt_sigreturn(regs, frame_addr);
3446 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3447 goto badframe;
3450 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3451 set_sigmask(&blocked);
3453 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3455 if (do_sigaltstack(frame_addr +
3456 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3457 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3458 goto badframe;
3461 unlock_user_struct(frame, frame_addr, 0);
3462 return -TARGET_QEMU_ESIGRETURN;
3464 badframe:
3465 unlock_user_struct(frame, frame_addr, 0);
3466 force_sig(TARGET_SIGSEGV);
3467 return 0;
3469 #elif defined(TARGET_MICROBLAZE)
3471 struct target_sigcontext {
3472 struct target_pt_regs regs; /* needs to be first */
3473 uint32_t oldmask;
3476 struct target_stack_t {
3477 abi_ulong ss_sp;
3478 int ss_flags;
3479 unsigned int ss_size;
3482 struct target_ucontext {
3483 abi_ulong tuc_flags;
3484 abi_ulong tuc_link;
3485 struct target_stack_t tuc_stack;
3486 struct target_sigcontext tuc_mcontext;
3487 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3490 /* Signal frames. */
3491 struct target_signal_frame {
3492 struct target_ucontext uc;
3493 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3494 uint32_t tramp[2];
3497 struct rt_signal_frame {
3498 siginfo_t info;
3499 struct ucontext uc;
3500 uint32_t tramp[2];
3503 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3505 __put_user(env->regs[0], &sc->regs.r0);
3506 __put_user(env->regs[1], &sc->regs.r1);
3507 __put_user(env->regs[2], &sc->regs.r2);
3508 __put_user(env->regs[3], &sc->regs.r3);
3509 __put_user(env->regs[4], &sc->regs.r4);
3510 __put_user(env->regs[5], &sc->regs.r5);
3511 __put_user(env->regs[6], &sc->regs.r6);
3512 __put_user(env->regs[7], &sc->regs.r7);
3513 __put_user(env->regs[8], &sc->regs.r8);
3514 __put_user(env->regs[9], &sc->regs.r9);
3515 __put_user(env->regs[10], &sc->regs.r10);
3516 __put_user(env->regs[11], &sc->regs.r11);
3517 __put_user(env->regs[12], &sc->regs.r12);
3518 __put_user(env->regs[13], &sc->regs.r13);
3519 __put_user(env->regs[14], &sc->regs.r14);
3520 __put_user(env->regs[15], &sc->regs.r15);
3521 __put_user(env->regs[16], &sc->regs.r16);
3522 __put_user(env->regs[17], &sc->regs.r17);
3523 __put_user(env->regs[18], &sc->regs.r18);
3524 __put_user(env->regs[19], &sc->regs.r19);
3525 __put_user(env->regs[20], &sc->regs.r20);
3526 __put_user(env->regs[21], &sc->regs.r21);
3527 __put_user(env->regs[22], &sc->regs.r22);
3528 __put_user(env->regs[23], &sc->regs.r23);
3529 __put_user(env->regs[24], &sc->regs.r24);
3530 __put_user(env->regs[25], &sc->regs.r25);
3531 __put_user(env->regs[26], &sc->regs.r26);
3532 __put_user(env->regs[27], &sc->regs.r27);
3533 __put_user(env->regs[28], &sc->regs.r28);
3534 __put_user(env->regs[29], &sc->regs.r29);
3535 __put_user(env->regs[30], &sc->regs.r30);
3536 __put_user(env->regs[31], &sc->regs.r31);
3537 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3540 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3542 __get_user(env->regs[0], &sc->regs.r0);
3543 __get_user(env->regs[1], &sc->regs.r1);
3544 __get_user(env->regs[2], &sc->regs.r2);
3545 __get_user(env->regs[3], &sc->regs.r3);
3546 __get_user(env->regs[4], &sc->regs.r4);
3547 __get_user(env->regs[5], &sc->regs.r5);
3548 __get_user(env->regs[6], &sc->regs.r6);
3549 __get_user(env->regs[7], &sc->regs.r7);
3550 __get_user(env->regs[8], &sc->regs.r8);
3551 __get_user(env->regs[9], &sc->regs.r9);
3552 __get_user(env->regs[10], &sc->regs.r10);
3553 __get_user(env->regs[11], &sc->regs.r11);
3554 __get_user(env->regs[12], &sc->regs.r12);
3555 __get_user(env->regs[13], &sc->regs.r13);
3556 __get_user(env->regs[14], &sc->regs.r14);
3557 __get_user(env->regs[15], &sc->regs.r15);
3558 __get_user(env->regs[16], &sc->regs.r16);
3559 __get_user(env->regs[17], &sc->regs.r17);
3560 __get_user(env->regs[18], &sc->regs.r18);
3561 __get_user(env->regs[19], &sc->regs.r19);
3562 __get_user(env->regs[20], &sc->regs.r20);
3563 __get_user(env->regs[21], &sc->regs.r21);
3564 __get_user(env->regs[22], &sc->regs.r22);
3565 __get_user(env->regs[23], &sc->regs.r23);
3566 __get_user(env->regs[24], &sc->regs.r24);
3567 __get_user(env->regs[25], &sc->regs.r25);
3568 __get_user(env->regs[26], &sc->regs.r26);
3569 __get_user(env->regs[27], &sc->regs.r27);
3570 __get_user(env->regs[28], &sc->regs.r28);
3571 __get_user(env->regs[29], &sc->regs.r29);
3572 __get_user(env->regs[30], &sc->regs.r30);
3573 __get_user(env->regs[31], &sc->regs.r31);
3574 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3577 static abi_ulong get_sigframe(struct target_sigaction *ka,
3578 CPUMBState *env, int frame_size)
3580 abi_ulong sp = env->regs[1];
3582 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3583 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3586 return ((sp - frame_size) & -8UL);
3589 static void setup_frame(int sig, struct target_sigaction *ka,
3590 target_sigset_t *set, CPUMBState *env)
3592 struct target_signal_frame *frame;
3593 abi_ulong frame_addr;
3594 int i;
3596 frame_addr = get_sigframe(ka, env, sizeof *frame);
3597 trace_user_setup_frame(env, frame_addr);
3598 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3599 goto badframe;
3601 /* Save the mask. */
3602 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3604 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3605 __put_user(set->sig[i], &frame->extramask[i - 1]);
3608 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3610 /* Set up to return from userspace. If provided, use a stub
3611 already in userspace. */
3612 /* minus 8 is offset to cater for "rtsd r15,8" offset */
3613 if (ka->sa_flags & TARGET_SA_RESTORER) {
3614 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3615 } else {
3616 uint32_t t;
3617 /* Note, these encodings are _big endian_! */
3618 /* addi r12, r0, __NR_sigreturn */
3619 t = 0x31800000UL | TARGET_NR_sigreturn;
3620 __put_user(t, frame->tramp + 0);
3621 /* brki r14, 0x8 */
3622 t = 0xb9cc0008UL;
3623 __put_user(t, frame->tramp + 1);
3625 /* Return from sighandler will jump to the tramp.
3626 Negative 8 offset because return is rtsd r15, 8 */
3627 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3628 - 8;
3631 /* Set up registers for signal handler */
3632 env->regs[1] = frame_addr;
3633 /* Signal handler args: */
3634 env->regs[5] = sig; /* Arg 0: signum */
3635 env->regs[6] = 0;
3636 /* arg 1: sigcontext */
3637 env->regs[7] = frame_addr + offsetof(typeof(*frame), uc);
3639 /* Offset of 4 to handle microblaze rtid r14, 0 */
3640 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3642 unlock_user_struct(frame, frame_addr, 1);
3643 return;
3644 badframe:
3645 force_sig(TARGET_SIGSEGV);
3648 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3649 target_siginfo_t *info,
3650 target_sigset_t *set, CPUMBState *env)
3652 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3655 long do_sigreturn(CPUMBState *env)
3657 struct target_signal_frame *frame;
3658 abi_ulong frame_addr;
3659 target_sigset_t target_set;
3660 sigset_t set;
3661 int i;
3663 frame_addr = env->regs[R_SP];
3664 trace_user_do_sigreturn(env, frame_addr);
3665 /* Make sure the guest isn't playing games. */
3666 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3667 goto badframe;
3669 /* Restore blocked signals */
3670 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3671 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3672 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3674 target_to_host_sigset_internal(&set, &target_set);
3675 set_sigmask(&set);
3677 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3678 /* We got here through a sigreturn syscall, our path back is via an
3679 rtb insn so setup r14 for that. */
3680 env->regs[14] = env->sregs[SR_PC];
3682 unlock_user_struct(frame, frame_addr, 0);
3683 return -TARGET_QEMU_ESIGRETURN;
3684 badframe:
3685 force_sig(TARGET_SIGSEGV);
3688 long do_rt_sigreturn(CPUMBState *env)
3690 trace_user_do_rt_sigreturn(env, 0);
3691 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3692 return -TARGET_ENOSYS;
3695 #elif defined(TARGET_CRIS)
3697 struct target_sigcontext {
3698 struct target_pt_regs regs; /* needs to be first */
3699 uint32_t oldmask;
3700 uint32_t usp; /* usp before stacking this gunk on it */
3703 /* Signal frames. */
3704 struct target_signal_frame {
3705 struct target_sigcontext sc;
3706 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3707 uint16_t retcode[4]; /* Trampoline code. */
3710 struct rt_signal_frame {
3711 siginfo_t *pinfo;
3712 void *puc;
3713 siginfo_t info;
3714 struct ucontext uc;
3715 uint16_t retcode[4]; /* Trampoline code. */
3718 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3720 __put_user(env->regs[0], &sc->regs.r0);
3721 __put_user(env->regs[1], &sc->regs.r1);
3722 __put_user(env->regs[2], &sc->regs.r2);
3723 __put_user(env->regs[3], &sc->regs.r3);
3724 __put_user(env->regs[4], &sc->regs.r4);
3725 __put_user(env->regs[5], &sc->regs.r5);
3726 __put_user(env->regs[6], &sc->regs.r6);
3727 __put_user(env->regs[7], &sc->regs.r7);
3728 __put_user(env->regs[8], &sc->regs.r8);
3729 __put_user(env->regs[9], &sc->regs.r9);
3730 __put_user(env->regs[10], &sc->regs.r10);
3731 __put_user(env->regs[11], &sc->regs.r11);
3732 __put_user(env->regs[12], &sc->regs.r12);
3733 __put_user(env->regs[13], &sc->regs.r13);
3734 __put_user(env->regs[14], &sc->usp);
3735 __put_user(env->regs[15], &sc->regs.acr);
3736 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3737 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3738 __put_user(env->pc, &sc->regs.erp);
3741 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3743 __get_user(env->regs[0], &sc->regs.r0);
3744 __get_user(env->regs[1], &sc->regs.r1);
3745 __get_user(env->regs[2], &sc->regs.r2);
3746 __get_user(env->regs[3], &sc->regs.r3);
3747 __get_user(env->regs[4], &sc->regs.r4);
3748 __get_user(env->regs[5], &sc->regs.r5);
3749 __get_user(env->regs[6], &sc->regs.r6);
3750 __get_user(env->regs[7], &sc->regs.r7);
3751 __get_user(env->regs[8], &sc->regs.r8);
3752 __get_user(env->regs[9], &sc->regs.r9);
3753 __get_user(env->regs[10], &sc->regs.r10);
3754 __get_user(env->regs[11], &sc->regs.r11);
3755 __get_user(env->regs[12], &sc->regs.r12);
3756 __get_user(env->regs[13], &sc->regs.r13);
3757 __get_user(env->regs[14], &sc->usp);
3758 __get_user(env->regs[15], &sc->regs.acr);
3759 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3760 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3761 __get_user(env->pc, &sc->regs.erp);
3764 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3766 abi_ulong sp;
3767 /* Align the stack downwards to 4. */
3768 sp = (env->regs[R_SP] & ~3);
3769 return sp - framesize;
3772 static void setup_frame(int sig, struct target_sigaction *ka,
3773 target_sigset_t *set, CPUCRISState *env)
3775 struct target_signal_frame *frame;
3776 abi_ulong frame_addr;
3777 int i;
3779 frame_addr = get_sigframe(env, sizeof *frame);
3780 trace_user_setup_frame(env, frame_addr);
3781 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3782 goto badframe;
3785 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3786 * use this trampoline anymore but it sets it up for GDB.
3787 * In QEMU, using the trampoline simplifies things a bit so we use it.
3789 * This is movu.w __NR_sigreturn, r9; break 13;
3791 __put_user(0x9c5f, frame->retcode+0);
3792 __put_user(TARGET_NR_sigreturn,
3793 frame->retcode + 1);
3794 __put_user(0xe93d, frame->retcode + 2);
3796 /* Save the mask. */
3797 __put_user(set->sig[0], &frame->sc.oldmask);
3799 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3800 __put_user(set->sig[i], &frame->extramask[i - 1]);
3803 setup_sigcontext(&frame->sc, env);
3805 /* Move the stack and setup the arguments for the handler. */
3806 env->regs[R_SP] = frame_addr;
3807 env->regs[10] = sig;
3808 env->pc = (unsigned long) ka->_sa_handler;
3809 /* Link SRP so the guest returns through the trampoline. */
3810 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3812 unlock_user_struct(frame, frame_addr, 1);
3813 return;
3814 badframe:
3815 force_sig(TARGET_SIGSEGV);
3818 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3819 target_siginfo_t *info,
3820 target_sigset_t *set, CPUCRISState *env)
3822 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3825 long do_sigreturn(CPUCRISState *env)
3827 struct target_signal_frame *frame;
3828 abi_ulong frame_addr;
3829 target_sigset_t target_set;
3830 sigset_t set;
3831 int i;
3833 frame_addr = env->regs[R_SP];
3834 trace_user_do_sigreturn(env, frame_addr);
3835 /* Make sure the guest isn't playing games. */
3836 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3837 goto badframe;
3840 /* Restore blocked signals */
3841 __get_user(target_set.sig[0], &frame->sc.oldmask);
3842 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3843 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3845 target_to_host_sigset_internal(&set, &target_set);
3846 set_sigmask(&set);
3848 restore_sigcontext(&frame->sc, env);
3849 unlock_user_struct(frame, frame_addr, 0);
3850 return -TARGET_QEMU_ESIGRETURN;
3851 badframe:
3852 force_sig(TARGET_SIGSEGV);
3855 long do_rt_sigreturn(CPUCRISState *env)
3857 trace_user_do_rt_sigreturn(env, 0);
3858 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3859 return -TARGET_ENOSYS;
3862 #elif defined(TARGET_OPENRISC)
3864 struct target_sigcontext {
3865 struct target_pt_regs regs;
3866 abi_ulong oldmask;
3867 abi_ulong usp;
3870 struct target_ucontext {
3871 abi_ulong tuc_flags;
3872 abi_ulong tuc_link;
3873 target_stack_t tuc_stack;
3874 struct target_sigcontext tuc_mcontext;
3875 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3878 struct target_rt_sigframe {
3879 abi_ulong pinfo;
3880 uint64_t puc;
3881 struct target_siginfo info;
3882 struct target_sigcontext sc;
3883 struct target_ucontext uc;
3884 unsigned char retcode[16]; /* trampoline code */
3887 /* This is the asm-generic/ucontext.h version */
3888 #if 0
3889 static int restore_sigcontext(CPUOpenRISCState *regs,
3890 struct target_sigcontext *sc)
3892 unsigned int err = 0;
3893 unsigned long old_usp;
3895 /* Always make any pending restarted system call return -EINTR */
3896 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3898 /* restore the regs from &sc->regs (same as sc, since regs is first)
3899 * (sc is already checked for VERIFY_READ since the sigframe was
3900 * checked in sys_sigreturn previously)
3903 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3904 goto badframe;
3907 /* make sure the U-flag is set so user-mode cannot fool us */
3909 regs->sr &= ~SR_SM;
3911 /* restore the old USP as it was before we stacked the sc etc.
3912 * (we cannot just pop the sigcontext since we aligned the sp and
3913 * stuff after pushing it)
3916 __get_user(old_usp, &sc->usp);
3917 phx_signal("old_usp 0x%lx", old_usp);
3919 __PHX__ REALLY /* ??? */
3920 wrusp(old_usp);
3921 regs->gpr[1] = old_usp;
3923 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3924 * after this completes, but we don't use that mechanism. maybe we can
3925 * use it now ?
3928 return err;
3930 badframe:
3931 return 1;
3933 #endif
3935 /* Set up a signal frame. */
3937 static void setup_sigcontext(struct target_sigcontext *sc,
3938 CPUOpenRISCState *regs,
3939 unsigned long mask)
3941 unsigned long usp = regs->gpr[1];
3943 /* copy the regs. they are first in sc so we can use sc directly */
3945 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3947 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3948 the signal handler. The frametype will be restored to its previous
3949 value in restore_sigcontext. */
3950 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3952 /* then some other stuff */
3953 __put_user(mask, &sc->oldmask);
3954 __put_user(usp, &sc->usp);
3957 static inline unsigned long align_sigframe(unsigned long sp)
3959 unsigned long i;
3960 i = sp & ~3UL;
3961 return i;
3964 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3965 CPUOpenRISCState *regs,
3966 size_t frame_size)
3968 unsigned long sp = regs->gpr[1];
3969 int onsigstack = on_sig_stack(sp);
3971 /* redzone */
3972 /* This is the X/Open sanctioned signal stack switching. */
3973 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3974 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3977 sp = align_sigframe(sp - frame_size);
3980 * If we are on the alternate signal stack and would overflow it, don't.
3981 * Return an always-bogus address instead so we will die with SIGSEGV.
3984 if (onsigstack && !likely(on_sig_stack(sp))) {
3985 return -1L;
3988 return sp;
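/* Build the OpenRISC rt signal frame on the guest stack (siginfo, ucontext
   and a small sigreturn trampoline) and point the CPU at the handler. */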
3991 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3992 target_siginfo_t *info,
3993 target_sigset_t *set, CPUOpenRISCState *env)
3995 int err = 0;
3996 abi_ulong frame_addr;
3997 unsigned long return_ip;
3998 struct target_rt_sigframe *frame;
3999 abi_ulong info_addr, uc_addr;
4001 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4002 trace_user_setup_rt_frame(env, frame_addr);
4003 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4004 goto give_sigsegv;
4007 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4008 __put_user(info_addr, &frame->pinfo);
4009 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4010 __put_user(uc_addr, &frame->puc);
4012 if (ka->sa_flags & TARGET_SA_SIGINFO) {
4013 tswap_siginfo(&frame->info, info);
4016 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
4017 __put_user(0, &frame->uc.tuc_flags);
4018 __put_user(0, &frame->uc.tuc_link);
4019 __put_user(target_sigaltstack_used.ss_sp,
4020 &frame->uc.tuc_stack.ss_sp);
4021 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
4022 __put_user(target_sigaltstack_used.ss_size,
4023 &frame->uc.tuc_stack.ss_size);
4024 setup_sigcontext(&frame->sc, env, set->sig[0]);
4026 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4028 /* trampoline - the desired return ip is the retcode itself */
4029 return_ip = frame_addr + offsetof(struct target_rt_sigframe, retcode);
4030 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1 */
4031 __put_user(0xa960, (short *)(frame->retcode + 0));
4032 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4033 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4034 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
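    /* The trailing 0x15000000 word appears to be an l.nop padding out the
       trampoline. */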
4036 if (err) {
4037 goto give_sigsegv;
4040 /* TODO what is the current->exec_domain stuff and invmap ? */
4042 /* Set up registers for signal handler */
4043 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4044 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
4045 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
4046 env->gpr[4] = info_addr; /* arg 2: (siginfo_t*) */
4047 env->gpr[5] = uc_addr; /* arg 3: ucontext */
4049 /* actually move the usp to reflect the stacked frame */
4050 env->gpr[1] = frame_addr;
4052 return;
4054 give_sigsegv:
4055 unlock_user_struct(frame, frame_addr, 1);
4056 if (sig == TARGET_SIGSEGV) {
4057 ka->_sa_handler = TARGET_SIG_DFL;
4059 force_sig(TARGET_SIGSEGV);
4062 long do_sigreturn(CPUOpenRISCState *env)
4064 trace_user_do_sigreturn(env, 0);
4065 fprintf(stderr, "do_sigreturn: not implemented\n");
4066 return -TARGET_ENOSYS;
4069 long do_rt_sigreturn(CPUOpenRISCState *env)
4071 trace_user_do_rt_sigreturn(env, 0);
4072 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4073 return -TARGET_ENOSYS;
4075 /* TARGET_OPENRISC */
4077 #elif defined(TARGET_S390X)
4079 #define __NUM_GPRS 16
4080 #define __NUM_FPRS 16
4081 #define __NUM_ACRS 16
4083 #define S390_SYSCALL_SIZE 2
4084 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4086 #define _SIGCONTEXT_NSIG 64
4087 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4088 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4089 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4090 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4091 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
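/* 0x0a00 is the base encoding of the s390 SVC instruction; OR-ing in a
   syscall number below yields the two-byte "svc NR" sigreturn stub. */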
4093 typedef struct {
4094 target_psw_t psw;
4095 target_ulong gprs[__NUM_GPRS];
4096 unsigned int acrs[__NUM_ACRS];
4097 } target_s390_regs_common;
4099 typedef struct {
4100 unsigned int fpc;
4101 double fprs[__NUM_FPRS];
4102 } target_s390_fp_regs;
4104 typedef struct {
4105 target_s390_regs_common regs;
4106 target_s390_fp_regs fpregs;
4107 } target_sigregs;
4109 struct target_sigcontext {
4110 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4111 target_sigregs *sregs;
4114 typedef struct {
4115 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4116 struct target_sigcontext sc;
4117 target_sigregs sregs;
4118 int signo;
4119 uint8_t retcode[S390_SYSCALL_SIZE];
4120 } sigframe;
4122 struct target_ucontext {
4123 target_ulong tuc_flags;
4124 struct target_ucontext *tuc_link;
4125 target_stack_t tuc_stack;
4126 target_sigregs tuc_mcontext;
4127 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4130 typedef struct {
4131 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4132 uint8_t retcode[S390_SYSCALL_SIZE];
4133 struct target_siginfo info;
4134 struct target_ucontext uc;
4135 } rt_sigframe;
4137 static inline abi_ulong
4138 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4140 abi_ulong sp;
4142 /* Default to using normal stack */
4143 sp = env->regs[15];
4145 /* This is the X/Open sanctioned signal stack switching. */
4146 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4147 if (!sas_ss_flags(sp)) {
4148 sp = target_sigaltstack_used.ss_sp +
4149 target_sigaltstack_used.ss_size;
4153 /* This is the legacy signal stack switching. */
4154 else if (/* FIXME !user_mode(regs) */ 0 &&
4155 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4156 ka->sa_restorer) {
4157 sp = (abi_ulong) ka->sa_restorer;
4160 return (sp - frame_size) & -8ul;
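/* Dump the emulated CPU state (PSW, GPRs, access and FP registers) into the
   sigregs area of the signal frame. */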
4163 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4165 int i;
4166 //save_access_regs(current->thread.acrs); FIXME
4168 /* Copy a 'clean' PSW mask to the user to avoid leaking
4169 information about whether PER is currently on. */
4170 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4171 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4172 for (i = 0; i < 16; i++) {
4173 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4175 for (i = 0; i < 16; i++) {
4176 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4179 * We have to store the fp registers to current->thread.fp_regs
4180 * to merge them with the emulated registers.
4182 //save_fp_regs(&current->thread.fp_regs); FIXME
4183 for (i = 0; i < 16; i++) {
4184 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
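/* Build the old-style s390 signal frame: sigcontext, saved registers and,
   when no sa_restorer is supplied, a two-byte svc stub for sigreturn. */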
4188 static void setup_frame(int sig, struct target_sigaction *ka,
4189 target_sigset_t *set, CPUS390XState *env)
4191 sigframe *frame;
4192 abi_ulong frame_addr;
4194 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4195 trace_user_setup_frame(env, frame_addr);
4196 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4197 goto give_sigsegv;
4200 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4202 save_sigregs(env, &frame->sregs);
4204 __put_user((abi_ulong)(frame_addr + offsetof(sigframe, sregs)),
4205 (abi_ulong *)&frame->sc.sregs);
4207 /* Set up to return from userspace. If provided, use a stub
4208 already in userspace. */
4209 if (ka->sa_flags & TARGET_SA_RESTORER) {
4210 env->regs[14] = (unsigned long)
4211 ka->sa_restorer | PSW_ADDR_AMODE;
4212 } else {
4213 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4214 | PSW_ADDR_AMODE;
4215 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4216 (uint16_t *)(frame->retcode));
4219 /* Set up backchain. */
4220 __put_user(env->regs[15], (abi_ulong *) frame);
4222 /* Set up registers for signal handler */
4223 env->regs[15] = frame_addr;
4224 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4226 env->regs[2] = sig; //map_signal(sig);
4227 env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);
4229 /* We forgot to include these in the sigcontext.
4230 To avoid breaking binary compatibility, they are passed as args. */
4231 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4232 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4234 /* Place signal number on stack to allow backtrace from handler. */
4235 __put_user(env->regs[2], (int *) &frame->signo);
4236 unlock_user_struct(frame, frame_addr, 1);
4237 return;
4239 give_sigsegv:
4240 force_sig(TARGET_SIGSEGV);
4243 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4244 target_siginfo_t *info,
4245 target_sigset_t *set, CPUS390XState *env)
4247 int i;
4248 rt_sigframe *frame;
4249 abi_ulong frame_addr;
4251 frame_addr = get_sigframe(ka, env, sizeof *frame);
4252 trace_user_setup_rt_frame(env, frame_addr);
4253 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4254 goto give_sigsegv;
4257 tswap_siginfo(&frame->info, info);
4259 /* Create the ucontext. */
4260 __put_user(0, &frame->uc.tuc_flags);
4261 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4262 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4263 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4264 &frame->uc.tuc_stack.ss_flags);
4265 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4266 save_sigregs(env, &frame->uc.tuc_mcontext);
4267 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4268 __put_user((abi_ulong)set->sig[i],
4269 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4272 /* Set up to return from userspace. If provided, use a stub
4273 already in userspace. */
4274 if (ka->sa_flags & TARGET_SA_RESTORER) {
4275 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4276 } else {
4277 env->regs[14] = (frame_addr + offsetof(rt_sigframe, retcode)) | PSW_ADDR_AMODE;
4278 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4279 (uint16_t *)(frame->retcode));
4282 /* Set up backchain. */
4283 __put_user(env->regs[15], (abi_ulong *) frame);
4285 /* Set up registers for signal handler */
4286 env->regs[15] = frame_addr;
4287 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4289 env->regs[2] = sig; //map_signal(sig);
4290 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4291 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4292 return;
4294 give_sigsegv:
4295 force_sig(TARGET_SIGSEGV);
4298 static int
4299 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4301 int err = 0;
4302 int i;
4304 for (i = 0; i < 16; i++) {
4305 __get_user(env->regs[i], &sc->regs.gprs[i]);
4308 __get_user(env->psw.mask, &sc->regs.psw.mask);
4309 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4310 (unsigned long long)env->psw.addr);
4311 __get_user(env->psw.addr, &sc->regs.psw.addr);
4312 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4314 for (i = 0; i < 16; i++) {
4315 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4317 for (i = 0; i < 16; i++) {
4318 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4321 return err;
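/* Undo setup_frame(): restore the signal mask and the saved register set
   from the frame found at the guest stack pointer. */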
4324 long do_sigreturn(CPUS390XState *env)
4326 sigframe *frame;
4327 abi_ulong frame_addr = env->regs[15];
4328 target_sigset_t target_set;
4329 sigset_t set;
4331 trace_user_do_sigreturn(env, frame_addr);
4332 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4333 goto badframe;
4335 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4337 target_to_host_sigset_internal(&set, &target_set);
4338 set_sigmask(&set); /* ~_BLOCKABLE? */
4340 if (restore_sigregs(env, &frame->sregs)) {
4341 goto badframe;
4344 unlock_user_struct(frame, frame_addr, 0);
4345 return -TARGET_QEMU_ESIGRETURN;
4347 badframe:
4348 force_sig(TARGET_SIGSEGV);
4349 return 0;
4352 long do_rt_sigreturn(CPUS390XState *env)
4354 rt_sigframe *frame;
4355 abi_ulong frame_addr = env->regs[15];
4356 sigset_t set;
4358 trace_user_do_rt_sigreturn(env, frame_addr);
4359 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4360 goto badframe;
4362 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4364 set_sigmask(&set); /* ~_BLOCKABLE? */
4366 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4367 goto badframe;
4370 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4371 get_sp_from_cpustate(env)) == -EFAULT) {
4372 goto badframe;
4374 unlock_user_struct(frame, frame_addr, 0);
4375 return -TARGET_QEMU_ESIGRETURN;
4377 badframe:
4378 unlock_user_struct(frame, frame_addr, 0);
4379 force_sig(TARGET_SIGSEGV);
4380 return 0;
4383 #elif defined(TARGET_PPC)
4385 /* Size of dummy stack frame allocated when calling signal handler.
4386 See arch/powerpc/include/asm/ptrace.h. */
4387 #if defined(TARGET_PPC64)
4388 #define SIGNAL_FRAMESIZE 128
4389 #else
4390 #define SIGNAL_FRAMESIZE 64
4391 #endif
4393 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4394 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4395 struct target_mcontext {
4396 target_ulong mc_gregs[48];
4397 /* Includes fpscr. */
4398 uint64_t mc_fregs[33];
4399 target_ulong mc_pad[2];
4400 /* We need to handle Altivec and SPE at the same time, which no
4401 kernel needs to do. Fortunately, the kernel defines this bit to
4402 be Altivec-register-large all the time, rather than trying to
4403 twiddle it based on the specific platform. */
4404 union {
4405 /* SPE vector registers. One extra for SPEFSCR. */
4406 uint32_t spe[33];
4407 /* Altivec vector registers. The packing of VSCR and VRSAVE
4408 varies depending on whether we're PPC64 or not: PPC64 splits
4409 them apart; PPC32 stuffs them together. */
4410 #if defined(TARGET_PPC64)
4411 #define QEMU_NVRREG 34
4412 #else
4413 #define QEMU_NVRREG 33
4414 #endif
4415 ppc_avr_t altivec[QEMU_NVRREG];
4416 #undef QEMU_NVRREG
4417 } mc_vregs __attribute__((__aligned__(16)));
4420 /* See arch/powerpc/include/asm/sigcontext.h. */
4421 struct target_sigcontext {
4422 target_ulong _unused[4];
4423 int32_t signal;
4424 #if defined(TARGET_PPC64)
4425 int32_t pad0;
4426 #endif
4427 target_ulong handler;
4428 target_ulong oldmask;
4429 target_ulong regs; /* struct pt_regs __user * */
4430 #if defined(TARGET_PPC64)
4431 struct target_mcontext mcontext;
4432 #endif
4435 /* Indices for target_mcontext.mc_gregs, below.
4436 See arch/powerpc/include/asm/ptrace.h for details. */
4437 enum {
4438 TARGET_PT_R0 = 0,
4439 TARGET_PT_R1 = 1,
4440 TARGET_PT_R2 = 2,
4441 TARGET_PT_R3 = 3,
4442 TARGET_PT_R4 = 4,
4443 TARGET_PT_R5 = 5,
4444 TARGET_PT_R6 = 6,
4445 TARGET_PT_R7 = 7,
4446 TARGET_PT_R8 = 8,
4447 TARGET_PT_R9 = 9,
4448 TARGET_PT_R10 = 10,
4449 TARGET_PT_R11 = 11,
4450 TARGET_PT_R12 = 12,
4451 TARGET_PT_R13 = 13,
4452 TARGET_PT_R14 = 14,
4453 TARGET_PT_R15 = 15,
4454 TARGET_PT_R16 = 16,
4455 TARGET_PT_R17 = 17,
4456 TARGET_PT_R18 = 18,
4457 TARGET_PT_R19 = 19,
4458 TARGET_PT_R20 = 20,
4459 TARGET_PT_R21 = 21,
4460 TARGET_PT_R22 = 22,
4461 TARGET_PT_R23 = 23,
4462 TARGET_PT_R24 = 24,
4463 TARGET_PT_R25 = 25,
4464 TARGET_PT_R26 = 26,
4465 TARGET_PT_R27 = 27,
4466 TARGET_PT_R28 = 28,
4467 TARGET_PT_R29 = 29,
4468 TARGET_PT_R30 = 30,
4469 TARGET_PT_R31 = 31,
4470 TARGET_PT_NIP = 32,
4471 TARGET_PT_MSR = 33,
4472 TARGET_PT_ORIG_R3 = 34,
4473 TARGET_PT_CTR = 35,
4474 TARGET_PT_LNK = 36,
4475 TARGET_PT_XER = 37,
4476 TARGET_PT_CCR = 38,
4477 /* Yes, there are two registers with #39. One is 64-bit only. */
4478 TARGET_PT_MQ = 39,
4479 TARGET_PT_SOFTE = 39,
4480 TARGET_PT_TRAP = 40,
4481 TARGET_PT_DAR = 41,
4482 TARGET_PT_DSISR = 42,
4483 TARGET_PT_RESULT = 43,
4484 TARGET_PT_REGS_COUNT = 44
4488 struct target_ucontext {
4489 target_ulong tuc_flags;
4490 target_ulong tuc_link; /* struct ucontext __user * */
4491 struct target_sigaltstack tuc_stack;
4492 #if !defined(TARGET_PPC64)
4493 int32_t tuc_pad[7];
4494 target_ulong tuc_regs; /* struct mcontext __user *
4495 points to uc_mcontext field */
4496 #endif
4497 target_sigset_t tuc_sigmask;
4498 #if defined(TARGET_PPC64)
4499 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4500 struct target_sigcontext tuc_sigcontext;
4501 #else
4502 int32_t tuc_maskext[30];
4503 int32_t tuc_pad2[3];
4504 struct target_mcontext tuc_mcontext;
4505 #endif
4508 /* See arch/powerpc/kernel/signal_32.c. */
4509 struct target_sigframe {
4510 struct target_sigcontext sctx;
4511 struct target_mcontext mctx;
4512 int32_t abigap[56];
4515 #if defined(TARGET_PPC64)
4517 #define TARGET_TRAMP_SIZE 6
4519 struct target_rt_sigframe {
4520 /* sys_rt_sigreturn requires the ucontext be the first field */
4521 struct target_ucontext uc;
4522 target_ulong _unused[2];
4523 uint32_t trampoline[TARGET_TRAMP_SIZE];
4524 target_ulong pinfo; /* struct siginfo __user * */
4525 target_ulong puc; /* void __user * */
4526 struct target_siginfo info;
4527 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4528 char abigap[288];
4529 } __attribute__((aligned(16)));
4531 #else
4533 struct target_rt_sigframe {
4534 struct target_siginfo info;
4535 struct target_ucontext uc;
4536 int32_t abigap[56];
4539 #endif
4541 #if defined(TARGET_PPC64)
4543 struct target_func_ptr {
4544 target_ulong entry;
4545 target_ulong toc;
4548 #endif
4550 /* We use the mc_pad field for the signal return trampoline. */
4551 #define tramp mc_pad
4553 /* See arch/powerpc/kernel/signal.c. */
4554 static target_ulong get_sigframe(struct target_sigaction *ka,
4555 CPUPPCState *env,
4556 int frame_size)
4558 target_ulong oldsp, newsp;
4560 oldsp = env->gpr[1];
4562 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4563 (sas_ss_flags(oldsp) == 0)) {
4564 oldsp = (target_sigaltstack_used.ss_sp
4565 + target_sigaltstack_used.ss_size);
4568 newsp = (oldsp - frame_size) & ~0xFUL;
4570 return newsp;
4573 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4575 target_ulong msr = env->msr;
4576 int i;
4577 target_ulong ccr = 0;
4579 /* In general, the kernel attempts to be intelligent about what it
4580 needs to save for Altivec/FP/SPE registers. We don't care that
4581 much, so we just go ahead and save everything. */
4583 /* Save general registers. */
4584 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4585 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4587 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4588 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4589 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4590 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4592 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4593 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4595 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4597 /* Save Altivec registers if necessary. */
4598 if (env->insns_flags & PPC_ALTIVEC) {
4599 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4600 ppc_avr_t *avr = &env->avr[i];
4601 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4603 __put_user(avr->u64[0], &vreg->u64[0]);
4604 __put_user(avr->u64[1], &vreg->u64[1]);
4606 /* Set MSR_VR in the saved MSR value to indicate that
4607 frame->mc_vregs contains valid data. */
4608 msr |= MSR_VR;
4609 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4610 &frame->mc_vregs.altivec[32].u32[3]);
4613 /* Save floating point registers. */
4614 if (env->insns_flags & PPC_FLOAT) {
4615 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4616 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4618 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4621 /* Save SPE registers. The kernel only saves the high half. */
4622 if (env->insns_flags & PPC_SPE) {
4623 #if defined(TARGET_PPC64)
4624 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4625 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4627 #else
4628 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4629 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4631 #endif
4632 /* Set MSR_SPE in the saved MSR value to indicate that
4633 frame->mc_vregs contains valid data. */
4634 msr |= MSR_SPE;
4635 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4638 /* Store MSR. */
4639 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4642 static void encode_trampoline(int sigret, uint32_t *tramp)
4644 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4645 if (sigret) {
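        /* 0x38000000 is "addi r0,r0,0" (li r0,0); OR-ing in sigret sets the
           immediate, and 0x44000002 is the sc opcode. */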
4646 __put_user(0x38000000 | sigret, &tramp[0]);
4647 __put_user(0x44000002, &tramp[1]);
4651 static void restore_user_regs(CPUPPCState *env,
4652 struct target_mcontext *frame, int sig)
4654 target_ulong save_r2 = 0;
4655 target_ulong msr;
4656 target_ulong ccr;
4658 int i;
4660 if (!sig) {
4661 save_r2 = env->gpr[2];
4664 /* Restore general registers. */
4665 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4666 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4668 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4669 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4670 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4671 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4672 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4674 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4675 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4678 if (!sig) {
4679 env->gpr[2] = save_r2;
4681 /* Restore MSR. */
4682 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4684 /* If doing signal return, restore the previous little-endian mode. */
4685 if (sig)
4686 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4688 /* Restore Altivec registers if necessary. */
4689 if (env->insns_flags & PPC_ALTIVEC) {
4690 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4691 ppc_avr_t *avr = &env->avr[i];
4692 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4694 __get_user(avr->u64[0], &vreg->u64[0]);
4695 __get_user(avr->u64[1], &vreg->u64[1]);
4697 /* The Altivec save area also holds VRSAVE in its final slot;
4698 restore it along with the vector registers. */
4699 __get_user(env->spr[SPR_VRSAVE],
4700 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4703 /* Restore floating point registers. */
4704 if (env->insns_flags & PPC_FLOAT) {
4705 uint64_t fpscr;
4706 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4707 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4709 __get_user(fpscr, &frame->mc_fregs[32]);
4710 env->fpscr = (uint32_t) fpscr;
4713 /* Restore SPE registers. The kernel only saved the high half. */
4714 if (env->insns_flags & PPC_SPE) {
4715 #if defined(TARGET_PPC64)
4716 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4717 uint32_t hi;
4719 __get_user(hi, &frame->mc_vregs.spe[i]);
4720 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4722 #else
4723 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4724 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4726 #endif
4727 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
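/* Build the old-style (non-RT) PPC signal frame: sigcontext plus mcontext,
   with an on-stack li/sc trampoline because QEMU provides no VDSO. */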
4731 static void setup_frame(int sig, struct target_sigaction *ka,
4732 target_sigset_t *set, CPUPPCState *env)
4734 struct target_sigframe *frame;
4735 struct target_sigcontext *sc;
4736 target_ulong frame_addr, newsp;
4737 int err = 0;
4738 #if defined(TARGET_PPC64)
4739 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4740 #endif
4742 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4743 trace_user_setup_frame(env, frame_addr);
4744 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4745 goto sigsegv;
4746 sc = &frame->sctx;
4748 __put_user(ka->_sa_handler, &sc->handler);
4749 __put_user(set->sig[0], &sc->oldmask);
4750 #if TARGET_ABI_BITS == 64
4751 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4752 #else
4753 __put_user(set->sig[1], &sc->_unused[3]);
4754 #endif
4755 __put_user(h2g(&frame->mctx), &sc->regs);
4756 __put_user(sig, &sc->signal);
4758 /* Save user regs. */
4759 save_user_regs(env, &frame->mctx);
4761 /* Construct the trampoline code on the stack. */
4762 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4764 /* The kernel checks for the presence of a VDSO here. We don't
4765 emulate a vdso, so use a sigreturn system call. */
4766 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4768 /* Turn off all fp exceptions. */
4769 env->fpscr = 0;
4771 /* Create a stack frame for the caller of the handler. */
4772 newsp = frame_addr - SIGNAL_FRAMESIZE;
4773 err |= put_user(env->gpr[1], newsp, target_ulong);
4775 if (err)
4776 goto sigsegv;
4778 /* Set up registers for signal handler. */
4779 env->gpr[1] = newsp;
4780 env->gpr[3] = sig;
4781 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4783 #if defined(TARGET_PPC64)
4784 if (get_ppc64_abi(image) < 2) {
4785 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4786 struct target_func_ptr *handler =
4787 (struct target_func_ptr *)g2h(ka->_sa_handler);
4788 env->nip = tswapl(handler->entry);
4789 env->gpr[2] = tswapl(handler->toc);
4790 } else {
4791 /* ELFv2 PPC64 function pointers are entry points, but R12
4792 * must also be set */
4793 env->nip = tswapl((target_ulong) ka->_sa_handler);
4794 env->gpr[12] = env->nip;
4796 #else
4797 env->nip = (target_ulong) ka->_sa_handler;
4798 #endif
4800 /* Signal handlers are entered in big-endian mode. */
4801 env->msr &= ~(1ull << MSR_LE);
4803 unlock_user_struct(frame, frame_addr, 1);
4804 return;
4806 sigsegv:
4807 unlock_user_struct(frame, frame_addr, 1);
4808 force_sig(TARGET_SIGSEGV);
4811 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4812 target_siginfo_t *info,
4813 target_sigset_t *set, CPUPPCState *env)
4815 struct target_rt_sigframe *rt_sf;
4816 uint32_t *trampptr = 0;
4817 struct target_mcontext *mctx = 0;
4818 target_ulong rt_sf_addr, newsp = 0;
4819 int i, err = 0;
4820 #if defined(TARGET_PPC64)
4821 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4822 #endif
4824 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4825 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4826 goto sigsegv;
4828 tswap_siginfo(&rt_sf->info, info);
4830 __put_user(0, &rt_sf->uc.tuc_flags);
4831 __put_user(0, &rt_sf->uc.tuc_link);
4832 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4833 &rt_sf->uc.tuc_stack.ss_sp);
4834 __put_user(sas_ss_flags(env->gpr[1]),
4835 &rt_sf->uc.tuc_stack.ss_flags);
4836 __put_user(target_sigaltstack_used.ss_size,
4837 &rt_sf->uc.tuc_stack.ss_size);
4838 #if !defined(TARGET_PPC64)
4839 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4840 &rt_sf->uc.tuc_regs);
4841 #endif
4842 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4843 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4846 #if defined(TARGET_PPC64)
4847 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4848 trampptr = &rt_sf->trampoline[0];
4849 #else
4850 mctx = &rt_sf->uc.tuc_mcontext;
4851 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4852 #endif
4854 save_user_regs(env, mctx);
4855 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4857 /* The kernel checks for the presence of a VDSO here. We don't
4858 emulate a vdso, so use a sigreturn system call. */
4859 env->lr = (target_ulong) h2g(trampptr);
4861 /* Turn off all fp exceptions. */
4862 env->fpscr = 0;
4864 /* Create a stack frame for the caller of the handler. */
4865 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4866 err |= put_user(env->gpr[1], newsp, target_ulong);
4868 if (err)
4869 goto sigsegv;
4871 /* Set up registers for signal handler. */
4872 env->gpr[1] = newsp;
4873 env->gpr[3] = (target_ulong) sig;
4874 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4875 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4876 env->gpr[6] = (target_ulong) h2g(rt_sf);
4878 #if defined(TARGET_PPC64)
4879 if (get_ppc64_abi(image) < 2) {
4880 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4881 struct target_func_ptr *handler =
4882 (struct target_func_ptr *)g2h(ka->_sa_handler);
4883 env->nip = tswapl(handler->entry);
4884 env->gpr[2] = tswapl(handler->toc);
4885 } else {
4886 /* ELFv2 PPC64 function pointers are entry points, but R12
4887 * must also be set */
4888 env->nip = tswapl((target_ulong) ka->_sa_handler);
4889 env->gpr[12] = env->nip;
4891 #else
4892 env->nip = (target_ulong) ka->_sa_handler;
4893 #endif
4895 /* Signal handlers are entered in big-endian mode. */
4896 env->msr &= ~(1ull << MSR_LE);
4898 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4899 return;
4901 sigsegv:
4902 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4903 force_sig(TARGET_SIGSEGV);
4907 long do_sigreturn(CPUPPCState *env)
4909 struct target_sigcontext *sc = NULL;
4910 struct target_mcontext *sr = NULL;
4911 target_ulong sr_addr = 0, sc_addr;
4912 sigset_t blocked;
4913 target_sigset_t set;
4915 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4916 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4917 goto sigsegv;
4919 #if defined(TARGET_PPC64)
4920 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4921 #else
4922 __get_user(set.sig[0], &sc->oldmask);
4923 __get_user(set.sig[1], &sc->_unused[3]);
4924 #endif
4925 target_to_host_sigset_internal(&blocked, &set);
4926 set_sigmask(&blocked);
4928 __get_user(sr_addr, &sc->regs);
4929 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4930 goto sigsegv;
4931 restore_user_regs(env, sr, 1);
4933 unlock_user_struct(sr, sr_addr, 1);
4934 unlock_user_struct(sc, sc_addr, 1);
4935 return -TARGET_QEMU_ESIGRETURN;
4937 sigsegv:
4938 unlock_user_struct(sr, sr_addr, 1);
4939 unlock_user_struct(sc, sc_addr, 1);
4940 force_sig(TARGET_SIGSEGV);
4941 return 0;
4944 /* See arch/powerpc/kernel/signal_32.c. */
4945 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4947 struct target_mcontext *mcp;
4948 target_ulong mcp_addr;
4949 sigset_t blocked;
4950 target_sigset_t set;
4952 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4953 sizeof (set)))
4954 return 1;
4956 #if defined(TARGET_PPC64)
4957 mcp_addr = h2g(ucp) +
4958 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4959 #else
4960 __get_user(mcp_addr, &ucp->tuc_regs);
4961 #endif
4963 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4964 return 1;
4966 target_to_host_sigset_internal(&blocked, &set);
4967 set_sigmask(&blocked);
4968 restore_user_regs(env, mcp, sig);
4970 unlock_user_struct(mcp, mcp_addr, 1);
4971 return 0;
4974 long do_rt_sigreturn(CPUPPCState *env)
4976 struct target_rt_sigframe *rt_sf = NULL;
4977 target_ulong rt_sf_addr;
4979 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4980 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4981 goto sigsegv;
4983 if (do_setcontext(&rt_sf->uc, env, 1))
4984 goto sigsegv;
4986 do_sigaltstack(rt_sf_addr
4987 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4988 0, env->gpr[1]);
4990 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4991 return -TARGET_QEMU_ESIGRETURN;
4993 sigsegv:
4994 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4995 force_sig(TARGET_SIGSEGV);
4996 return 0;
4999 #elif defined(TARGET_M68K)
5001 struct target_sigcontext {
5002 abi_ulong sc_mask;
5003 abi_ulong sc_usp;
5004 abi_ulong sc_d0;
5005 abi_ulong sc_d1;
5006 abi_ulong sc_a0;
5007 abi_ulong sc_a1;
5008 unsigned short sc_sr;
5009 abi_ulong sc_pc;
5012 struct target_sigframe
5014 abi_ulong pretcode;
5015 int sig;
5016 int code;
5017 abi_ulong psc;
5018 char retcode[8];
5019 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5020 struct target_sigcontext sc;
5023 typedef int target_greg_t;
5024 #define TARGET_NGREG 18
5025 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5027 typedef struct target_fpregset {
5028 int f_fpcntl[3];
5029 int f_fpregs[8*3];
5030 } target_fpregset_t;
5032 struct target_mcontext {
5033 int version;
5034 target_gregset_t gregs;
5035 target_fpregset_t fpregs;
5038 #define TARGET_MCONTEXT_VERSION 2
5040 struct target_ucontext {
5041 abi_ulong tuc_flags;
5042 abi_ulong tuc_link;
5043 target_stack_t tuc_stack;
5044 struct target_mcontext tuc_mcontext;
5045 abi_long tuc_filler[80];
5046 target_sigset_t tuc_sigmask;
5049 struct target_rt_sigframe
5051 abi_ulong pretcode;
5052 int sig;
5053 abi_ulong pinfo;
5054 abi_ulong puc;
5055 char retcode[8];
5056 struct target_siginfo info;
5057 struct target_ucontext uc;
5060 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5061 abi_ulong mask)
5063 __put_user(mask, &sc->sc_mask);
5064 __put_user(env->aregs[7], &sc->sc_usp);
5065 __put_user(env->dregs[0], &sc->sc_d0);
5066 __put_user(env->dregs[1], &sc->sc_d1);
5067 __put_user(env->aregs[0], &sc->sc_a0);
5068 __put_user(env->aregs[1], &sc->sc_a1);
5069 __put_user(env->sr, &sc->sc_sr);
5070 __put_user(env->pc, &sc->sc_pc);
5073 static void
5074 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5076 int temp;
5078 __get_user(env->aregs[7], &sc->sc_usp);
5079 __get_user(env->dregs[0], &sc->sc_d0);
5080 __get_user(env->dregs[1], &sc->sc_d1);
5081 __get_user(env->aregs[0], &sc->sc_a0);
5082 __get_user(env->aregs[1], &sc->sc_a1);
5083 __get_user(env->pc, &sc->sc_pc);
5084 __get_user(temp, &sc->sc_sr);
5085 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5089 * Determine which stack to use.
5091 static inline abi_ulong
5092 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5093 size_t frame_size)
5095 unsigned long sp;
5097 sp = regs->aregs[7];
5099 /* This is the X/Open sanctioned signal stack switching. */
5100 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5101 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5104 return ((sp - frame_size) & -8UL);
5107 static void setup_frame(int sig, struct target_sigaction *ka,
5108 target_sigset_t *set, CPUM68KState *env)
5110 struct target_sigframe *frame;
5111 abi_ulong frame_addr;
5112 abi_ulong retcode_addr;
5113 abi_ulong sc_addr;
5114 int i;
5116 frame_addr = get_sigframe(ka, env, sizeof *frame);
5117 trace_user_setup_frame(env, frame_addr);
5118 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5119 goto give_sigsegv;
5122 __put_user(sig, &frame->sig);
5124 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5125 __put_user(sc_addr, &frame->psc);
5127 setup_sigcontext(&frame->sc, env, set->sig[0]);
5129 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5130 __put_user(set->sig[i], &frame->extramask[i - 1]);
5133 /* Set up to return from userspace. */
5135 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5136 __put_user(retcode_addr, &frame->pretcode);
5138 /* moveq #,d0; trap #0 */
5140 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5141 (uint32_t *)(frame->retcode));
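    /* On this big-endian target the 32-bit store packs two instructions:
       0x70nn (moveq #NR,d0) followed by 0x4e40 (trap #0). */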
5143 /* Set up to return from userspace */
5145 env->aregs[7] = frame_addr;
5146 env->pc = ka->_sa_handler;
5148 unlock_user_struct(frame, frame_addr, 1);
5149 return;
5151 give_sigsegv:
5152 force_sig(TARGET_SIGSEGV);
5155 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5156 CPUM68KState *env)
5158 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5160 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5161 __put_user(env->dregs[0], &gregs[0]);
5162 __put_user(env->dregs[1], &gregs[1]);
5163 __put_user(env->dregs[2], &gregs[2]);
5164 __put_user(env->dregs[3], &gregs[3]);
5165 __put_user(env->dregs[4], &gregs[4]);
5166 __put_user(env->dregs[5], &gregs[5]);
5167 __put_user(env->dregs[6], &gregs[6]);
5168 __put_user(env->dregs[7], &gregs[7]);
5169 __put_user(env->aregs[0], &gregs[8]);
5170 __put_user(env->aregs[1], &gregs[9]);
5171 __put_user(env->aregs[2], &gregs[10]);
5172 __put_user(env->aregs[3], &gregs[11]);
5173 __put_user(env->aregs[4], &gregs[12]);
5174 __put_user(env->aregs[5], &gregs[13]);
5175 __put_user(env->aregs[6], &gregs[14]);
5176 __put_user(env->aregs[7], &gregs[15]);
5177 __put_user(env->pc, &gregs[16]);
5178 __put_user(env->sr, &gregs[17]);
5180 return 0;
5183 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5184 struct target_ucontext *uc)
5186 int temp;
5187 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5189 __get_user(temp, &uc->tuc_mcontext.version);
5190 if (temp != TARGET_MCONTEXT_VERSION)
5191 goto badframe;
5193 /* restore passed registers */
5194 __get_user(env->dregs[0], &gregs[0]);
5195 __get_user(env->dregs[1], &gregs[1]);
5196 __get_user(env->dregs[2], &gregs[2]);
5197 __get_user(env->dregs[3], &gregs[3]);
5198 __get_user(env->dregs[4], &gregs[4]);
5199 __get_user(env->dregs[5], &gregs[5]);
5200 __get_user(env->dregs[6], &gregs[6]);
5201 __get_user(env->dregs[7], &gregs[7]);
5202 __get_user(env->aregs[0], &gregs[8]);
5203 __get_user(env->aregs[1], &gregs[9]);
5204 __get_user(env->aregs[2], &gregs[10]);
5205 __get_user(env->aregs[3], &gregs[11]);
5206 __get_user(env->aregs[4], &gregs[12]);
5207 __get_user(env->aregs[5], &gregs[13]);
5208 __get_user(env->aregs[6], &gregs[14]);
5209 __get_user(env->aregs[7], &gregs[15]);
5210 __get_user(env->pc, &gregs[16]);
5211 __get_user(temp, &gregs[17]);
5212 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5214 return 0;
5216 badframe:
5217 return 1;
5220 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5221 target_siginfo_t *info,
5222 target_sigset_t *set, CPUM68KState *env)
5224 struct target_rt_sigframe *frame;
5225 abi_ulong frame_addr;
5226 abi_ulong retcode_addr;
5227 abi_ulong info_addr;
5228 abi_ulong uc_addr;
5229 int err = 0;
5230 int i;
5232 frame_addr = get_sigframe(ka, env, sizeof *frame);
5233 trace_user_setup_rt_frame(env, frame_addr);
5234 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5235 goto give_sigsegv;
5238 __put_user(sig, &frame->sig);
5240 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5241 __put_user(info_addr, &frame->pinfo);
5243 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5244 __put_user(uc_addr, &frame->puc);
5246 tswap_siginfo(&frame->info, info);
5248 /* Create the ucontext */
5250 __put_user(0, &frame->uc.tuc_flags);
5251 __put_user(0, &frame->uc.tuc_link);
5252 __put_user(target_sigaltstack_used.ss_sp,
5253 &frame->uc.tuc_stack.ss_sp);
5254 __put_user(sas_ss_flags(env->aregs[7]),
5255 &frame->uc.tuc_stack.ss_flags);
5256 __put_user(target_sigaltstack_used.ss_size,
5257 &frame->uc.tuc_stack.ss_size);
5258 err |= target_rt_setup_ucontext(&frame->uc, env);
5260 if (err)
5261 goto give_sigsegv;
5263 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5264 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5267 /* Set up to return from userspace. */
5269 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5270 __put_user(retcode_addr, &frame->pretcode);
5272 /* moveq #,d0; notb d0; trap #0 */
5274 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5275 (uint32_t *)(frame->retcode + 0));
5276 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
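    /* moveq sign-extends its 8-bit immediate, so the syscall number is
       stored complemented and not.b d0 (0x4600) flips it back before
       trap #0 (0x4e40) issues the syscall. */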
5278 if (err)
5279 goto give_sigsegv;
5281 /* Set up to return from userspace */
5283 env->aregs[7] = frame_addr;
5284 env->pc = ka->_sa_handler;
5286 unlock_user_struct(frame, frame_addr, 1);
5287 return;
5289 give_sigsegv:
5290 unlock_user_struct(frame, frame_addr, 1);
5291 force_sig(TARGET_SIGSEGV);
5294 long do_sigreturn(CPUM68KState *env)
5296 struct target_sigframe *frame;
5297 abi_ulong frame_addr = env->aregs[7] - 4;
5298 target_sigset_t target_set;
5299 sigset_t set;
5300 int i;
5302 trace_user_do_sigreturn(env, frame_addr);
5303 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5304 goto badframe;
5306 /* set blocked signals */
5308 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5310 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5311 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5314 target_to_host_sigset_internal(&set, &target_set);
5315 set_sigmask(&set);
5317 /* restore registers */
5319 restore_sigcontext(env, &frame->sc);
5321 unlock_user_struct(frame, frame_addr, 0);
5322 return -TARGET_QEMU_ESIGRETURN;
5324 badframe:
5325 force_sig(TARGET_SIGSEGV);
5326 return 0;
5329 long do_rt_sigreturn(CPUM68KState *env)
5331 struct target_rt_sigframe *frame;
5332 abi_ulong frame_addr = env->aregs[7] - 4;
5334 sigset_t set;
5336 trace_user_do_rt_sigreturn(env, frame_addr);
5337 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5338 goto badframe;
5340 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5341 set_sigmask(&set);
5343 /* restore registers */
5345 if (target_rt_restore_ucontext(env, &frame->uc))
5346 goto badframe;
5348 if (do_sigaltstack(frame_addr +
5349 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5350 0, get_sp_from_cpustate(env)) == -EFAULT)
5351 goto badframe;
5353 unlock_user_struct(frame, frame_addr, 0);
5354 return -TARGET_QEMU_ESIGRETURN;
5356 badframe:
5357 unlock_user_struct(frame, frame_addr, 0);
5358 force_sig(TARGET_SIGSEGV);
5359 return 0;
5362 #elif defined(TARGET_ALPHA)
5364 struct target_sigcontext {
5365 abi_long sc_onstack;
5366 abi_long sc_mask;
5367 abi_long sc_pc;
5368 abi_long sc_ps;
5369 abi_long sc_regs[32];
5370 abi_long sc_ownedfp;
5371 abi_long sc_fpregs[32];
5372 abi_ulong sc_fpcr;
5373 abi_ulong sc_fp_control;
5374 abi_ulong sc_reserved1;
5375 abi_ulong sc_reserved2;
5376 abi_ulong sc_ssize;
5377 abi_ulong sc_sbase;
5378 abi_ulong sc_traparg_a0;
5379 abi_ulong sc_traparg_a1;
5380 abi_ulong sc_traparg_a2;
5381 abi_ulong sc_fp_trap_pc;
5382 abi_ulong sc_fp_trigger_sum;
5383 abi_ulong sc_fp_trigger_inst;
5386 struct target_ucontext {
5387 abi_ulong tuc_flags;
5388 abi_ulong tuc_link;
5389 abi_ulong tuc_osf_sigmask;
5390 target_stack_t tuc_stack;
5391 struct target_sigcontext tuc_mcontext;
5392 target_sigset_t tuc_sigmask;
5395 struct target_sigframe {
5396 struct target_sigcontext sc;
5397 unsigned int retcode[3];
5400 struct target_rt_sigframe {
5401 target_siginfo_t info;
5402 struct target_ucontext uc;
5403 unsigned int retcode[3];
5406 #define INSN_MOV_R30_R16 0x47fe0410
5407 #define INSN_LDI_R0 0x201f0000
5408 #define INSN_CALLSYS 0x00000083
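/* The three-word stub built below is the usual Alpha sigreturn sequence:
   copy SP into a0, load the syscall number into v0 (the LDI immediate is
   added in at frame setup) and trap via callsys. */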
5410 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5411 abi_ulong frame_addr, target_sigset_t *set)
5413 int i;
5415 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5416 __put_user(set->sig[0], &sc->sc_mask);
5417 __put_user(env->pc, &sc->sc_pc);
5418 __put_user(8, &sc->sc_ps);
5420 for (i = 0; i < 31; ++i) {
5421 __put_user(env->ir[i], &sc->sc_regs[i]);
5423 __put_user(0, &sc->sc_regs[31]);
5425 for (i = 0; i < 31; ++i) {
5426 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5428 __put_user(0, &sc->sc_fpregs[31]);
5429 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5431 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5432 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5433 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5436 static void restore_sigcontext(CPUAlphaState *env,
5437 struct target_sigcontext *sc)
5439 uint64_t fpcr;
5440 int i;
5442 __get_user(env->pc, &sc->sc_pc);
5444 for (i = 0; i < 31; ++i) {
5445 __get_user(env->ir[i], &sc->sc_regs[i]);
5447 for (i = 0; i < 31; ++i) {
5448 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5451 __get_user(fpcr, &sc->sc_fpcr);
5452 cpu_alpha_store_fpcr(env, fpcr);
5455 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5456 CPUAlphaState *env,
5457 unsigned long framesize)
5459 abi_ulong sp = env->ir[IR_SP];
5461 /* This is the X/Open sanctioned signal stack switching. */
5462 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5463 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5465 return (sp - framesize) & -32;
5468 static void setup_frame(int sig, struct target_sigaction *ka,
5469 target_sigset_t *set, CPUAlphaState *env)
5471 abi_ulong frame_addr, r26;
5472 struct target_sigframe *frame;
5473 int err = 0;
5475 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5476 trace_user_setup_frame(env, frame_addr);
5477 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5478 goto give_sigsegv;
5481 setup_sigcontext(&frame->sc, env, frame_addr, set);
5483 if (ka->sa_restorer) {
5484 r26 = ka->sa_restorer;
5485 } else {
5486 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5487 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5488 &frame->retcode[1]);
5489 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5490 /* imb() */
5491 r26 = frame_addr;
5494 unlock_user_struct(frame, frame_addr, 1);
5496 if (err) {
5497 give_sigsegv:
5498 if (sig == TARGET_SIGSEGV) {
5499 ka->_sa_handler = TARGET_SIG_DFL;
5501 force_sig(TARGET_SIGSEGV);
5504 env->ir[IR_RA] = r26;
5505 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5506 env->ir[IR_A0] = sig;
5507 env->ir[IR_A1] = 0;
5508 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5509 env->ir[IR_SP] = frame_addr;
5512 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5513 target_siginfo_t *info,
5514 target_sigset_t *set, CPUAlphaState *env)
5516 abi_ulong frame_addr, r26;
5517 struct target_rt_sigframe *frame;
5518 int i, err = 0;
5520 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5521 trace_user_setup_rt_frame(env, frame_addr);
5522 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5523 goto give_sigsegv;
5526 tswap_siginfo(&frame->info, info);
5528 __put_user(0, &frame->uc.tuc_flags);
5529 __put_user(0, &frame->uc.tuc_link);
5530 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5531 __put_user(target_sigaltstack_used.ss_sp,
5532 &frame->uc.tuc_stack.ss_sp);
5533 __put_user(sas_ss_flags(env->ir[IR_SP]),
5534 &frame->uc.tuc_stack.ss_flags);
5535 __put_user(target_sigaltstack_used.ss_size,
5536 &frame->uc.tuc_stack.ss_size);
5537 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5538 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5539 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5542 if (ka->sa_restorer) {
5543 r26 = ka->sa_restorer;
5544 } else {
5545 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5546 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5547 &frame->retcode[1]);
5548 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5549 /* imb(); */
5550 r26 = frame_addr;
5553 if (err) {
5554 give_sigsegv:
5555 if (sig == TARGET_SIGSEGV) {
5556 ka->_sa_handler = TARGET_SIG_DFL;
5558 force_sig(TARGET_SIGSEGV);
5561 env->ir[IR_RA] = r26;
5562 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5563 env->ir[IR_A0] = sig;
5564 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5565 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5566 env->ir[IR_SP] = frame_addr;
5569 long do_sigreturn(CPUAlphaState *env)
5571 struct target_sigcontext *sc;
5572 abi_ulong sc_addr = env->ir[IR_A0];
5573 target_sigset_t target_set;
5574 sigset_t set;
5576 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5577 goto badframe;
5580 target_sigemptyset(&target_set);
5581 __get_user(target_set.sig[0], &sc->sc_mask);
5583 target_to_host_sigset_internal(&set, &target_set);
5584 set_sigmask(&set);
5586 restore_sigcontext(env, sc);
5587 unlock_user_struct(sc, sc_addr, 0);
5588 return -TARGET_QEMU_ESIGRETURN;
5590 badframe:
5591 force_sig(TARGET_SIGSEGV);
5594 long do_rt_sigreturn(CPUAlphaState *env)
5596 abi_ulong frame_addr = env->ir[IR_A0];
5597 struct target_rt_sigframe *frame;
5598 sigset_t set;
5600 trace_user_do_rt_sigreturn(env, frame_addr);
5601 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5602 goto badframe;
5604 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5605 set_sigmask(&set);
5607 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5608 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5609 uc.tuc_stack),
5610 0, env->ir[IR_SP]) == -EFAULT) {
5611 goto badframe;
5614 unlock_user_struct(frame, frame_addr, 0);
5615 return -TARGET_QEMU_ESIGRETURN;
5618 badframe:
5619 unlock_user_struct(frame, frame_addr, 0);
5620 force_sig(TARGET_SIGSEGV);
5623 #elif defined(TARGET_TILEGX)
5625 struct target_sigcontext {
5626 union {
5627 /* General-purpose registers. */
5628 abi_ulong gregs[56];
5629 struct {
5630 abi_ulong __gregs[53];
5631 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5632 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5633 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5636 abi_ulong pc; /* Program counter. */
5637 abi_ulong ics; /* In Interrupt Critical Section? */
5638 abi_ulong faultnum; /* Fault number. */
5639 abi_ulong pad[5];
5642 struct target_ucontext {
5643 abi_ulong tuc_flags;
5644 abi_ulong tuc_link;
5645 target_stack_t tuc_stack;
5646 struct target_sigcontext tuc_mcontext;
5647 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5650 struct target_rt_sigframe {
5651 unsigned char save_area[16]; /* caller save area */
5652 struct target_siginfo info;
5653 struct target_ucontext uc;
5654 abi_ulong retcode[2];
5657 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5658 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
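/* 139 matches __NR_rt_sigreturn in the asm-generic syscall table that
   tilegx uses; the stub loads it into r10 and issues swint1. */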
5661 static void setup_sigcontext(struct target_sigcontext *sc,
5662 CPUArchState *env, int signo)
5664 int i;
5666 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5667 __put_user(env->regs[i], &sc->gregs[i]);
5670 __put_user(env->pc, &sc->pc);
5671 __put_user(0, &sc->ics);
5672 __put_user(signo, &sc->faultnum);
5675 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5677 int i;
5679 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5680 __get_user(env->regs[i], &sc->gregs[i]);
5683 __get_user(env->pc, &sc->pc);
5686 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5687 size_t frame_size)
5689 unsigned long sp = env->regs[TILEGX_R_SP];
5691 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5692 return -1UL;
5695 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5696 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5699 sp -= frame_size;
5700 sp &= -16UL;
5701 return sp;
5704 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5705 target_siginfo_t *info,
5706 target_sigset_t *set, CPUArchState *env)
5708 abi_ulong frame_addr;
5709 struct target_rt_sigframe *frame;
5710 unsigned long restorer;
5712 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5713 trace_user_setup_rt_frame(env, frame_addr);
5714 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5715 goto give_sigsegv;
5718 /* Always write at least the signal number for the stack backtracer. */
5719 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5720 /* At sigreturn time, restore the callee-save registers too. */
5721 tswap_siginfo(&frame->info, info);
5722 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
5723 } else {
5724 __put_user(info->si_signo, &frame->info.si_signo);
5727 /* Create the ucontext. */
5728 __put_user(0, &frame->uc.tuc_flags);
5729 __put_user(0, &frame->uc.tuc_link);
5730 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5731 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5732 &frame->uc.tuc_stack.ss_flags);
5733 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5734 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5736 if (ka->sa_flags & TARGET_SA_RESTORER) {
5737 restorer = (unsigned long) ka->sa_restorer;
5738 } else {
5739 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5740 __put_user(INSN_SWINT1, &frame->retcode[1]);
5741 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5743 env->pc = (unsigned long) ka->_sa_handler;
5744 env->regs[TILEGX_R_SP] = frame_addr;
5745 env->regs[TILEGX_R_LR] = restorer;
5746 env->regs[0] = (unsigned long) sig;
5747 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5748 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5749 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
5751 unlock_user_struct(frame, frame_addr, 1);
5752 return;
5754 give_sigsegv:
5755 if (sig == TARGET_SIGSEGV) {
5756 ka->_sa_handler = TARGET_SIG_DFL;
5758 force_sig(TARGET_SIGSEGV /* , current */);
5761 long do_rt_sigreturn(CPUTLGState *env)
5763 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5764 struct target_rt_sigframe *frame;
5765 sigset_t set;
5767 trace_user_do_rt_sigreturn(env, frame_addr);
5768 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5769 goto badframe;
5771 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5772 set_sigmask(&set);
5774 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5775 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5776 uc.tuc_stack),
5777 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5778 goto badframe;
5781 unlock_user_struct(frame, frame_addr, 0);
5782 return -TARGET_QEMU_ESIGRETURN;
5785 badframe:
5786 unlock_user_struct(frame, frame_addr, 0);
5787 force_sig(TARGET_SIGSEGV);
5790 #else
5792 static void setup_frame(int sig, struct target_sigaction *ka,
5793 target_sigset_t *set, CPUArchState *env)
5795 fprintf(stderr, "setup_frame: not implemented\n");
5798 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5799 target_siginfo_t *info,
5800 target_sigset_t *set, CPUArchState *env)
5802 fprintf(stderr, "setup_rt_frame: not implemented\n");
5805 long do_sigreturn(CPUArchState *env)
5807 fprintf(stderr, "do_sigreturn: not implemented\n");
5808 return -TARGET_ENOSYS;
5811 long do_rt_sigreturn(CPUArchState *env)
5813 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5814 return -TARGET_ENOSYS;
5817 #endif
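/* Deliver one pending guest signal: consult the registered handler, apply
   its sa_mask, and build the target-specific signal frame. */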
5819 static void handle_pending_signal(CPUArchState *cpu_env, int sig)
5821 CPUState *cpu = ENV_GET_CPU(cpu_env);
5822 abi_ulong handler;
5823 sigset_t set;
5824 target_sigset_t target_old_set;
5825 struct target_sigaction *sa;
5826 TaskState *ts = cpu->opaque;
5827 struct emulated_sigtable *k = &ts->sigtab[sig - 1];
5829 trace_user_handle_signal(cpu_env, sig);
5830 /* dequeue signal */
5831 k->pending = 0;
5833 sig = gdb_handlesig(cpu, sig);
5834 if (!sig) {
5835 sa = NULL;
5836 handler = TARGET_SIG_IGN;
5837 } else {
5838 sa = &sigact_table[sig - 1];
5839 handler = sa->_sa_handler;
5842 if (handler == TARGET_SIG_DFL) {
5843 /* default handler: ignore some signals. The others are job control or fatal */
5844 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5845 kill(getpid(),SIGSTOP);
5846 } else if (sig != TARGET_SIGCHLD &&
5847 sig != TARGET_SIGURG &&
5848 sig != TARGET_SIGWINCH &&
5849 sig != TARGET_SIGCONT) {
5850 force_sig(sig);
5852 } else if (handler == TARGET_SIG_IGN) {
5853 /* ignore sig */
5854 } else if (handler == TARGET_SIG_ERR) {
5855 force_sig(sig);
5856 } else {
5857 /* compute the blocked signals during the handler execution */
5858 sigset_t *blocked_set;
5860 target_to_host_sigset(&set, &sa->sa_mask);
5861 /* SA_NODEFER indicates that the current signal should not be
5862 blocked during the handler */
5863 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5864 sigaddset(&set, target_to_host_signal(sig));
5866 /* save the previous blocked signal state to restore it at the
5867 end of the signal execution (see do_sigreturn) */
5868 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
5870 /* block signals in the handler */
5871 blocked_set = ts->in_sigsuspend ?
5872 &ts->sigsuspend_mask : &ts->signal_mask;
5873 sigorset(&ts->signal_mask, blocked_set, &set);
5874 ts->in_sigsuspend = 0;
5876 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5877 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5879 CPUX86State *env = cpu_env;
5880 if (env->eflags & VM_MASK)
5881 save_v86_state(env);
5883 #endif
5884 /* prepare the stack frame of the virtual CPU */
5885 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5886 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5887 /* These targets do not have traditional signals. */
5888 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5889 #else
5890 if (sa->sa_flags & TARGET_SA_SIGINFO)
5891 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5892 else
5893 setup_frame(sig, sa, &target_old_set, cpu_env);
5894 #endif
5895 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5896 sa->_sa_handler = TARGET_SIG_DFL;
5901 void process_pending_signals(CPUArchState *cpu_env)
5903 CPUState *cpu = ENV_GET_CPU(cpu_env);
5904 int sig;
5905 TaskState *ts = cpu->opaque;
5906 sigset_t set;
5907 sigset_t *blocked_set;
5909 while (atomic_read(&ts->signal_pending)) {
5910 /* FIXME: This is not threadsafe. */
5911 sigfillset(&set);
5912 sigprocmask(SIG_SETMASK, &set, 0);
5914 sig = ts->sync_signal.pending;
5915 if (sig) {
5916 /* Synchronous signals are forced,
5917 * see force_sig_info() and callers in Linux
5918 * Note that not all of our queue_signal() calls in QEMU correspond
5919 * to force_sig_info() calls in Linux (some are send_sig_info()).
5920 * However it seems like a kernel bug to me to allow the process
5921 * to block a synchronous signal since it could then just end up
5922 * looping round and round indefinitely.
5924 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
5925 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
5926 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
5927 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
5930 handle_pending_signal(cpu_env, sig);
5933 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5934 blocked_set = ts->in_sigsuspend ?
5935 &ts->sigsuspend_mask : &ts->signal_mask;
5937 if (ts->sigtab[sig - 1].pending &&
5938 (!sigismember(blocked_set,
5939 target_to_host_signal_table[sig]))) {
5940 handle_pending_signal(cpu_env, sig);
5941 /* Restart scan from the beginning */
5942 sig = 1;
5946 /* if no signal is pending, unblock signals and recheck (the act
5947 * of unblocking might cause us to take another host signal which
5948 * will set signal_pending again).
5950 atomic_set(&ts->signal_pending, 0);
5951 ts->in_sigsuspend = 0;
5952 set = ts->signal_mask;
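    /* Keep SIGSEGV/SIGBUS unblocked on the host so that guest memory
       faults can still reach QEMU's own handler. */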
5953 sigdelset(&set, SIGSEGV);
5954 sigdelset(&set, SIGBUS);
5955 sigprocmask(SIG_SETMASK, &set, 0);
5957 ts->in_sigsuspend = 0;