commit: Fix use of error handling policy
[qemu.git] / linux-user / signal.c
blob9d980456ec46eed0854a2360f59e2fca5e1afaa1
1 /*
2 * Emulation of Linux signals
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
21 #include <sys/ucontext.h>
22 #include <sys/resource.h>
24 #include "qemu.h"
25 #include "qemu-common.h"
26 #include "target_signal.h"
27 #include "trace.h"
29 static struct target_sigaltstack target_sigaltstack_used = {
30 .ss_sp = 0,
31 .ss_size = 0,
32 .ss_flags = TARGET_SS_DISABLE,
35 static struct target_sigaction sigact_table[TARGET_NSIG];
37 static void host_signal_handler(int host_signum, siginfo_t *info,
38 void *puc);
40 static uint8_t host_to_target_signal_table[_NSIG] = {
41 [SIGHUP] = TARGET_SIGHUP,
42 [SIGINT] = TARGET_SIGINT,
43 [SIGQUIT] = TARGET_SIGQUIT,
44 [SIGILL] = TARGET_SIGILL,
45 [SIGTRAP] = TARGET_SIGTRAP,
46 [SIGABRT] = TARGET_SIGABRT,
47 /* [SIGIOT] = TARGET_SIGIOT,*/
48 [SIGBUS] = TARGET_SIGBUS,
49 [SIGFPE] = TARGET_SIGFPE,
50 [SIGKILL] = TARGET_SIGKILL,
51 [SIGUSR1] = TARGET_SIGUSR1,
52 [SIGSEGV] = TARGET_SIGSEGV,
53 [SIGUSR2] = TARGET_SIGUSR2,
54 [SIGPIPE] = TARGET_SIGPIPE,
55 [SIGALRM] = TARGET_SIGALRM,
56 [SIGTERM] = TARGET_SIGTERM,
57 #ifdef SIGSTKFLT
58 [SIGSTKFLT] = TARGET_SIGSTKFLT,
59 #endif
60 [SIGCHLD] = TARGET_SIGCHLD,
61 [SIGCONT] = TARGET_SIGCONT,
62 [SIGSTOP] = TARGET_SIGSTOP,
63 [SIGTSTP] = TARGET_SIGTSTP,
64 [SIGTTIN] = TARGET_SIGTTIN,
65 [SIGTTOU] = TARGET_SIGTTOU,
66 [SIGURG] = TARGET_SIGURG,
67 [SIGXCPU] = TARGET_SIGXCPU,
68 [SIGXFSZ] = TARGET_SIGXFSZ,
69 [SIGVTALRM] = TARGET_SIGVTALRM,
70 [SIGPROF] = TARGET_SIGPROF,
71 [SIGWINCH] = TARGET_SIGWINCH,
72 [SIGIO] = TARGET_SIGIO,
73 [SIGPWR] = TARGET_SIGPWR,
74 [SIGSYS] = TARGET_SIGSYS,
75 /* next signals stay the same */
76 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
77 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
78 To fix this properly we need to do manual signal delivery multiplexed
79 over a single host signal. */
80 [__SIGRTMIN] = __SIGRTMAX,
81 [__SIGRTMAX] = __SIGRTMIN,
83 static uint8_t target_to_host_signal_table[_NSIG];
85 static inline int on_sig_stack(unsigned long sp)
87 return (sp - target_sigaltstack_used.ss_sp
88 < target_sigaltstack_used.ss_size);
91 static inline int sas_ss_flags(unsigned long sp)
93 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
94 : on_sig_stack(sp) ? SS_ONSTACK : 0);
97 int host_to_target_signal(int sig)
99 if (sig < 0 || sig >= _NSIG)
100 return sig;
101 return host_to_target_signal_table[sig];
104 int target_to_host_signal(int sig)
106 if (sig < 0 || sig >= _NSIG)
107 return sig;
108 return target_to_host_signal_table[sig];
111 static inline void target_sigemptyset(target_sigset_t *set)
113 memset(set, 0, sizeof(*set));
116 static inline void target_sigaddset(target_sigset_t *set, int signum)
118 signum--;
119 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
120 set->sig[signum / TARGET_NSIG_BPW] |= mask;
123 static inline int target_sigismember(const target_sigset_t *set, int signum)
125 signum--;
126 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
127 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
130 static void host_to_target_sigset_internal(target_sigset_t *d,
131 const sigset_t *s)
133 int i;
134 target_sigemptyset(d);
135 for (i = 1; i <= TARGET_NSIG; i++) {
136 if (sigismember(s, i)) {
137 target_sigaddset(d, host_to_target_signal(i));
142 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
144 target_sigset_t d1;
145 int i;
147 host_to_target_sigset_internal(&d1, s);
148 for(i = 0;i < TARGET_NSIG_WORDS; i++)
149 d->sig[i] = tswapal(d1.sig[i]);
152 static void target_to_host_sigset_internal(sigset_t *d,
153 const target_sigset_t *s)
155 int i;
156 sigemptyset(d);
157 for (i = 1; i <= TARGET_NSIG; i++) {
158 if (target_sigismember(s, i)) {
159 sigaddset(d, target_to_host_signal(i));
164 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
166 target_sigset_t s1;
167 int i;
169 for(i = 0;i < TARGET_NSIG_WORDS; i++)
170 s1.sig[i] = tswapal(s->sig[i]);
171 target_to_host_sigset_internal(d, &s1);
174 void host_to_target_old_sigset(abi_ulong *old_sigset,
175 const sigset_t *sigset)
177 target_sigset_t d;
178 host_to_target_sigset(&d, sigset);
179 *old_sigset = d.sig[0];
182 void target_to_host_old_sigset(sigset_t *sigset,
183 const abi_ulong *old_sigset)
185 target_sigset_t d;
186 int i;
188 d.sig[0] = *old_sigset;
189 for(i = 1;i < TARGET_NSIG_WORDS; i++)
190 d.sig[i] = 0;
191 target_to_host_sigset(sigset, &d);
194 int block_signals(void)
196 TaskState *ts = (TaskState *)thread_cpu->opaque;
197 sigset_t set;
199 /* It's OK to block everything including SIGSEGV, because we won't
200 * run any further guest code before unblocking signals in
201 * process_pending_signals().
203 sigfillset(&set);
204 sigprocmask(SIG_SETMASK, &set, 0);
206 return atomic_xchg(&ts->signal_pending, 1);
209 /* Wrapper for sigprocmask function
210 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
211 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
212 * a signal was already pending and the syscall must be restarted, or
213 * 0 on success.
214 * If set is NULL, this is guaranteed not to fail.
216 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
218 TaskState *ts = (TaskState *)thread_cpu->opaque;
220 if (oldset) {
221 *oldset = ts->signal_mask;
224 if (set) {
225 int i;
227 if (block_signals()) {
228 return -TARGET_ERESTARTSYS;
231 switch (how) {
232 case SIG_BLOCK:
233 sigorset(&ts->signal_mask, &ts->signal_mask, set);
234 break;
235 case SIG_UNBLOCK:
236 for (i = 1; i <= NSIG; ++i) {
237 if (sigismember(set, i)) {
238 sigdelset(&ts->signal_mask, i);
241 break;
242 case SIG_SETMASK:
243 ts->signal_mask = *set;
244 break;
245 default:
246 g_assert_not_reached();
249 /* Silently ignore attempts to change blocking status of KILL or STOP */
250 sigdelset(&ts->signal_mask, SIGKILL);
251 sigdelset(&ts->signal_mask, SIGSTOP);
253 return 0;
256 #if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
257 !defined(TARGET_X86_64)
258 /* Just set the guest's signal mask to the specified value; the
259 * caller is assumed to have called block_signals() already.
261 static void set_sigmask(const sigset_t *set)
263 TaskState *ts = (TaskState *)thread_cpu->opaque;
265 ts->signal_mask = *set;
267 #endif
269 /* siginfo conversion */
271 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
272 const siginfo_t *info)
274 int sig = host_to_target_signal(info->si_signo);
275 int si_code = info->si_code;
276 int si_type;
277 tinfo->si_signo = sig;
278 tinfo->si_errno = 0;
279 tinfo->si_code = info->si_code;
281 /* This memset serves two purposes:
282 * (1) ensure we don't leak random junk to the guest later
283 * (2) placate false positives from gcc about fields
284 * being used uninitialized if it chooses to inline both this
285 * function and tswap_siginfo() into host_to_target_siginfo().
287 memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));
289 /* This is awkward, because we have to use a combination of
290 * the si_code and si_signo to figure out which of the union's
291 * members are valid. (Within the host kernel it is always possible
292 * to tell, but the kernel carefully avoids giving userspace the
293 * high 16 bits of si_code, so we don't have the information to
294 * do this the easy way...) We therefore make our best guess,
295 * bearing in mind that a guest can spoof most of the si_codes
296 * via rt_sigqueueinfo() if it likes.
298 * Once we have made our guess, we record it in the top 16 bits of
299 * the si_code, so that tswap_siginfo() later can use it.
300 * tswap_siginfo() will strip these top bits out before writing
301 * si_code to the guest (sign-extending the lower bits).
304 switch (si_code) {
305 case SI_USER:
306 case SI_TKILL:
307 case SI_KERNEL:
308 /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
309 * These are the only unspoofable si_code values.
311 tinfo->_sifields._kill._pid = info->si_pid;
312 tinfo->_sifields._kill._uid = info->si_uid;
313 si_type = QEMU_SI_KILL;
314 break;
315 default:
316 /* Everything else is spoofable. Make best guess based on signal */
317 switch (sig) {
318 case TARGET_SIGCHLD:
319 tinfo->_sifields._sigchld._pid = info->si_pid;
320 tinfo->_sifields._sigchld._uid = info->si_uid;
321 tinfo->_sifields._sigchld._status
322 = host_to_target_waitstatus(info->si_status);
323 tinfo->_sifields._sigchld._utime = info->si_utime;
324 tinfo->_sifields._sigchld._stime = info->si_stime;
325 si_type = QEMU_SI_CHLD;
326 break;
327 case TARGET_SIGIO:
328 tinfo->_sifields._sigpoll._band = info->si_band;
329 tinfo->_sifields._sigpoll._fd = info->si_fd;
330 si_type = QEMU_SI_POLL;
331 break;
332 default:
333 /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
334 tinfo->_sifields._rt._pid = info->si_pid;
335 tinfo->_sifields._rt._uid = info->si_uid;
336 /* XXX: potential problem if 64 bit */
337 tinfo->_sifields._rt._sigval.sival_ptr
338 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
339 si_type = QEMU_SI_RT;
340 break;
342 break;
345 tinfo->si_code = deposit32(si_code, 16, 16, si_type);
348 static void tswap_siginfo(target_siginfo_t *tinfo,
349 const target_siginfo_t *info)
351 int si_type = extract32(info->si_code, 16, 16);
352 int si_code = sextract32(info->si_code, 0, 16);
354 __put_user(info->si_signo, &tinfo->si_signo);
355 __put_user(info->si_errno, &tinfo->si_errno);
356 __put_user(si_code, &tinfo->si_code);
358 /* We can use our internal marker of which fields in the structure
359 * are valid, rather than duplicating the guesswork of
360 * host_to_target_siginfo_noswap() here.
362 switch (si_type) {
363 case QEMU_SI_KILL:
364 __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
365 __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
366 break;
367 case QEMU_SI_TIMER:
368 __put_user(info->_sifields._timer._timer1,
369 &tinfo->_sifields._timer._timer1);
370 __put_user(info->_sifields._timer._timer2,
371 &tinfo->_sifields._timer._timer2);
372 break;
373 case QEMU_SI_POLL:
374 __put_user(info->_sifields._sigpoll._band,
375 &tinfo->_sifields._sigpoll._band);
376 __put_user(info->_sifields._sigpoll._fd,
377 &tinfo->_sifields._sigpoll._fd);
378 break;
379 case QEMU_SI_FAULT:
380 __put_user(info->_sifields._sigfault._addr,
381 &tinfo->_sifields._sigfault._addr);
382 break;
383 case QEMU_SI_CHLD:
384 __put_user(info->_sifields._sigchld._pid,
385 &tinfo->_sifields._sigchld._pid);
386 __put_user(info->_sifields._sigchld._uid,
387 &tinfo->_sifields._sigchld._uid);
388 __put_user(info->_sifields._sigchld._status,
389 &tinfo->_sifields._sigchld._status);
390 __put_user(info->_sifields._sigchld._utime,
391 &tinfo->_sifields._sigchld._utime);
392 __put_user(info->_sifields._sigchld._stime,
393 &tinfo->_sifields._sigchld._stime);
394 break;
395 case QEMU_SI_RT:
396 __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
397 __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
398 __put_user(info->_sifields._rt._sigval.sival_ptr,
399 &tinfo->_sifields._rt._sigval.sival_ptr);
400 break;
401 default:
402 g_assert_not_reached();
406 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
408 target_siginfo_t tgt_tmp;
409 host_to_target_siginfo_noswap(&tgt_tmp, info);
410 tswap_siginfo(tinfo, &tgt_tmp);
413 /* XXX: we support only POSIX RT signals are used. */
414 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
415 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
417 /* This conversion is used only for the rt_sigqueueinfo syscall,
418 * and so we know that the _rt fields are the valid ones.
420 abi_ulong sival_ptr;
422 __get_user(info->si_signo, &tinfo->si_signo);
423 __get_user(info->si_errno, &tinfo->si_errno);
424 __get_user(info->si_code, &tinfo->si_code);
425 __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
426 __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
427 __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
428 info->si_value.sival_ptr = (void *)(long)sival_ptr;
431 static int fatal_signal (int sig)
433 switch (sig) {
434 case TARGET_SIGCHLD:
435 case TARGET_SIGURG:
436 case TARGET_SIGWINCH:
437 /* Ignored by default. */
438 return 0;
439 case TARGET_SIGCONT:
440 case TARGET_SIGSTOP:
441 case TARGET_SIGTSTP:
442 case TARGET_SIGTTIN:
443 case TARGET_SIGTTOU:
444 /* Job control signals. */
445 return 0;
446 default:
447 return 1;
451 /* returns 1 if given signal should dump core if not handled */
452 static int core_dump_signal(int sig)
454 switch (sig) {
455 case TARGET_SIGABRT:
456 case TARGET_SIGFPE:
457 case TARGET_SIGILL:
458 case TARGET_SIGQUIT:
459 case TARGET_SIGSEGV:
460 case TARGET_SIGTRAP:
461 case TARGET_SIGBUS:
462 return (1);
463 default:
464 return (0);
468 void signal_init(void)
470 TaskState *ts = (TaskState *)thread_cpu->opaque;
471 struct sigaction act;
472 struct sigaction oact;
473 int i, j;
474 int host_sig;
476 /* generate signal conversion tables */
477 for(i = 1; i < _NSIG; i++) {
478 if (host_to_target_signal_table[i] == 0)
479 host_to_target_signal_table[i] = i;
481 for(i = 1; i < _NSIG; i++) {
482 j = host_to_target_signal_table[i];
483 target_to_host_signal_table[j] = i;
486 /* Set the signal mask from the host mask. */
487 sigprocmask(0, 0, &ts->signal_mask);
489 /* set all host signal handlers. ALL signals are blocked during
490 the handlers to serialize them. */
491 memset(sigact_table, 0, sizeof(sigact_table));
493 sigfillset(&act.sa_mask);
494 act.sa_flags = SA_SIGINFO;
495 act.sa_sigaction = host_signal_handler;
496 for(i = 1; i <= TARGET_NSIG; i++) {
497 host_sig = target_to_host_signal(i);
498 sigaction(host_sig, NULL, &oact);
499 if (oact.sa_sigaction == (void *)SIG_IGN) {
500 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
501 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
502 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
504 /* If there's already a handler installed then something has
505 gone horribly wrong, so don't even try to handle that case. */
506 /* Install some handlers for our own use. We need at least
507 SIGSEGV and SIGBUS, to detect exceptions. We can not just
508 trap all signals because it affects syscall interrupt
509 behavior. But do trap all default-fatal signals. */
510 if (fatal_signal (i))
511 sigaction(host_sig, &act, NULL);
516 /* abort execution with signal */
517 static void QEMU_NORETURN force_sig(int target_sig)
519 CPUState *cpu = thread_cpu;
520 CPUArchState *env = cpu->env_ptr;
521 TaskState *ts = (TaskState *)cpu->opaque;
522 int host_sig, core_dumped = 0;
523 struct sigaction act;
525 host_sig = target_to_host_signal(target_sig);
526 trace_user_force_sig(env, target_sig, host_sig);
527 gdb_signalled(env, target_sig);
529 /* dump core if supported by target binary format */
530 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
531 stop_all_tasks();
532 core_dumped =
533 ((*ts->bprm->core_dump)(target_sig, env) == 0);
535 if (core_dumped) {
536 /* we already dumped the core of target process, we don't want
537 * a coredump of qemu itself */
538 struct rlimit nodump;
539 getrlimit(RLIMIT_CORE, &nodump);
540 nodump.rlim_cur=0;
541 setrlimit(RLIMIT_CORE, &nodump);
542 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
543 target_sig, strsignal(host_sig), "core dumped" );
546 /* The proper exit code for dying from an uncaught signal is
547 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
548 * a negative value. To get the proper exit code we need to
549 * actually die from an uncaught signal. Here the default signal
550 * handler is installed, we send ourself a signal and we wait for
551 * it to arrive. */
552 sigfillset(&act.sa_mask);
553 act.sa_handler = SIG_DFL;
554 act.sa_flags = 0;
555 sigaction(host_sig, &act, NULL);
557 /* For some reason raise(host_sig) doesn't send the signal when
558 * statically linked on x86-64. */
559 kill(getpid(), host_sig);
561 /* Make sure the signal isn't masked (just reuse the mask inside
562 of act) */
563 sigdelset(&act.sa_mask, host_sig);
564 sigsuspend(&act.sa_mask);
566 /* unreachable */
567 abort();
570 /* queue a signal so that it will be send to the virtual CPU as soon
571 as possible */
572 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
574 CPUState *cpu = ENV_GET_CPU(env);
575 TaskState *ts = cpu->opaque;
577 trace_user_queue_signal(env, sig);
579 /* Currently all callers define siginfo structures which
580 * use the _sifields._sigfault union member, so we can
581 * set the type here. If that changes we should push this
582 * out so the si_type is passed in by callers.
584 info->si_code = deposit32(info->si_code, 16, 16, QEMU_SI_FAULT);
586 ts->sync_signal.info = *info;
587 ts->sync_signal.pending = sig;
588 /* signal that a new signal is pending */
589 atomic_set(&ts->signal_pending, 1);
590 return 1; /* indicates that the signal was queued */
593 #ifndef HAVE_SAFE_SYSCALL
594 static inline void rewind_if_in_safe_syscall(void *puc)
596 /* Default version: never rewind */
598 #endif
600 static void host_signal_handler(int host_signum, siginfo_t *info,
601 void *puc)
603 CPUArchState *env = thread_cpu->env_ptr;
604 CPUState *cpu = ENV_GET_CPU(env);
605 TaskState *ts = cpu->opaque;
607 int sig;
608 target_siginfo_t tinfo;
609 ucontext_t *uc = puc;
610 struct emulated_sigtable *k;
612 /* the CPU emulator uses some host signals to detect exceptions,
613 we forward to it some signals */
614 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
615 && info->si_code > 0) {
616 if (cpu_signal_handler(host_signum, info, puc))
617 return;
620 /* get target signal number */
621 sig = host_to_target_signal(host_signum);
622 if (sig < 1 || sig > TARGET_NSIG)
623 return;
624 trace_user_host_signal(env, host_signum, sig);
626 rewind_if_in_safe_syscall(puc);
628 host_to_target_siginfo_noswap(&tinfo, info);
629 k = &ts->sigtab[sig - 1];
630 k->info = tinfo;
631 k->pending = sig;
632 ts->signal_pending = 1;
634 /* Block host signals until target signal handler entered. We
635 * can't block SIGSEGV or SIGBUS while we're executing guest
636 * code in case the guest code provokes one in the window between
637 * now and it getting out to the main loop. Signals will be
638 * unblocked again in process_pending_signals().
640 * WARNING: we cannot use sigfillset() here because the uc_sigmask
641 * field is a kernel sigset_t, which is much smaller than the
642 * libc sigset_t which sigfillset() operates on. Using sigfillset()
643 * would write 0xff bytes off the end of the structure and trash
644 * data on the struct.
645 * We can't use sizeof(uc->uc_sigmask) either, because the libc
646 * headers define the struct field with the wrong (too large) type.
648 memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
649 sigdelset(&uc->uc_sigmask, SIGSEGV);
650 sigdelset(&uc->uc_sigmask, SIGBUS);
652 /* interrupt the virtual CPU as soon as possible */
653 cpu_exit(thread_cpu);
656 /* do_sigaltstack() returns target values and errnos. */
657 /* compare linux/kernel/signal.c:do_sigaltstack() */
658 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
660 int ret;
661 struct target_sigaltstack oss;
663 /* XXX: test errors */
664 if(uoss_addr)
666 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
667 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
668 __put_user(sas_ss_flags(sp), &oss.ss_flags);
671 if(uss_addr)
673 struct target_sigaltstack *uss;
674 struct target_sigaltstack ss;
675 size_t minstacksize = TARGET_MINSIGSTKSZ;
677 #if defined(TARGET_PPC64)
678 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
679 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
680 if (get_ppc64_abi(image) > 1) {
681 minstacksize = 4096;
683 #endif
685 ret = -TARGET_EFAULT;
686 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
687 goto out;
689 __get_user(ss.ss_sp, &uss->ss_sp);
690 __get_user(ss.ss_size, &uss->ss_size);
691 __get_user(ss.ss_flags, &uss->ss_flags);
692 unlock_user_struct(uss, uss_addr, 0);
694 ret = -TARGET_EPERM;
695 if (on_sig_stack(sp))
696 goto out;
698 ret = -TARGET_EINVAL;
699 if (ss.ss_flags != TARGET_SS_DISABLE
700 && ss.ss_flags != TARGET_SS_ONSTACK
701 && ss.ss_flags != 0)
702 goto out;
704 if (ss.ss_flags == TARGET_SS_DISABLE) {
705 ss.ss_size = 0;
706 ss.ss_sp = 0;
707 } else {
708 ret = -TARGET_ENOMEM;
709 if (ss.ss_size < minstacksize) {
710 goto out;
714 target_sigaltstack_used.ss_sp = ss.ss_sp;
715 target_sigaltstack_used.ss_size = ss.ss_size;
718 if (uoss_addr) {
719 ret = -TARGET_EFAULT;
720 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
721 goto out;
724 ret = 0;
725 out:
726 return ret;
729 /* do_sigaction() return target values and host errnos */
730 int do_sigaction(int sig, const struct target_sigaction *act,
731 struct target_sigaction *oact)
733 struct target_sigaction *k;
734 struct sigaction act1;
735 int host_sig;
736 int ret = 0;
738 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
739 return -TARGET_EINVAL;
742 if (block_signals()) {
743 return -TARGET_ERESTARTSYS;
746 k = &sigact_table[sig - 1];
747 if (oact) {
748 __put_user(k->_sa_handler, &oact->_sa_handler);
749 __put_user(k->sa_flags, &oact->sa_flags);
750 #if !defined(TARGET_MIPS)
751 __put_user(k->sa_restorer, &oact->sa_restorer);
752 #endif
753 /* Not swapped. */
754 oact->sa_mask = k->sa_mask;
756 if (act) {
757 /* FIXME: This is not threadsafe. */
758 __get_user(k->_sa_handler, &act->_sa_handler);
759 __get_user(k->sa_flags, &act->sa_flags);
760 #if !defined(TARGET_MIPS)
761 __get_user(k->sa_restorer, &act->sa_restorer);
762 #endif
763 /* To be swapped in target_to_host_sigset. */
764 k->sa_mask = act->sa_mask;
766 /* we update the host linux signal state */
767 host_sig = target_to_host_signal(sig);
768 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
769 sigfillset(&act1.sa_mask);
770 act1.sa_flags = SA_SIGINFO;
771 if (k->sa_flags & TARGET_SA_RESTART)
772 act1.sa_flags |= SA_RESTART;
773 /* NOTE: it is important to update the host kernel signal
774 ignore state to avoid getting unexpected interrupted
775 syscalls */
776 if (k->_sa_handler == TARGET_SIG_IGN) {
777 act1.sa_sigaction = (void *)SIG_IGN;
778 } else if (k->_sa_handler == TARGET_SIG_DFL) {
779 if (fatal_signal (sig))
780 act1.sa_sigaction = host_signal_handler;
781 else
782 act1.sa_sigaction = (void *)SIG_DFL;
783 } else {
784 act1.sa_sigaction = host_signal_handler;
786 ret = sigaction(host_sig, &act1, NULL);
789 return ret;
792 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
794 /* from the Linux kernel */
796 struct target_fpreg {
797 uint16_t significand[4];
798 uint16_t exponent;
801 struct target_fpxreg {
802 uint16_t significand[4];
803 uint16_t exponent;
804 uint16_t padding[3];
807 struct target_xmmreg {
808 abi_ulong element[4];
811 struct target_fpstate {
812 /* Regular FPU environment */
813 abi_ulong cw;
814 abi_ulong sw;
815 abi_ulong tag;
816 abi_ulong ipoff;
817 abi_ulong cssel;
818 abi_ulong dataoff;
819 abi_ulong datasel;
820 struct target_fpreg _st[8];
821 uint16_t status;
822 uint16_t magic; /* 0xffff = regular FPU data only */
824 /* FXSR FPU environment */
825 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
826 abi_ulong mxcsr;
827 abi_ulong reserved;
828 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
829 struct target_xmmreg _xmm[8];
830 abi_ulong padding[56];
833 #define X86_FXSR_MAGIC 0x0000
835 struct target_sigcontext {
836 uint16_t gs, __gsh;
837 uint16_t fs, __fsh;
838 uint16_t es, __esh;
839 uint16_t ds, __dsh;
840 abi_ulong edi;
841 abi_ulong esi;
842 abi_ulong ebp;
843 abi_ulong esp;
844 abi_ulong ebx;
845 abi_ulong edx;
846 abi_ulong ecx;
847 abi_ulong eax;
848 abi_ulong trapno;
849 abi_ulong err;
850 abi_ulong eip;
851 uint16_t cs, __csh;
852 abi_ulong eflags;
853 abi_ulong esp_at_signal;
854 uint16_t ss, __ssh;
855 abi_ulong fpstate; /* pointer */
856 abi_ulong oldmask;
857 abi_ulong cr2;
860 struct target_ucontext {
861 abi_ulong tuc_flags;
862 abi_ulong tuc_link;
863 target_stack_t tuc_stack;
864 struct target_sigcontext tuc_mcontext;
865 target_sigset_t tuc_sigmask; /* mask last for extensibility */
868 struct sigframe
870 abi_ulong pretcode;
871 int sig;
872 struct target_sigcontext sc;
873 struct target_fpstate fpstate;
874 abi_ulong extramask[TARGET_NSIG_WORDS-1];
875 char retcode[8];
878 struct rt_sigframe
880 abi_ulong pretcode;
881 int sig;
882 abi_ulong pinfo;
883 abi_ulong puc;
884 struct target_siginfo info;
885 struct target_ucontext uc;
886 struct target_fpstate fpstate;
887 char retcode[8];
891 * Set up a signal frame.
894 /* XXX: save x87 state */
895 static void setup_sigcontext(struct target_sigcontext *sc,
896 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
897 abi_ulong fpstate_addr)
899 CPUState *cs = CPU(x86_env_get_cpu(env));
900 uint16_t magic;
902 /* already locked in setup_frame() */
903 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
904 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
905 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
906 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
907 __put_user(env->regs[R_EDI], &sc->edi);
908 __put_user(env->regs[R_ESI], &sc->esi);
909 __put_user(env->regs[R_EBP], &sc->ebp);
910 __put_user(env->regs[R_ESP], &sc->esp);
911 __put_user(env->regs[R_EBX], &sc->ebx);
912 __put_user(env->regs[R_EDX], &sc->edx);
913 __put_user(env->regs[R_ECX], &sc->ecx);
914 __put_user(env->regs[R_EAX], &sc->eax);
915 __put_user(cs->exception_index, &sc->trapno);
916 __put_user(env->error_code, &sc->err);
917 __put_user(env->eip, &sc->eip);
918 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
919 __put_user(env->eflags, &sc->eflags);
920 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
921 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
923 cpu_x86_fsave(env, fpstate_addr, 1);
924 fpstate->status = fpstate->sw;
925 magic = 0xffff;
926 __put_user(magic, &fpstate->magic);
927 __put_user(fpstate_addr, &sc->fpstate);
929 /* non-iBCS2 extensions.. */
930 __put_user(mask, &sc->oldmask);
931 __put_user(env->cr[2], &sc->cr2);
935 * Determine which stack to use..
938 static inline abi_ulong
939 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
941 unsigned long esp;
943 /* Default to using normal stack */
944 esp = env->regs[R_ESP];
945 /* This is the X/Open sanctioned signal stack switching. */
946 if (ka->sa_flags & TARGET_SA_ONSTACK) {
947 if (sas_ss_flags(esp) == 0) {
948 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
950 } else {
952 /* This is the legacy signal stack switching. */
953 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
954 !(ka->sa_flags & TARGET_SA_RESTORER) &&
955 ka->sa_restorer) {
956 esp = (unsigned long) ka->sa_restorer;
959 return (esp - frame_size) & -8ul;
962 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
963 static void setup_frame(int sig, struct target_sigaction *ka,
964 target_sigset_t *set, CPUX86State *env)
966 abi_ulong frame_addr;
967 struct sigframe *frame;
968 int i;
970 frame_addr = get_sigframe(ka, env, sizeof(*frame));
971 trace_user_setup_frame(env, frame_addr);
973 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
974 goto give_sigsegv;
976 __put_user(sig, &frame->sig);
978 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
979 frame_addr + offsetof(struct sigframe, fpstate));
981 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
982 __put_user(set->sig[i], &frame->extramask[i - 1]);
985 /* Set up to return from userspace. If provided, use a stub
986 already in userspace. */
987 if (ka->sa_flags & TARGET_SA_RESTORER) {
988 __put_user(ka->sa_restorer, &frame->pretcode);
989 } else {
990 uint16_t val16;
991 abi_ulong retcode_addr;
992 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
993 __put_user(retcode_addr, &frame->pretcode);
994 /* This is popl %eax ; movl $,%eax ; int $0x80 */
995 val16 = 0xb858;
996 __put_user(val16, (uint16_t *)(frame->retcode+0));
997 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
998 val16 = 0x80cd;
999 __put_user(val16, (uint16_t *)(frame->retcode+6));
1003 /* Set up registers for signal handler */
1004 env->regs[R_ESP] = frame_addr;
1005 env->eip = ka->_sa_handler;
1007 cpu_x86_load_seg(env, R_DS, __USER_DS);
1008 cpu_x86_load_seg(env, R_ES, __USER_DS);
1009 cpu_x86_load_seg(env, R_SS, __USER_DS);
1010 cpu_x86_load_seg(env, R_CS, __USER_CS);
1011 env->eflags &= ~TF_MASK;
1013 unlock_user_struct(frame, frame_addr, 1);
1015 return;
1017 give_sigsegv:
1018 if (sig == TARGET_SIGSEGV) {
1019 ka->_sa_handler = TARGET_SIG_DFL;
1021 force_sig(TARGET_SIGSEGV /* , current */);
1024 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
1025 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1026 target_siginfo_t *info,
1027 target_sigset_t *set, CPUX86State *env)
1029 abi_ulong frame_addr, addr;
1030 struct rt_sigframe *frame;
1031 int i;
1033 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1034 trace_user_setup_rt_frame(env, frame_addr);
1036 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1037 goto give_sigsegv;
1039 __put_user(sig, &frame->sig);
1040 addr = frame_addr + offsetof(struct rt_sigframe, info);
1041 __put_user(addr, &frame->pinfo);
1042 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1043 __put_user(addr, &frame->puc);
1044 tswap_siginfo(&frame->info, info);
1046 /* Create the ucontext. */
1047 __put_user(0, &frame->uc.tuc_flags);
1048 __put_user(0, &frame->uc.tuc_link);
1049 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1050 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1051 &frame->uc.tuc_stack.ss_flags);
1052 __put_user(target_sigaltstack_used.ss_size,
1053 &frame->uc.tuc_stack.ss_size);
1054 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1055 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1057 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1058 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1061 /* Set up to return from userspace. If provided, use a stub
1062 already in userspace. */
1063 if (ka->sa_flags & TARGET_SA_RESTORER) {
1064 __put_user(ka->sa_restorer, &frame->pretcode);
1065 } else {
1066 uint16_t val16;
1067 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1068 __put_user(addr, &frame->pretcode);
1069 /* This is movl $,%eax ; int $0x80 */
1070 __put_user(0xb8, (char *)(frame->retcode+0));
1071 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1072 val16 = 0x80cd;
1073 __put_user(val16, (uint16_t *)(frame->retcode+5));
1076 /* Set up registers for signal handler */
1077 env->regs[R_ESP] = frame_addr;
1078 env->eip = ka->_sa_handler;
1080 cpu_x86_load_seg(env, R_DS, __USER_DS);
1081 cpu_x86_load_seg(env, R_ES, __USER_DS);
1082 cpu_x86_load_seg(env, R_SS, __USER_DS);
1083 cpu_x86_load_seg(env, R_CS, __USER_CS);
1084 env->eflags &= ~TF_MASK;
1086 unlock_user_struct(frame, frame_addr, 1);
1088 return;
1090 give_sigsegv:
1091 if (sig == TARGET_SIGSEGV) {
1092 ka->_sa_handler = TARGET_SIG_DFL;
1094 force_sig(TARGET_SIGSEGV /* , current */);
1097 static int
1098 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1100 unsigned int err = 0;
1101 abi_ulong fpstate_addr;
1102 unsigned int tmpflags;
1104 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1105 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1106 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1107 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1109 env->regs[R_EDI] = tswapl(sc->edi);
1110 env->regs[R_ESI] = tswapl(sc->esi);
1111 env->regs[R_EBP] = tswapl(sc->ebp);
1112 env->regs[R_ESP] = tswapl(sc->esp);
1113 env->regs[R_EBX] = tswapl(sc->ebx);
1114 env->regs[R_EDX] = tswapl(sc->edx);
1115 env->regs[R_ECX] = tswapl(sc->ecx);
1116 env->regs[R_EAX] = tswapl(sc->eax);
1117 env->eip = tswapl(sc->eip);
1119 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1120 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1122 tmpflags = tswapl(sc->eflags);
1123 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1124 // regs->orig_eax = -1; /* disable syscall checks */
1126 fpstate_addr = tswapl(sc->fpstate);
1127 if (fpstate_addr != 0) {
1128 if (!access_ok(VERIFY_READ, fpstate_addr,
1129 sizeof(struct target_fpstate)))
1130 goto badframe;
1131 cpu_x86_frstor(env, fpstate_addr, 1);
1134 return err;
1135 badframe:
1136 return 1;
1139 long do_sigreturn(CPUX86State *env)
1141 struct sigframe *frame;
1142 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1143 target_sigset_t target_set;
1144 sigset_t set;
1145 int i;
1147 trace_user_do_sigreturn(env, frame_addr);
1148 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1149 goto badframe;
1150 /* set blocked signals */
1151 __get_user(target_set.sig[0], &frame->sc.oldmask);
1152 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1153 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1156 target_to_host_sigset_internal(&set, &target_set);
1157 set_sigmask(&set);
1159 /* restore registers */
1160 if (restore_sigcontext(env, &frame->sc))
1161 goto badframe;
1162 unlock_user_struct(frame, frame_addr, 0);
1163 return -TARGET_QEMU_ESIGRETURN;
1165 badframe:
1166 unlock_user_struct(frame, frame_addr, 0);
1167 force_sig(TARGET_SIGSEGV);
1168 return 0;
1171 long do_rt_sigreturn(CPUX86State *env)
1173 abi_ulong frame_addr;
1174 struct rt_sigframe *frame;
1175 sigset_t set;
1177 frame_addr = env->regs[R_ESP] - 4;
1178 trace_user_do_rt_sigreturn(env, frame_addr);
1179 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1180 goto badframe;
1181 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1182 set_sigmask(&set);
1184 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1185 goto badframe;
1188 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1189 get_sp_from_cpustate(env)) == -EFAULT) {
1190 goto badframe;
1193 unlock_user_struct(frame, frame_addr, 0);
1194 return -TARGET_QEMU_ESIGRETURN;
1196 badframe:
1197 unlock_user_struct(frame, frame_addr, 0);
1198 force_sig(TARGET_SIGSEGV);
1199 return 0;
1202 #elif defined(TARGET_AARCH64)
1204 struct target_sigcontext {
1205 uint64_t fault_address;
1206 /* AArch64 registers */
1207 uint64_t regs[31];
1208 uint64_t sp;
1209 uint64_t pc;
1210 uint64_t pstate;
1211 /* 4K reserved for FP/SIMD state and future expansion */
1212 char __reserved[4096] __attribute__((__aligned__(16)));
1215 struct target_ucontext {
1216 abi_ulong tuc_flags;
1217 abi_ulong tuc_link;
1218 target_stack_t tuc_stack;
1219 target_sigset_t tuc_sigmask;
1220 /* glibc uses a 1024-bit sigset_t */
1221 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1222 /* last for future expansion */
1223 struct target_sigcontext tuc_mcontext;
1227 * Header to be used at the beginning of structures extending the user
1228 * context. Such structures must be placed after the rt_sigframe on the stack
1229 * and be 16-byte aligned. The last structure must be a dummy one with the
1230 * magic and size set to 0.
1232 struct target_aarch64_ctx {
1233 uint32_t magic;
1234 uint32_t size;
1237 #define TARGET_FPSIMD_MAGIC 0x46508001
1239 struct target_fpsimd_context {
1240 struct target_aarch64_ctx head;
1241 uint32_t fpsr;
1242 uint32_t fpcr;
1243 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1247 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1248 * user space as it will change with the addition of new context. User space
1249 * should check the magic/size information.
1251 struct target_aux_context {
1252 struct target_fpsimd_context fpsimd;
1253 /* additional context to be added before "end" */
1254 struct target_aarch64_ctx end;
1257 struct target_rt_sigframe {
1258 struct target_siginfo info;
1259 struct target_ucontext uc;
1260 uint64_t fp;
1261 uint64_t lr;
1262 uint32_t tramp[2];
1265 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1266 CPUARMState *env, target_sigset_t *set)
1268 int i;
1269 struct target_aux_context *aux =
1270 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1272 /* set up the stack frame for unwinding */
1273 __put_user(env->xregs[29], &sf->fp);
1274 __put_user(env->xregs[30], &sf->lr);
1276 for (i = 0; i < 31; i++) {
1277 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1279 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1280 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1281 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1283 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1285 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1286 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1289 for (i = 0; i < 32; i++) {
1290 #ifdef TARGET_WORDS_BIGENDIAN
1291 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1292 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1293 #else
1294 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1295 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1296 #endif
1298 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1299 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1300 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1301 __put_user(sizeof(struct target_fpsimd_context),
1302 &aux->fpsimd.head.size);
1304 /* set the "end" magic */
1305 __put_user(0, &aux->end.magic);
1306 __put_user(0, &aux->end.size);
1308 return 0;
1311 static int target_restore_sigframe(CPUARMState *env,
1312 struct target_rt_sigframe *sf)
1314 sigset_t set;
1315 int i;
1316 struct target_aux_context *aux =
1317 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1318 uint32_t magic, size, fpsr, fpcr;
1319 uint64_t pstate;
1321 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1322 set_sigmask(&set);
1324 for (i = 0; i < 31; i++) {
1325 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1328 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1329 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1330 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1331 pstate_write(env, pstate);
1333 __get_user(magic, &aux->fpsimd.head.magic);
1334 __get_user(size, &aux->fpsimd.head.size);
1336 if (magic != TARGET_FPSIMD_MAGIC
1337 || size != sizeof(struct target_fpsimd_context)) {
1338 return 1;
1341 for (i = 0; i < 32; i++) {
1342 #ifdef TARGET_WORDS_BIGENDIAN
1343 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1344 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1345 #else
1346 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1347 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1348 #endif
1350 __get_user(fpsr, &aux->fpsimd.fpsr);
1351 vfp_set_fpsr(env, fpsr);
1352 __get_user(fpcr, &aux->fpsimd.fpcr);
1353 vfp_set_fpcr(env, fpcr);
1355 return 0;
1358 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1360 abi_ulong sp;
1362 sp = env->xregs[31];
1365 * This is the X/Open sanctioned signal stack switching.
1367 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1368 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1371 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1373 return sp;
1376 static void target_setup_frame(int usig, struct target_sigaction *ka,
1377 target_siginfo_t *info, target_sigset_t *set,
1378 CPUARMState *env)
1380 struct target_rt_sigframe *frame;
1381 abi_ulong frame_addr, return_addr;
1383 frame_addr = get_sigframe(ka, env);
1384 trace_user_setup_frame(env, frame_addr);
1385 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1386 goto give_sigsegv;
1389 __put_user(0, &frame->uc.tuc_flags);
1390 __put_user(0, &frame->uc.tuc_link);
1392 __put_user(target_sigaltstack_used.ss_sp,
1393 &frame->uc.tuc_stack.ss_sp);
1394 __put_user(sas_ss_flags(env->xregs[31]),
1395 &frame->uc.tuc_stack.ss_flags);
1396 __put_user(target_sigaltstack_used.ss_size,
1397 &frame->uc.tuc_stack.ss_size);
1398 target_setup_sigframe(frame, env, set);
1399 if (ka->sa_flags & TARGET_SA_RESTORER) {
1400 return_addr = ka->sa_restorer;
1401 } else {
1402 /* mov x8,#__NR_rt_sigreturn; svc #0 */
1403 __put_user(0xd2801168, &frame->tramp[0]);
1404 __put_user(0xd4000001, &frame->tramp[1]);
1405 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1407 env->xregs[0] = usig;
1408 env->xregs[31] = frame_addr;
1409 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1410 env->pc = ka->_sa_handler;
1411 env->xregs[30] = return_addr;
1412 if (info) {
1413 tswap_siginfo(&frame->info, info);
1414 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1415 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1418 unlock_user_struct(frame, frame_addr, 1);
1419 return;
1421 give_sigsegv:
1422 unlock_user_struct(frame, frame_addr, 1);
1423 force_sig(TARGET_SIGSEGV);
1426 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1427 target_siginfo_t *info, target_sigset_t *set,
1428 CPUARMState *env)
1430 target_setup_frame(sig, ka, info, set, env);
1433 static void setup_frame(int sig, struct target_sigaction *ka,
1434 target_sigset_t *set, CPUARMState *env)
1436 target_setup_frame(sig, ka, 0, set, env);
1439 long do_rt_sigreturn(CPUARMState *env)
1441 struct target_rt_sigframe *frame = NULL;
1442 abi_ulong frame_addr = env->xregs[31];
1444 trace_user_do_rt_sigreturn(env, frame_addr);
1445 if (frame_addr & 15) {
1446 goto badframe;
1449 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1450 goto badframe;
1453 if (target_restore_sigframe(env, frame)) {
1454 goto badframe;
1457 if (do_sigaltstack(frame_addr +
1458 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1459 0, get_sp_from_cpustate(env)) == -EFAULT) {
1460 goto badframe;
1463 unlock_user_struct(frame, frame_addr, 0);
1464 return -TARGET_QEMU_ESIGRETURN;
1466 badframe:
1467 unlock_user_struct(frame, frame_addr, 0);
1468 force_sig(TARGET_SIGSEGV);
1469 return 0;
1472 long do_sigreturn(CPUARMState *env)
1474 return do_rt_sigreturn(env);
1477 #elif defined(TARGET_ARM)
1479 struct target_sigcontext {
1480 abi_ulong trap_no;
1481 abi_ulong error_code;
1482 abi_ulong oldmask;
1483 abi_ulong arm_r0;
1484 abi_ulong arm_r1;
1485 abi_ulong arm_r2;
1486 abi_ulong arm_r3;
1487 abi_ulong arm_r4;
1488 abi_ulong arm_r5;
1489 abi_ulong arm_r6;
1490 abi_ulong arm_r7;
1491 abi_ulong arm_r8;
1492 abi_ulong arm_r9;
1493 abi_ulong arm_r10;
1494 abi_ulong arm_fp;
1495 abi_ulong arm_ip;
1496 abi_ulong arm_sp;
1497 abi_ulong arm_lr;
1498 abi_ulong arm_pc;
1499 abi_ulong arm_cpsr;
1500 abi_ulong fault_address;
1503 struct target_ucontext_v1 {
1504 abi_ulong tuc_flags;
1505 abi_ulong tuc_link;
1506 target_stack_t tuc_stack;
1507 struct target_sigcontext tuc_mcontext;
1508 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1511 struct target_ucontext_v2 {
1512 abi_ulong tuc_flags;
1513 abi_ulong tuc_link;
1514 target_stack_t tuc_stack;
1515 struct target_sigcontext tuc_mcontext;
1516 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1517 char __unused[128 - sizeof(target_sigset_t)];
1518 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1521 struct target_user_vfp {
1522 uint64_t fpregs[32];
1523 abi_ulong fpscr;
1526 struct target_user_vfp_exc {
1527 abi_ulong fpexc;
1528 abi_ulong fpinst;
1529 abi_ulong fpinst2;
1532 struct target_vfp_sigframe {
1533 abi_ulong magic;
1534 abi_ulong size;
1535 struct target_user_vfp ufp;
1536 struct target_user_vfp_exc ufp_exc;
1537 } __attribute__((__aligned__(8)));
1539 struct target_iwmmxt_sigframe {
1540 abi_ulong magic;
1541 abi_ulong size;
1542 uint64_t regs[16];
1543 /* Note that not all the coprocessor control registers are stored here */
1544 uint32_t wcssf;
1545 uint32_t wcasf;
1546 uint32_t wcgr0;
1547 uint32_t wcgr1;
1548 uint32_t wcgr2;
1549 uint32_t wcgr3;
1550 } __attribute__((__aligned__(8)));
1552 #define TARGET_VFP_MAGIC 0x56465001
1553 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1555 struct sigframe_v1
1557 struct target_sigcontext sc;
1558 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1559 abi_ulong retcode;
1562 struct sigframe_v2
1564 struct target_ucontext_v2 uc;
1565 abi_ulong retcode;
1568 struct rt_sigframe_v1
1570 abi_ulong pinfo;
1571 abi_ulong puc;
1572 struct target_siginfo info;
1573 struct target_ucontext_v1 uc;
1574 abi_ulong retcode;
1577 struct rt_sigframe_v2
1579 struct target_siginfo info;
1580 struct target_ucontext_v2 uc;
1581 abi_ulong retcode;
1584 #define TARGET_CONFIG_CPU_32 1
1587 * For ARM syscalls, we encode the syscall number into the instruction.
1589 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1590 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1593 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1594 * need two 16-bit instructions.
1596 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1597 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1599 static const abi_ulong retcodes[4] = {
1600 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1601 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1605 static inline int valid_user_regs(CPUARMState *regs)
1607 return 1;
1610 static void
1611 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1612 CPUARMState *env, abi_ulong mask)
1614 __put_user(env->regs[0], &sc->arm_r0);
1615 __put_user(env->regs[1], &sc->arm_r1);
1616 __put_user(env->regs[2], &sc->arm_r2);
1617 __put_user(env->regs[3], &sc->arm_r3);
1618 __put_user(env->regs[4], &sc->arm_r4);
1619 __put_user(env->regs[5], &sc->arm_r5);
1620 __put_user(env->regs[6], &sc->arm_r6);
1621 __put_user(env->regs[7], &sc->arm_r7);
1622 __put_user(env->regs[8], &sc->arm_r8);
1623 __put_user(env->regs[9], &sc->arm_r9);
1624 __put_user(env->regs[10], &sc->arm_r10);
1625 __put_user(env->regs[11], &sc->arm_fp);
1626 __put_user(env->regs[12], &sc->arm_ip);
1627 __put_user(env->regs[13], &sc->arm_sp);
1628 __put_user(env->regs[14], &sc->arm_lr);
1629 __put_user(env->regs[15], &sc->arm_pc);
1630 #ifdef TARGET_CONFIG_CPU_32
1631 __put_user(cpsr_read(env), &sc->arm_cpsr);
1632 #endif
1634 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1635 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1636 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1637 __put_user(mask, &sc->oldmask);
1640 static inline abi_ulong
1641 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1643 unsigned long sp = regs->regs[13];
1646 * This is the X/Open sanctioned signal stack switching.
1648 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1649 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1652 * ATPCS B01 mandates 8-byte alignment
1654 return (sp - framesize) & ~7;
1657 static void
1658 setup_return(CPUARMState *env, struct target_sigaction *ka,
1659 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1661 abi_ulong handler = ka->_sa_handler;
1662 abi_ulong retcode;
1663 int thumb = handler & 1;
1664 uint32_t cpsr = cpsr_read(env);
1666 cpsr &= ~CPSR_IT;
1667 if (thumb) {
1668 cpsr |= CPSR_T;
1669 } else {
1670 cpsr &= ~CPSR_T;
1673 if (ka->sa_flags & TARGET_SA_RESTORER) {
1674 retcode = ka->sa_restorer;
1675 } else {
1676 unsigned int idx = thumb;
1678 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1679 idx += 2;
1682 __put_user(retcodes[idx], rc);
1684 retcode = rc_addr + thumb;
1687 env->regs[0] = usig;
1688 env->regs[13] = frame_addr;
1689 env->regs[14] = retcode;
1690 env->regs[15] = handler & (thumb ? ~1 : ~3);
1691 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
1694 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1696 int i;
1697 struct target_vfp_sigframe *vfpframe;
1698 vfpframe = (struct target_vfp_sigframe *)regspace;
1699 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1700 __put_user(sizeof(*vfpframe), &vfpframe->size);
1701 for (i = 0; i < 32; i++) {
1702 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1704 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1705 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1706 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1707 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1708 return (abi_ulong*)(vfpframe+1);
1711 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1712 CPUARMState *env)
1714 int i;
1715 struct target_iwmmxt_sigframe *iwmmxtframe;
1716 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1717 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1718 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1719 for (i = 0; i < 16; i++) {
1720 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1722 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1723 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
1724 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1725 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1726 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1727 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1728 return (abi_ulong*)(iwmmxtframe+1);
1731 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1732 target_sigset_t *set, CPUARMState *env)
1734 struct target_sigaltstack stack;
1735 int i;
1736 abi_ulong *regspace;
1738 /* Clear all the bits of the ucontext we don't use. */
1739 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1741 memset(&stack, 0, sizeof(stack));
1742 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1743 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1744 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1745 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1747 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1748 /* Save coprocessor signal frame. */
1749 regspace = uc->tuc_regspace;
1750 if (arm_feature(env, ARM_FEATURE_VFP)) {
1751 regspace = setup_sigframe_v2_vfp(regspace, env);
1753 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1754 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1757 /* Write terminating magic word */
1758 __put_user(0, regspace);
1760 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1761 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1765 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1766 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1767 target_sigset_t *set, CPUARMState *regs)
1769 struct sigframe_v1 *frame;
1770 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1771 int i;
1773 trace_user_setup_frame(regs, frame_addr);
1774 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1775 return;
1778 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1780 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1781 __put_user(set->sig[i], &frame->extramask[i - 1]);
1784 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1785 frame_addr + offsetof(struct sigframe_v1, retcode));
1787 unlock_user_struct(frame, frame_addr, 1);
1790 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1791 target_sigset_t *set, CPUARMState *regs)
1793 struct sigframe_v2 *frame;
1794 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1796 trace_user_setup_frame(regs, frame_addr);
1797 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1798 return;
1801 setup_sigframe_v2(&frame->uc, set, regs);
1803 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1804 frame_addr + offsetof(struct sigframe_v2, retcode));
1806 unlock_user_struct(frame, frame_addr, 1);
1809 static void setup_frame(int usig, struct target_sigaction *ka,
1810 target_sigset_t *set, CPUARMState *regs)
1812 if (get_osversion() >= 0x020612) {
1813 setup_frame_v2(usig, ka, set, regs);
1814 } else {
1815 setup_frame_v1(usig, ka, set, regs);
1819 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1820 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1821 target_siginfo_t *info,
1822 target_sigset_t *set, CPUARMState *env)
1824 struct rt_sigframe_v1 *frame;
1825 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1826 struct target_sigaltstack stack;
1827 int i;
1828 abi_ulong info_addr, uc_addr;
1830 trace_user_setup_rt_frame(env, frame_addr);
1831 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1832 return /* 1 */;
1835 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1836 __put_user(info_addr, &frame->pinfo);
1837 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1838 __put_user(uc_addr, &frame->puc);
1839 tswap_siginfo(&frame->info, info);
1841 /* Clear all the bits of the ucontext we don't use. */
1842 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1844 memset(&stack, 0, sizeof(stack));
1845 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1846 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1847 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1848 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1850 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1851 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1852 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1855 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1856 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1858 env->regs[1] = info_addr;
1859 env->regs[2] = uc_addr;
1861 unlock_user_struct(frame, frame_addr, 1);
1864 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1865 target_siginfo_t *info,
1866 target_sigset_t *set, CPUARMState *env)
1868 struct rt_sigframe_v2 *frame;
1869 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1870 abi_ulong info_addr, uc_addr;
1872 trace_user_setup_rt_frame(env, frame_addr);
1873 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1874 return /* 1 */;
1877 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1878 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1879 tswap_siginfo(&frame->info, info);
1881 setup_sigframe_v2(&frame->uc, set, env);
1883 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1884 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1886 env->regs[1] = info_addr;
1887 env->regs[2] = uc_addr;
1889 unlock_user_struct(frame, frame_addr, 1);
1892 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1893 target_siginfo_t *info,
1894 target_sigset_t *set, CPUARMState *env)
1896 if (get_osversion() >= 0x020612) {
1897 setup_rt_frame_v2(usig, ka, info, set, env);
1898 } else {
1899 setup_rt_frame_v1(usig, ka, info, set, env);
1903 static int
1904 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1906 int err = 0;
1907 uint32_t cpsr;
1909 __get_user(env->regs[0], &sc->arm_r0);
1910 __get_user(env->regs[1], &sc->arm_r1);
1911 __get_user(env->regs[2], &sc->arm_r2);
1912 __get_user(env->regs[3], &sc->arm_r3);
1913 __get_user(env->regs[4], &sc->arm_r4);
1914 __get_user(env->regs[5], &sc->arm_r5);
1915 __get_user(env->regs[6], &sc->arm_r6);
1916 __get_user(env->regs[7], &sc->arm_r7);
1917 __get_user(env->regs[8], &sc->arm_r8);
1918 __get_user(env->regs[9], &sc->arm_r9);
1919 __get_user(env->regs[10], &sc->arm_r10);
1920 __get_user(env->regs[11], &sc->arm_fp);
1921 __get_user(env->regs[12], &sc->arm_ip);
1922 __get_user(env->regs[13], &sc->arm_sp);
1923 __get_user(env->regs[14], &sc->arm_lr);
1924 __get_user(env->regs[15], &sc->arm_pc);
1925 #ifdef TARGET_CONFIG_CPU_32
1926 __get_user(cpsr, &sc->arm_cpsr);
1927 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
1928 #endif
1930 err |= !valid_user_regs(env);
1932 return err;
1935 static long do_sigreturn_v1(CPUARMState *env)
1937 abi_ulong frame_addr;
1938 struct sigframe_v1 *frame = NULL;
1939 target_sigset_t set;
1940 sigset_t host_set;
1941 int i;
1944 * Since we stacked the signal on a 64-bit boundary,
1945 * then 'sp' should be word aligned here. If it's
1946 * not, then the user is trying to mess with us.
1948 frame_addr = env->regs[13];
1949 trace_user_do_sigreturn(env, frame_addr);
1950 if (frame_addr & 7) {
1951 goto badframe;
1954 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1955 goto badframe;
1958 __get_user(set.sig[0], &frame->sc.oldmask);
1959 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1960 __get_user(set.sig[i], &frame->extramask[i - 1]);
1963 target_to_host_sigset_internal(&host_set, &set);
1964 set_sigmask(&host_set);
1966 if (restore_sigcontext(env, &frame->sc)) {
1967 goto badframe;
1970 #if 0
1971 /* Send SIGTRAP if we're single-stepping */
1972 if (ptrace_cancel_bpt(current))
1973 send_sig(SIGTRAP, current, 1);
1974 #endif
1975 unlock_user_struct(frame, frame_addr, 0);
1976 return -TARGET_QEMU_ESIGRETURN;
1978 badframe:
1979 force_sig(TARGET_SIGSEGV /* , current */);
1980 return 0;
1983 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1985 int i;
1986 abi_ulong magic, sz;
1987 uint32_t fpscr, fpexc;
1988 struct target_vfp_sigframe *vfpframe;
1989 vfpframe = (struct target_vfp_sigframe *)regspace;
1991 __get_user(magic, &vfpframe->magic);
1992 __get_user(sz, &vfpframe->size);
1993 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1994 return 0;
1996 for (i = 0; i < 32; i++) {
1997 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1999 __get_user(fpscr, &vfpframe->ufp.fpscr);
2000 vfp_set_fpscr(env, fpscr);
2001 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
2002 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
2003 * and the exception flag is cleared
2005 fpexc |= (1 << 30);
2006 fpexc &= ~((1 << 31) | (1 << 28));
2007 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2008 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2009 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2010 return (abi_ulong*)(vfpframe + 1);
2013 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2014 abi_ulong *regspace)
2016 int i;
2017 abi_ulong magic, sz;
2018 struct target_iwmmxt_sigframe *iwmmxtframe;
2019 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2021 __get_user(magic, &iwmmxtframe->magic);
2022 __get_user(sz, &iwmmxtframe->size);
2023 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2024 return 0;
2026 for (i = 0; i < 16; i++) {
2027 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2029 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2030 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf);
2031 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2032 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2033 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2034 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2035 return (abi_ulong*)(iwmmxtframe + 1);
2038 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
2039 struct target_ucontext_v2 *uc)
2041 sigset_t host_set;
2042 abi_ulong *regspace;
2044 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2045 set_sigmask(&host_set);
2047 if (restore_sigcontext(env, &uc->tuc_mcontext))
2048 return 1;
2050 /* Restore coprocessor signal frame */
2051 regspace = uc->tuc_regspace;
2052 if (arm_feature(env, ARM_FEATURE_VFP)) {
2053 regspace = restore_sigframe_v2_vfp(env, regspace);
2054 if (!regspace) {
2055 return 1;
2058 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2059 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2060 if (!regspace) {
2061 return 1;
2065 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2066 return 1;
2068 #if 0
2069 /* Send SIGTRAP if we're single-stepping */
2070 if (ptrace_cancel_bpt(current))
2071 send_sig(SIGTRAP, current, 1);
2072 #endif
2074 return 0;
2077 static long do_sigreturn_v2(CPUARMState *env)
2079 abi_ulong frame_addr;
2080 struct sigframe_v2 *frame = NULL;
2083 * Since we stacked the signal on a 64-bit boundary,
2084 * then 'sp' should be word aligned here. If it's
2085 * not, then the user is trying to mess with us.
2087 frame_addr = env->regs[13];
2088 trace_user_do_sigreturn(env, frame_addr);
2089 if (frame_addr & 7) {
2090 goto badframe;
2093 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2094 goto badframe;
2097 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2098 goto badframe;
2101 unlock_user_struct(frame, frame_addr, 0);
2102 return -TARGET_QEMU_ESIGRETURN;
2104 badframe:
2105 unlock_user_struct(frame, frame_addr, 0);
2106 force_sig(TARGET_SIGSEGV /* , current */);
2107 return 0;
2110 long do_sigreturn(CPUARMState *env)
2112 if (get_osversion() >= 0x020612) {
2113 return do_sigreturn_v2(env);
2114 } else {
2115 return do_sigreturn_v1(env);
2119 static long do_rt_sigreturn_v1(CPUARMState *env)
2121 abi_ulong frame_addr;
2122 struct rt_sigframe_v1 *frame = NULL;
2123 sigset_t host_set;
2126 * Since we stacked the signal on a 64-bit boundary,
2127 * then 'sp' should be word aligned here. If it's
2128 * not, then the user is trying to mess with us.
2130 frame_addr = env->regs[13];
2131 trace_user_do_rt_sigreturn(env, frame_addr);
2132 if (frame_addr & 7) {
2133 goto badframe;
2136 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2137 goto badframe;
2140 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2141 set_sigmask(&host_set);
2143 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2144 goto badframe;
2147 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2148 goto badframe;
2150 #if 0
2151 /* Send SIGTRAP if we're single-stepping */
2152 if (ptrace_cancel_bpt(current))
2153 send_sig(SIGTRAP, current, 1);
2154 #endif
2155 unlock_user_struct(frame, frame_addr, 0);
2156 return -TARGET_QEMU_ESIGRETURN;
2158 badframe:
2159 unlock_user_struct(frame, frame_addr, 0);
2160 force_sig(TARGET_SIGSEGV /* , current */);
2161 return 0;
2164 static long do_rt_sigreturn_v2(CPUARMState *env)
2166 abi_ulong frame_addr;
2167 struct rt_sigframe_v2 *frame = NULL;
2170 * Since we stacked the signal on a 64-bit boundary,
2171 * then 'sp' should be word aligned here. If it's
2172 * not, then the user is trying to mess with us.
2174 frame_addr = env->regs[13];
2175 trace_user_do_rt_sigreturn(env, frame_addr);
2176 if (frame_addr & 7) {
2177 goto badframe;
2180 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2181 goto badframe;
2184 if (do_sigframe_return_v2(env, frame_addr, &frame->uc)) {
2185 goto badframe;
2188 unlock_user_struct(frame, frame_addr, 0);
2189 return -TARGET_QEMU_ESIGRETURN;
2191 badframe:
2192 unlock_user_struct(frame, frame_addr, 0);
2193 force_sig(TARGET_SIGSEGV /* , current */);
2194 return 0;
2197 long do_rt_sigreturn(CPUARMState *env)
2199 if (get_osversion() >= 0x020612) {
2200 return do_rt_sigreturn_v2(env);
2201 } else {
2202 return do_rt_sigreturn_v1(env);
2206 #elif defined(TARGET_SPARC)
2208 #define __SUNOS_MAXWIN 31
2210 /* This is what SunOS does, so shall I. */
2211 struct target_sigcontext {
2212 abi_ulong sigc_onstack; /* state to restore */
2214 abi_ulong sigc_mask; /* sigmask to restore */
2215 abi_ulong sigc_sp; /* stack pointer */
2216 abi_ulong sigc_pc; /* program counter */
2217 abi_ulong sigc_npc; /* next program counter */
2218 abi_ulong sigc_psr; /* for condition codes etc */
2219 abi_ulong sigc_g1; /* User uses these two registers */
2220 abi_ulong sigc_o0; /* within the trampoline code. */
2222 /* Now comes information regarding the users window set
2223 * at the time of the signal.
2225 abi_ulong sigc_oswins; /* outstanding windows */
2227 /* stack ptrs for each regwin buf */
2228 char *sigc_spbuf[__SUNOS_MAXWIN];
2230 /* Windows to restore after signal */
2231 struct {
2232 abi_ulong locals[8];
2233 abi_ulong ins[8];
2234 } sigc_wbuf[__SUNOS_MAXWIN];
2236 /* A Sparc stack frame */
2237 struct sparc_stackf {
2238 abi_ulong locals[8];
2239 abi_ulong ins[8];
2240 /* It's simpler to treat fp and callers_pc as elements of ins[]
2241 * since we never need to access them ourselves.
2243 char *structptr;
2244 abi_ulong xargs[6];
2245 abi_ulong xxargs[1];
2248 typedef struct {
2249 struct {
2250 abi_ulong psr;
2251 abi_ulong pc;
2252 abi_ulong npc;
2253 abi_ulong y;
2254 abi_ulong u_regs[16]; /* globals and ins */
2255 } si_regs;
2256 int si_mask;
2257 } __siginfo_t;
2259 typedef struct {
2260 abi_ulong si_float_regs[32];
2261 unsigned long si_fsr;
2262 unsigned long si_fpqdepth;
2263 struct {
2264 unsigned long *insn_addr;
2265 unsigned long insn;
2266 } si_fpqueue [16];
2267 } qemu_siginfo_fpu_t;
2270 struct target_signal_frame {
2271 struct sparc_stackf ss;
2272 __siginfo_t info;
2273 abi_ulong fpu_save;
2274 abi_ulong insns[2] __attribute__ ((aligned (8)));
2275 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2276 abi_ulong extra_size; /* Should be 0 */
2277 qemu_siginfo_fpu_t fpu_state;
2279 struct target_rt_signal_frame {
2280 struct sparc_stackf ss;
2281 siginfo_t info;
2282 abi_ulong regs[20];
2283 sigset_t mask;
2284 abi_ulong fpu_save;
2285 unsigned int insns[2];
2286 stack_t stack;
2287 unsigned int extra_size; /* Should be 0 */
2288 qemu_siginfo_fpu_t fpu_state;
2291 #define UREG_O0 16
2292 #define UREG_O6 22
2293 #define UREG_I0 0
2294 #define UREG_I1 1
2295 #define UREG_I2 2
2296 #define UREG_I3 3
2297 #define UREG_I4 4
2298 #define UREG_I5 5
2299 #define UREG_I6 6
2300 #define UREG_I7 7
2301 #define UREG_L0 8
2302 #define UREG_FP UREG_I6
2303 #define UREG_SP UREG_O6
2305 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2306 CPUSPARCState *env,
2307 unsigned long framesize)
2309 abi_ulong sp;
2311 sp = env->regwptr[UREG_FP];
2313 /* This is the X/Open sanctioned signal stack switching. */
2314 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2315 if (!on_sig_stack(sp)
2316 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2317 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2320 return sp - framesize;
2323 static int
2324 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2326 int err = 0, i;
2328 __put_user(env->psr, &si->si_regs.psr);
2329 __put_user(env->pc, &si->si_regs.pc);
2330 __put_user(env->npc, &si->si_regs.npc);
2331 __put_user(env->y, &si->si_regs.y);
2332 for (i=0; i < 8; i++) {
2333 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2335 for (i=0; i < 8; i++) {
2336 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2338 __put_user(mask, &si->si_mask);
2339 return err;
2342 #if 0
2343 static int
2344 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2345 CPUSPARCState *env, unsigned long mask)
2347 int err = 0;
2349 __put_user(mask, &sc->sigc_mask);
2350 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2351 __put_user(env->pc, &sc->sigc_pc);
2352 __put_user(env->npc, &sc->sigc_npc);
2353 __put_user(env->psr, &sc->sigc_psr);
2354 __put_user(env->gregs[1], &sc->sigc_g1);
2355 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2357 return err;
2359 #endif
2360 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2362 static void setup_frame(int sig, struct target_sigaction *ka,
2363 target_sigset_t *set, CPUSPARCState *env)
2365 abi_ulong sf_addr;
2366 struct target_signal_frame *sf;
2367 int sigframe_size, err, i;
2369 /* 1. Make sure everything is clean */
2370 //synchronize_user_stack();
2372 sigframe_size = NF_ALIGNEDSZ;
2373 sf_addr = get_sigframe(ka, env, sigframe_size);
2374 trace_user_setup_frame(env, sf_addr);
2376 sf = lock_user(VERIFY_WRITE, sf_addr,
2377 sizeof(struct target_signal_frame), 0);
2378 if (!sf) {
2379 goto sigsegv;
2381 #if 0
2382 if (invalid_frame_pointer(sf, sigframe_size))
2383 goto sigill_and_return;
2384 #endif
2385 /* 2. Save the current process state */
2386 err = setup___siginfo(&sf->info, env, set->sig[0]);
2387 __put_user(0, &sf->extra_size);
2389 //save_fpu_state(regs, &sf->fpu_state);
2390 //__put_user(&sf->fpu_state, &sf->fpu_save);
2392 __put_user(set->sig[0], &sf->info.si_mask);
2393 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2394 __put_user(set->sig[i + 1], &sf->extramask[i]);
2397 for (i = 0; i < 8; i++) {
2398 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2400 for (i = 0; i < 8; i++) {
2401 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2403 if (err)
2404 goto sigsegv;
2406 /* 3. signal handler back-trampoline and parameters */
2407 env->regwptr[UREG_FP] = sf_addr;
2408 env->regwptr[UREG_I0] = sig;
2409 env->regwptr[UREG_I1] = sf_addr +
2410 offsetof(struct target_signal_frame, info);
2411 env->regwptr[UREG_I2] = sf_addr +
2412 offsetof(struct target_signal_frame, info);
2414 /* 4. signal handler */
2415 env->pc = ka->_sa_handler;
2416 env->npc = (env->pc + 4);
2417 /* 5. return to kernel instructions */
2418 if (ka->sa_restorer) {
2419 env->regwptr[UREG_I7] = ka->sa_restorer;
2420 } else {
2421 uint32_t val32;
2423 env->regwptr[UREG_I7] = sf_addr +
2424 offsetof(struct target_signal_frame, insns) - 2 * 4;
2426 /* mov __NR_sigreturn, %g1 */
2427 val32 = 0x821020d8;
2428 __put_user(val32, &sf->insns[0]);
2430 /* t 0x10 */
2431 val32 = 0x91d02010;
2432 __put_user(val32, &sf->insns[1]);
2433 if (err)
2434 goto sigsegv;
2436 /* Flush instruction space. */
2437 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2438 // tb_flush(env);
2440 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2441 return;
2442 #if 0
2443 sigill_and_return:
2444 force_sig(TARGET_SIGILL);
2445 #endif
2446 sigsegv:
2447 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2448 force_sig(TARGET_SIGSEGV);
2451 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2452 target_siginfo_t *info,
2453 target_sigset_t *set, CPUSPARCState *env)
2455 fprintf(stderr, "setup_rt_frame: not implemented\n");
2458 long do_sigreturn(CPUSPARCState *env)
2460 abi_ulong sf_addr;
2461 struct target_signal_frame *sf;
2462 uint32_t up_psr, pc, npc;
2463 target_sigset_t set;
2464 sigset_t host_set;
2465 int err=0, i;
2467 sf_addr = env->regwptr[UREG_FP];
2468 trace_user_do_sigreturn(env, sf_addr);
2469 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2470 goto segv_and_exit;
2473 /* 1. Make sure we are not getting garbage from the user */
2475 if (sf_addr & 3)
2476 goto segv_and_exit;
2478 __get_user(pc, &sf->info.si_regs.pc);
2479 __get_user(npc, &sf->info.si_regs.npc);
2481 if ((pc | npc) & 3) {
2482 goto segv_and_exit;
2485 /* 2. Restore the state */
2486 __get_user(up_psr, &sf->info.si_regs.psr);
2488 /* User can only change condition codes and FPU enabling in %psr. */
2489 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2490 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2492 env->pc = pc;
2493 env->npc = npc;
2494 __get_user(env->y, &sf->info.si_regs.y);
2495 for (i=0; i < 8; i++) {
2496 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2498 for (i=0; i < 8; i++) {
2499 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2502 /* FIXME: implement FPU save/restore:
2503 * __get_user(fpu_save, &sf->fpu_save);
2504 * if (fpu_save)
2505 * err |= restore_fpu_state(env, fpu_save);
2508 /* This is pretty much atomic, no amount locking would prevent
2509 * the races which exist anyways.
2511 __get_user(set.sig[0], &sf->info.si_mask);
2512 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2513 __get_user(set.sig[i], &sf->extramask[i - 1]);
2516 target_to_host_sigset_internal(&host_set, &set);
2517 set_sigmask(&host_set);
2519 if (err) {
2520 goto segv_and_exit;
2522 unlock_user_struct(sf, sf_addr, 0);
2523 return -TARGET_QEMU_ESIGRETURN;
2525 segv_and_exit:
2526 unlock_user_struct(sf, sf_addr, 0);
2527 force_sig(TARGET_SIGSEGV);
2530 long do_rt_sigreturn(CPUSPARCState *env)
2532 trace_user_do_rt_sigreturn(env, 0);
2533 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2534 return -TARGET_ENOSYS;
2537 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2538 #define MC_TSTATE 0
2539 #define MC_PC 1
2540 #define MC_NPC 2
2541 #define MC_Y 3
2542 #define MC_G1 4
2543 #define MC_G2 5
2544 #define MC_G3 6
2545 #define MC_G4 7
2546 #define MC_G5 8
2547 #define MC_G6 9
2548 #define MC_G7 10
2549 #define MC_O0 11
2550 #define MC_O1 12
2551 #define MC_O2 13
2552 #define MC_O3 14
2553 #define MC_O4 15
2554 #define MC_O5 16
2555 #define MC_O6 17
2556 #define MC_O7 18
2557 #define MC_NGREG 19
2559 typedef abi_ulong target_mc_greg_t;
2560 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2562 struct target_mc_fq {
2563 abi_ulong *mcfq_addr;
2564 uint32_t mcfq_insn;
2567 struct target_mc_fpu {
2568 union {
2569 uint32_t sregs[32];
2570 uint64_t dregs[32];
2571 //uint128_t qregs[16];
2572 } mcfpu_fregs;
2573 abi_ulong mcfpu_fsr;
2574 abi_ulong mcfpu_fprs;
2575 abi_ulong mcfpu_gsr;
2576 struct target_mc_fq *mcfpu_fq;
2577 unsigned char mcfpu_qcnt;
2578 unsigned char mcfpu_qentsz;
2579 unsigned char mcfpu_enab;
2581 typedef struct target_mc_fpu target_mc_fpu_t;
2583 typedef struct {
2584 target_mc_gregset_t mc_gregs;
2585 target_mc_greg_t mc_fp;
2586 target_mc_greg_t mc_i7;
2587 target_mc_fpu_t mc_fpregs;
2588 } target_mcontext_t;
2590 struct target_ucontext {
2591 struct target_ucontext *tuc_link;
2592 abi_ulong tuc_flags;
2593 target_sigset_t tuc_sigmask;
2594 target_mcontext_t tuc_mcontext;
2597 /* A V9 register window */
2598 struct target_reg_window {
2599 abi_ulong locals[8];
2600 abi_ulong ins[8];
2603 #define TARGET_STACK_BIAS 2047
2605 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2606 void sparc64_set_context(CPUSPARCState *env)
2608 abi_ulong ucp_addr;
2609 struct target_ucontext *ucp;
2610 target_mc_gregset_t *grp;
2611 abi_ulong pc, npc, tstate;
2612 abi_ulong fp, i7, w_addr;
2613 unsigned int i;
2615 ucp_addr = env->regwptr[UREG_I0];
2616 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2617 goto do_sigsegv;
2619 grp = &ucp->tuc_mcontext.mc_gregs;
2620 __get_user(pc, &((*grp)[MC_PC]));
2621 __get_user(npc, &((*grp)[MC_NPC]));
2622 if ((pc | npc) & 3) {
2623 goto do_sigsegv;
2625 if (env->regwptr[UREG_I1]) {
2626 target_sigset_t target_set;
2627 sigset_t set;
2629 if (TARGET_NSIG_WORDS == 1) {
2630 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2631 } else {
2632 abi_ulong *src, *dst;
2633 src = ucp->tuc_sigmask.sig;
2634 dst = target_set.sig;
2635 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2636 __get_user(*dst, src);
2639 target_to_host_sigset_internal(&set, &target_set);
2640 set_sigmask(&set);
2642 env->pc = pc;
2643 env->npc = npc;
2644 __get_user(env->y, &((*grp)[MC_Y]));
2645 __get_user(tstate, &((*grp)[MC_TSTATE]));
2646 env->asi = (tstate >> 24) & 0xff;
2647 cpu_put_ccr(env, tstate >> 32);
2648 cpu_put_cwp64(env, tstate & 0x1f);
2649 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2650 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2651 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2652 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2653 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2654 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2655 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2656 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2657 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2658 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2659 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2660 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2661 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2662 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2663 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2665 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2666 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2668 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2669 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2670 abi_ulong) != 0) {
2671 goto do_sigsegv;
2673 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2674 abi_ulong) != 0) {
2675 goto do_sigsegv;
2677 /* FIXME this does not match how the kernel handles the FPU in
2678 * its sparc64_set_context implementation. In particular the FPU
2679 * is only restored if fenab is non-zero in:
2680 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2682 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2684 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2685 for (i = 0; i < 64; i++, src++) {
2686 if (i & 1) {
2687 __get_user(env->fpr[i/2].l.lower, src);
2688 } else {
2689 __get_user(env->fpr[i/2].l.upper, src);
2693 __get_user(env->fsr,
2694 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2695 __get_user(env->gsr,
2696 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2697 unlock_user_struct(ucp, ucp_addr, 0);
2698 return;
2699 do_sigsegv:
2700 unlock_user_struct(ucp, ucp_addr, 0);
2701 force_sig(TARGET_SIGSEGV);
2704 void sparc64_get_context(CPUSPARCState *env)
2706 abi_ulong ucp_addr;
2707 struct target_ucontext *ucp;
2708 target_mc_gregset_t *grp;
2709 target_mcontext_t *mcp;
2710 abi_ulong fp, i7, w_addr;
2711 int err;
2712 unsigned int i;
2713 target_sigset_t target_set;
2714 sigset_t set;
2716 ucp_addr = env->regwptr[UREG_I0];
2717 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2718 goto do_sigsegv;
2721 mcp = &ucp->tuc_mcontext;
2722 grp = &mcp->mc_gregs;
2724 /* Skip over the trap instruction, first. */
2725 env->pc = env->npc;
2726 env->npc += 4;
2728 /* If we're only reading the signal mask then do_sigprocmask()
2729 * is guaranteed not to fail, which is important because we don't
2730 * have any way to signal a failure or restart this operation since
2731 * this is not a normal syscall.
2733 err = do_sigprocmask(0, NULL, &set);
2734 assert(err == 0);
2735 host_to_target_sigset_internal(&target_set, &set);
2736 if (TARGET_NSIG_WORDS == 1) {
2737 __put_user(target_set.sig[0],
2738 (abi_ulong *)&ucp->tuc_sigmask);
2739 } else {
2740 abi_ulong *src, *dst;
2741 src = target_set.sig;
2742 dst = ucp->tuc_sigmask.sig;
2743 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2744 __put_user(*src, dst);
2746 if (err)
2747 goto do_sigsegv;
2750 /* XXX: tstate must be saved properly */
2751 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2752 __put_user(env->pc, &((*grp)[MC_PC]));
2753 __put_user(env->npc, &((*grp)[MC_NPC]));
2754 __put_user(env->y, &((*grp)[MC_Y]));
2755 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2756 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2757 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2758 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2759 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2760 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2761 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2762 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2763 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2764 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2765 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2766 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2767 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2768 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2769 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2771 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2772 fp = i7 = 0;
2773 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2774 abi_ulong) != 0) {
2775 goto do_sigsegv;
2777 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2778 abi_ulong) != 0) {
2779 goto do_sigsegv;
2781 __put_user(fp, &(mcp->mc_fp));
2782 __put_user(i7, &(mcp->mc_i7));
2785 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2786 for (i = 0; i < 64; i++, dst++) {
2787 if (i & 1) {
2788 __put_user(env->fpr[i/2].l.lower, dst);
2789 } else {
2790 __put_user(env->fpr[i/2].l.upper, dst);
2794 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2795 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2796 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2798 if (err)
2799 goto do_sigsegv;
2800 unlock_user_struct(ucp, ucp_addr, 1);
2801 return;
2802 do_sigsegv:
2803 unlock_user_struct(ucp, ucp_addr, 1);
2804 force_sig(TARGET_SIGSEGV);
2806 #endif
2807 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2809 # if defined(TARGET_ABI_MIPSO32)
2810 struct target_sigcontext {
2811 uint32_t sc_regmask; /* Unused */
2812 uint32_t sc_status;
2813 uint64_t sc_pc;
2814 uint64_t sc_regs[32];
2815 uint64_t sc_fpregs[32];
2816 uint32_t sc_ownedfp; /* Unused */
2817 uint32_t sc_fpc_csr;
2818 uint32_t sc_fpc_eir; /* Unused */
2819 uint32_t sc_used_math;
2820 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2821 uint32_t pad0;
2822 uint64_t sc_mdhi;
2823 uint64_t sc_mdlo;
2824 target_ulong sc_hi1; /* Was sc_cause */
2825 target_ulong sc_lo1; /* Was sc_badvaddr */
2826 target_ulong sc_hi2; /* Was sc_sigset[4] */
2827 target_ulong sc_lo2;
2828 target_ulong sc_hi3;
2829 target_ulong sc_lo3;
2831 # else /* N32 || N64 */
2832 struct target_sigcontext {
2833 uint64_t sc_regs[32];
2834 uint64_t sc_fpregs[32];
2835 uint64_t sc_mdhi;
2836 uint64_t sc_hi1;
2837 uint64_t sc_hi2;
2838 uint64_t sc_hi3;
2839 uint64_t sc_mdlo;
2840 uint64_t sc_lo1;
2841 uint64_t sc_lo2;
2842 uint64_t sc_lo3;
2843 uint64_t sc_pc;
2844 uint32_t sc_fpc_csr;
2845 uint32_t sc_used_math;
2846 uint32_t sc_dsp;
2847 uint32_t sc_reserved;
2849 # endif /* O32 */
2851 struct sigframe {
2852 uint32_t sf_ass[4]; /* argument save space for o32 */
2853 uint32_t sf_code[2]; /* signal trampoline */
2854 struct target_sigcontext sf_sc;
2855 target_sigset_t sf_mask;
2858 struct target_ucontext {
2859 target_ulong tuc_flags;
2860 target_ulong tuc_link;
2861 target_stack_t tuc_stack;
2862 target_ulong pad0;
2863 struct target_sigcontext tuc_mcontext;
2864 target_sigset_t tuc_sigmask;
2867 struct target_rt_sigframe {
2868 uint32_t rs_ass[4]; /* argument save space for o32 */
2869 uint32_t rs_code[2]; /* signal trampoline */
2870 struct target_siginfo rs_info;
2871 struct target_ucontext rs_uc;
2874 /* Install trampoline to jump back from signal handler */
2875 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2877 int err = 0;
2880 * Set up the return code ...
2882 * li v0, __NR__foo_sigreturn
2883 * syscall
2886 __put_user(0x24020000 + syscall, tramp + 0);
2887 __put_user(0x0000000c , tramp + 1);
2888 return err;
2891 static inline void setup_sigcontext(CPUMIPSState *regs,
2892 struct target_sigcontext *sc)
2894 int i;
2896 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2897 regs->hflags &= ~MIPS_HFLAG_BMASK;
2899 __put_user(0, &sc->sc_regs[0]);
2900 for (i = 1; i < 32; ++i) {
2901 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2904 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2905 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2907 /* Rather than checking for dsp existence, always copy. The storage
2908 would just be garbage otherwise. */
2909 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2910 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2911 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2912 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2913 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2914 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2916 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2917 __put_user(dsp, &sc->sc_dsp);
2920 __put_user(1, &sc->sc_used_math);
2922 for (i = 0; i < 32; ++i) {
2923 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2927 static inline void
2928 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2930 int i;
2932 __get_user(regs->CP0_EPC, &sc->sc_pc);
2934 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2935 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2937 for (i = 1; i < 32; ++i) {
2938 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2941 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2942 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2943 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2944 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2945 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2946 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2948 uint32_t dsp;
2949 __get_user(dsp, &sc->sc_dsp);
2950 cpu_wrdsp(dsp, 0x3ff, regs);
2953 for (i = 0; i < 32; ++i) {
2954 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2959 * Determine which stack to use..
2961 static inline abi_ulong
2962 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2964 unsigned long sp;
2966 /* Default to using normal stack */
2967 sp = regs->active_tc.gpr[29];
2970 * FPU emulator may have its own trampoline active just
2971 * above the user stack, 16-bytes before the next lowest
2972 * 16 byte boundary. Try to avoid trashing it.
2974 sp -= 32;
2976 /* This is the X/Open sanctioned signal stack switching. */
2977 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2978 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2981 return (sp - frame_size) & ~7;
2984 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2986 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2987 env->hflags &= ~MIPS_HFLAG_M16;
2988 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2989 env->active_tc.PC &= ~(target_ulong) 1;
2993 # if defined(TARGET_ABI_MIPSO32)
2994 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2995 static void setup_frame(int sig, struct target_sigaction * ka,
2996 target_sigset_t *set, CPUMIPSState *regs)
2998 struct sigframe *frame;
2999 abi_ulong frame_addr;
3000 int i;
3002 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
3003 trace_user_setup_frame(regs, frame_addr);
3004 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3005 goto give_sigsegv;
3008 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3010 setup_sigcontext(regs, &frame->sf_sc);
3012 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3013 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3017 * Arguments to signal handler:
3019 * a0 = signal number
3020 * a1 = 0 (should be cause)
3021 * a2 = pointer to struct sigcontext
3023 * $25 and PC point to the signal handler, $29 points to the
3024 * struct sigframe.
3026 regs->active_tc.gpr[ 4] = sig;
3027 regs->active_tc.gpr[ 5] = 0;
3028 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3029 regs->active_tc.gpr[29] = frame_addr;
3030 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3031 /* The original kernel code sets CP0_EPC to the handler
3032 * since it returns to userland using eret
3033 * we cannot do this here, and we must set PC directly */
3034 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3035 mips_set_hflags_isa_mode_from_pc(regs);
3036 unlock_user_struct(frame, frame_addr, 1);
3037 return;
3039 give_sigsegv:
3040 force_sig(TARGET_SIGSEGV/*, current*/);
3043 long do_sigreturn(CPUMIPSState *regs)
3045 struct sigframe *frame;
3046 abi_ulong frame_addr;
3047 sigset_t blocked;
3048 target_sigset_t target_set;
3049 int i;
3051 frame_addr = regs->active_tc.gpr[29];
3052 trace_user_do_sigreturn(regs, frame_addr);
3053 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3054 goto badframe;
3056 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3057 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3060 target_to_host_sigset_internal(&blocked, &target_set);
3061 set_sigmask(&blocked);
3063 restore_sigcontext(regs, &frame->sf_sc);
3065 #if 0
3067 * Don't let your children do this ...
3069 __asm__ __volatile__(
3070 "move\t$29, %0\n\t"
3071 "j\tsyscall_exit"
3072 :/* no outputs */
3073 :"r" (&regs));
3074 /* Unreached */
3075 #endif
3077 regs->active_tc.PC = regs->CP0_EPC;
3078 mips_set_hflags_isa_mode_from_pc(regs);
3079 /* I am not sure this is right, but it seems to work
3080 * maybe a problem with nested signals ? */
3081 regs->CP0_EPC = 0;
3082 return -TARGET_QEMU_ESIGRETURN;
3084 badframe:
3085 force_sig(TARGET_SIGSEGV/*, current*/);
3086 return 0;
3088 # endif /* O32 */
3090 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3091 target_siginfo_t *info,
3092 target_sigset_t *set, CPUMIPSState *env)
3094 struct target_rt_sigframe *frame;
3095 abi_ulong frame_addr;
3096 int i;
3098 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3099 trace_user_setup_rt_frame(env, frame_addr);
3100 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3101 goto give_sigsegv;
3104 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3106 tswap_siginfo(&frame->rs_info, info);
3108 __put_user(0, &frame->rs_uc.tuc_flags);
3109 __put_user(0, &frame->rs_uc.tuc_link);
3110 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3111 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3112 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3113 &frame->rs_uc.tuc_stack.ss_flags);
3115 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3117 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3118 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3122 * Arguments to signal handler:
3124 * a0 = signal number
3125 * a1 = pointer to siginfo_t
3126 * a2 = pointer to struct ucontext
3128 * $25 and PC point to the signal handler, $29 points to the
3129 * struct sigframe.
3131 env->active_tc.gpr[ 4] = sig;
3132 env->active_tc.gpr[ 5] = frame_addr
3133 + offsetof(struct target_rt_sigframe, rs_info);
3134 env->active_tc.gpr[ 6] = frame_addr
3135 + offsetof(struct target_rt_sigframe, rs_uc);
3136 env->active_tc.gpr[29] = frame_addr;
3137 env->active_tc.gpr[31] = frame_addr
3138 + offsetof(struct target_rt_sigframe, rs_code);
3139 /* The original kernel code sets CP0_EPC to the handler
3140 * since it returns to userland using eret
3141 * we cannot do this here, and we must set PC directly */
3142 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3143 mips_set_hflags_isa_mode_from_pc(env);
3144 unlock_user_struct(frame, frame_addr, 1);
3145 return;
3147 give_sigsegv:
3148 unlock_user_struct(frame, frame_addr, 1);
3149 force_sig(TARGET_SIGSEGV/*, current*/);
3152 long do_rt_sigreturn(CPUMIPSState *env)
3154 struct target_rt_sigframe *frame;
3155 abi_ulong frame_addr;
3156 sigset_t blocked;
3158 frame_addr = env->active_tc.gpr[29];
3159 trace_user_do_rt_sigreturn(env, frame_addr);
3160 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3161 goto badframe;
3164 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3165 set_sigmask(&blocked);
3167 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3169 if (do_sigaltstack(frame_addr +
3170 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3171 0, get_sp_from_cpustate(env)) == -EFAULT)
3172 goto badframe;
3174 env->active_tc.PC = env->CP0_EPC;
3175 mips_set_hflags_isa_mode_from_pc(env);
3176 /* I am not sure this is right, but it seems to work
3177 * maybe a problem with nested signals ? */
3178 env->CP0_EPC = 0;
3179 return -TARGET_QEMU_ESIGRETURN;
3181 badframe:
3182 force_sig(TARGET_SIGSEGV/*, current*/);
3183 return 0;
3186 #elif defined(TARGET_SH4)
3189 * code and data structures from linux kernel:
3190 * include/asm-sh/sigcontext.h
3191 * arch/sh/kernel/signal.c
3194 struct target_sigcontext {
3195 target_ulong oldmask;
3197 /* CPU registers */
3198 target_ulong sc_gregs[16];
3199 target_ulong sc_pc;
3200 target_ulong sc_pr;
3201 target_ulong sc_sr;
3202 target_ulong sc_gbr;
3203 target_ulong sc_mach;
3204 target_ulong sc_macl;
3206 /* FPU registers */
3207 target_ulong sc_fpregs[16];
3208 target_ulong sc_xfpregs[16];
3209 unsigned int sc_fpscr;
3210 unsigned int sc_fpul;
3211 unsigned int sc_ownedfp;
3214 struct target_sigframe
3216 struct target_sigcontext sc;
3217 target_ulong extramask[TARGET_NSIG_WORDS-1];
3218 uint16_t retcode[3];
3222 struct target_ucontext {
3223 target_ulong tuc_flags;
3224 struct target_ucontext *tuc_link;
3225 target_stack_t tuc_stack;
3226 struct target_sigcontext tuc_mcontext;
3227 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3230 struct target_rt_sigframe
3232 struct target_siginfo info;
3233 struct target_ucontext uc;
3234 uint16_t retcode[3];
3238 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3239 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3241 static abi_ulong get_sigframe(struct target_sigaction *ka,
3242 unsigned long sp, size_t frame_size)
3244 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3245 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3248 return (sp - frame_size) & -8ul;
3251 static void setup_sigcontext(struct target_sigcontext *sc,
3252 CPUSH4State *regs, unsigned long mask)
3254 int i;
3256 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3257 COPY(gregs[0]); COPY(gregs[1]);
3258 COPY(gregs[2]); COPY(gregs[3]);
3259 COPY(gregs[4]); COPY(gregs[5]);
3260 COPY(gregs[6]); COPY(gregs[7]);
3261 COPY(gregs[8]); COPY(gregs[9]);
3262 COPY(gregs[10]); COPY(gregs[11]);
3263 COPY(gregs[12]); COPY(gregs[13]);
3264 COPY(gregs[14]); COPY(gregs[15]);
3265 COPY(gbr); COPY(mach);
3266 COPY(macl); COPY(pr);
3267 COPY(sr); COPY(pc);
3268 #undef COPY
3270 for (i=0; i<16; i++) {
3271 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3273 __put_user(regs->fpscr, &sc->sc_fpscr);
3274 __put_user(regs->fpul, &sc->sc_fpul);
3276 /* non-iBCS2 extensions.. */
3277 __put_user(mask, &sc->oldmask);
3280 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3282 int i;
3284 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3285 COPY(gregs[0]); COPY(gregs[1]);
3286 COPY(gregs[2]); COPY(gregs[3]);
3287 COPY(gregs[4]); COPY(gregs[5]);
3288 COPY(gregs[6]); COPY(gregs[7]);
3289 COPY(gregs[8]); COPY(gregs[9]);
3290 COPY(gregs[10]); COPY(gregs[11]);
3291 COPY(gregs[12]); COPY(gregs[13]);
3292 COPY(gregs[14]); COPY(gregs[15]);
3293 COPY(gbr); COPY(mach);
3294 COPY(macl); COPY(pr);
3295 COPY(sr); COPY(pc);
3296 #undef COPY
3298 for (i=0; i<16; i++) {
3299 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3301 __get_user(regs->fpscr, &sc->sc_fpscr);
3302 __get_user(regs->fpul, &sc->sc_fpul);
3304 regs->tra = -1; /* disable syscall checks */
3307 static void setup_frame(int sig, struct target_sigaction *ka,
3308 target_sigset_t *set, CPUSH4State *regs)
3310 struct target_sigframe *frame;
3311 abi_ulong frame_addr;
3312 int i;
3314 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3315 trace_user_setup_frame(regs, frame_addr);
3316 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3317 goto give_sigsegv;
3320 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3322 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3323 __put_user(set->sig[i + 1], &frame->extramask[i]);
3326 /* Set up to return from userspace. If provided, use a stub
3327 already in userspace. */
3328 if (ka->sa_flags & TARGET_SA_RESTORER) {
3329 regs->pr = (unsigned long) ka->sa_restorer;
3330 } else {
3331 /* Generate return code (system call to sigreturn) */
3332 abi_ulong retcode_addr = frame_addr +
3333 offsetof(struct target_sigframe, retcode);
3334 __put_user(MOVW(2), &frame->retcode[0]);
3335 __put_user(TRAP_NOARG, &frame->retcode[1]);
3336 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3337 regs->pr = (unsigned long) retcode_addr;
3340 /* Set up registers for signal handler */
3341 regs->gregs[15] = frame_addr;
3342 regs->gregs[4] = sig; /* Arg for signal handler */
3343 regs->gregs[5] = 0;
3344 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3345 regs->pc = (unsigned long) ka->_sa_handler;
3347 unlock_user_struct(frame, frame_addr, 1);
3348 return;
3350 give_sigsegv:
3351 unlock_user_struct(frame, frame_addr, 1);
3352 force_sig(TARGET_SIGSEGV);
3355 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3356 target_siginfo_t *info,
3357 target_sigset_t *set, CPUSH4State *regs)
3359 struct target_rt_sigframe *frame;
3360 abi_ulong frame_addr;
3361 int i;
3363 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3364 trace_user_setup_rt_frame(regs, frame_addr);
3365 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3366 goto give_sigsegv;
3369 tswap_siginfo(&frame->info, info);
3371 /* Create the ucontext. */
3372 __put_user(0, &frame->uc.tuc_flags);
3373 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3374 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3375 &frame->uc.tuc_stack.ss_sp);
3376 __put_user(sas_ss_flags(regs->gregs[15]),
3377 &frame->uc.tuc_stack.ss_flags);
3378 __put_user(target_sigaltstack_used.ss_size,
3379 &frame->uc.tuc_stack.ss_size);
3380 setup_sigcontext(&frame->uc.tuc_mcontext,
3381 regs, set->sig[0]);
3382 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3383 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3386 /* Set up to return from userspace. If provided, use a stub
3387 already in userspace. */
3388 if (ka->sa_flags & TARGET_SA_RESTORER) {
3389 regs->pr = (unsigned long) ka->sa_restorer;
3390 } else {
3391 /* Generate return code (system call to sigreturn) */
3392 abi_ulong retcode_addr = frame_addr +
3393 offsetof(struct target_rt_sigframe, retcode);
3394 __put_user(MOVW(2), &frame->retcode[0]);
3395 __put_user(TRAP_NOARG, &frame->retcode[1]);
3396 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3397 regs->pr = (unsigned long) retcode_addr;
3400 /* Set up registers for signal handler */
3401 regs->gregs[15] = frame_addr;
3402 regs->gregs[4] = sig; /* Arg for signal handler */
3403 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3404 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3405 regs->pc = (unsigned long) ka->_sa_handler;
3407 unlock_user_struct(frame, frame_addr, 1);
3408 return;
3410 give_sigsegv:
3411 unlock_user_struct(frame, frame_addr, 1);
3412 force_sig(TARGET_SIGSEGV);
3415 long do_sigreturn(CPUSH4State *regs)
3417 struct target_sigframe *frame;
3418 abi_ulong frame_addr;
3419 sigset_t blocked;
3420 target_sigset_t target_set;
3421 int i;
3422 int err = 0;
3424 frame_addr = regs->gregs[15];
3425 trace_user_do_sigreturn(regs, frame_addr);
3426 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3427 goto badframe;
3430 __get_user(target_set.sig[0], &frame->sc.oldmask);
3431 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3432 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3435 if (err)
3436 goto badframe;
3438 target_to_host_sigset_internal(&blocked, &target_set);
3439 set_sigmask(&blocked);
3441 restore_sigcontext(regs, &frame->sc);
3443 unlock_user_struct(frame, frame_addr, 0);
3444 return -TARGET_QEMU_ESIGRETURN;
3446 badframe:
3447 unlock_user_struct(frame, frame_addr, 0);
3448 force_sig(TARGET_SIGSEGV);
3449 return 0;
3452 long do_rt_sigreturn(CPUSH4State *regs)
3454 struct target_rt_sigframe *frame;
3455 abi_ulong frame_addr;
3456 sigset_t blocked;
3458 frame_addr = regs->gregs[15];
3459 trace_user_do_rt_sigreturn(regs, frame_addr);
3460 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3461 goto badframe;
3464 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3465 set_sigmask(&blocked);
3467 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3469 if (do_sigaltstack(frame_addr +
3470 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3471 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3472 goto badframe;
3475 unlock_user_struct(frame, frame_addr, 0);
3476 return -TARGET_QEMU_ESIGRETURN;
3478 badframe:
3479 unlock_user_struct(frame, frame_addr, 0);
3480 force_sig(TARGET_SIGSEGV);
3481 return 0;
3483 #elif defined(TARGET_MICROBLAZE)
3485 struct target_sigcontext {
3486 struct target_pt_regs regs; /* needs to be first */
3487 uint32_t oldmask;
3490 struct target_stack_t {
3491 abi_ulong ss_sp;
3492 int ss_flags;
3493 unsigned int ss_size;
3496 struct target_ucontext {
3497 abi_ulong tuc_flags;
3498 abi_ulong tuc_link;
3499 struct target_stack_t tuc_stack;
3500 struct target_sigcontext tuc_mcontext;
3501 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3504 /* Signal frames. */
3505 struct target_signal_frame {
3506 struct target_ucontext uc;
3507 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3508 uint32_t tramp[2];
3511 struct rt_signal_frame {
3512 siginfo_t info;
3513 struct ucontext uc;
3514 uint32_t tramp[2];
3517 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3519 __put_user(env->regs[0], &sc->regs.r0);
3520 __put_user(env->regs[1], &sc->regs.r1);
3521 __put_user(env->regs[2], &sc->regs.r2);
3522 __put_user(env->regs[3], &sc->regs.r3);
3523 __put_user(env->regs[4], &sc->regs.r4);
3524 __put_user(env->regs[5], &sc->regs.r5);
3525 __put_user(env->regs[6], &sc->regs.r6);
3526 __put_user(env->regs[7], &sc->regs.r7);
3527 __put_user(env->regs[8], &sc->regs.r8);
3528 __put_user(env->regs[9], &sc->regs.r9);
3529 __put_user(env->regs[10], &sc->regs.r10);
3530 __put_user(env->regs[11], &sc->regs.r11);
3531 __put_user(env->regs[12], &sc->regs.r12);
3532 __put_user(env->regs[13], &sc->regs.r13);
3533 __put_user(env->regs[14], &sc->regs.r14);
3534 __put_user(env->regs[15], &sc->regs.r15);
3535 __put_user(env->regs[16], &sc->regs.r16);
3536 __put_user(env->regs[17], &sc->regs.r17);
3537 __put_user(env->regs[18], &sc->regs.r18);
3538 __put_user(env->regs[19], &sc->regs.r19);
3539 __put_user(env->regs[20], &sc->regs.r20);
3540 __put_user(env->regs[21], &sc->regs.r21);
3541 __put_user(env->regs[22], &sc->regs.r22);
3542 __put_user(env->regs[23], &sc->regs.r23);
3543 __put_user(env->regs[24], &sc->regs.r24);
3544 __put_user(env->regs[25], &sc->regs.r25);
3545 __put_user(env->regs[26], &sc->regs.r26);
3546 __put_user(env->regs[27], &sc->regs.r27);
3547 __put_user(env->regs[28], &sc->regs.r28);
3548 __put_user(env->regs[29], &sc->regs.r29);
3549 __put_user(env->regs[30], &sc->regs.r30);
3550 __put_user(env->regs[31], &sc->regs.r31);
3551 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3554 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3556 __get_user(env->regs[0], &sc->regs.r0);
3557 __get_user(env->regs[1], &sc->regs.r1);
3558 __get_user(env->regs[2], &sc->regs.r2);
3559 __get_user(env->regs[3], &sc->regs.r3);
3560 __get_user(env->regs[4], &sc->regs.r4);
3561 __get_user(env->regs[5], &sc->regs.r5);
3562 __get_user(env->regs[6], &sc->regs.r6);
3563 __get_user(env->regs[7], &sc->regs.r7);
3564 __get_user(env->regs[8], &sc->regs.r8);
3565 __get_user(env->regs[9], &sc->regs.r9);
3566 __get_user(env->regs[10], &sc->regs.r10);
3567 __get_user(env->regs[11], &sc->regs.r11);
3568 __get_user(env->regs[12], &sc->regs.r12);
3569 __get_user(env->regs[13], &sc->regs.r13);
3570 __get_user(env->regs[14], &sc->regs.r14);
3571 __get_user(env->regs[15], &sc->regs.r15);
3572 __get_user(env->regs[16], &sc->regs.r16);
3573 __get_user(env->regs[17], &sc->regs.r17);
3574 __get_user(env->regs[18], &sc->regs.r18);
3575 __get_user(env->regs[19], &sc->regs.r19);
3576 __get_user(env->regs[20], &sc->regs.r20);
3577 __get_user(env->regs[21], &sc->regs.r21);
3578 __get_user(env->regs[22], &sc->regs.r22);
3579 __get_user(env->regs[23], &sc->regs.r23);
3580 __get_user(env->regs[24], &sc->regs.r24);
3581 __get_user(env->regs[25], &sc->regs.r25);
3582 __get_user(env->regs[26], &sc->regs.r26);
3583 __get_user(env->regs[27], &sc->regs.r27);
3584 __get_user(env->regs[28], &sc->regs.r28);
3585 __get_user(env->regs[29], &sc->regs.r29);
3586 __get_user(env->regs[30], &sc->regs.r30);
3587 __get_user(env->regs[31], &sc->regs.r31);
3588 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3591 static abi_ulong get_sigframe(struct target_sigaction *ka,
3592 CPUMBState *env, int frame_size)
3594 abi_ulong sp = env->regs[1];
3596 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3597 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3600 return ((sp - frame_size) & -8UL);
3603 static void setup_frame(int sig, struct target_sigaction *ka,
3604 target_sigset_t *set, CPUMBState *env)
3606 struct target_signal_frame *frame;
3607 abi_ulong frame_addr;
3608 int i;
3610 frame_addr = get_sigframe(ka, env, sizeof *frame);
3611 trace_user_setup_frame(env, frame_addr);
3612 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3613 goto badframe;
3615 /* Save the mask. */
3616 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3618 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3619 __put_user(set->sig[i], &frame->extramask[i - 1]);
3622 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3624 /* Set up to return from userspace. If provided, use a stub
3625 already in userspace. */
3626 /* minus 8 is offset to cater for "rtsd r15,8" offset */
3627 if (ka->sa_flags & TARGET_SA_RESTORER) {
3628 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3629 } else {
3630 uint32_t t;
3631 /* Note, these encodings are _big endian_! */
3632 /* addi r12, r0, __NR_sigreturn */
3633 t = 0x31800000UL | TARGET_NR_sigreturn;
3634 __put_user(t, frame->tramp + 0);
3635 /* brki r14, 0x8 */
3636 t = 0xb9cc0008UL;
3637 __put_user(t, frame->tramp + 1);
3639 /* Return from sighandler will jump to the tramp.
3640 Negative 8 offset because return is rtsd r15, 8 */
3641 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3642 - 8;
3645 /* Set up registers for signal handler */
3646 env->regs[1] = frame_addr;
3647 /* Signal handler args: */
3648 env->regs[5] = sig; /* Arg 0: signum */
3649 env->regs[6] = 0;
3650 /* arg 1: sigcontext */
3651 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3653 /* Offset of 4 to handle microblaze rtid r14, 0 */
3654 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3656 unlock_user_struct(frame, frame_addr, 1);
3657 return;
3658 badframe:
3659 force_sig(TARGET_SIGSEGV);
3662 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3663 target_siginfo_t *info,
3664 target_sigset_t *set, CPUMBState *env)
3666 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3669 long do_sigreturn(CPUMBState *env)
3671 struct target_signal_frame *frame;
3672 abi_ulong frame_addr;
3673 target_sigset_t target_set;
3674 sigset_t set;
3675 int i;
3677 frame_addr = env->regs[R_SP];
3678 trace_user_do_sigreturn(env, frame_addr);
3679 /* Make sure the guest isn't playing games. */
3680 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3681 goto badframe;
3683 /* Restore blocked signals */
3684 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3685 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3686 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3688 target_to_host_sigset_internal(&set, &target_set);
3689 set_sigmask(&set);
3691 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3692 /* We got here through a sigreturn syscall, our path back is via an
3693 rtb insn so setup r14 for that. */
3694 env->regs[14] = env->sregs[SR_PC];
3696 unlock_user_struct(frame, frame_addr, 0);
3697 return -TARGET_QEMU_ESIGRETURN;
3698 badframe:
3699 force_sig(TARGET_SIGSEGV);
3702 long do_rt_sigreturn(CPUMBState *env)
3704 trace_user_do_rt_sigreturn(env, 0);
3705 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3706 return -TARGET_ENOSYS;
3709 #elif defined(TARGET_CRIS)
3711 struct target_sigcontext {
3712 struct target_pt_regs regs; /* needs to be first */
3713 uint32_t oldmask;
3714 uint32_t usp; /* usp before stacking this gunk on it */
3717 /* Signal frames. */
3718 struct target_signal_frame {
3719 struct target_sigcontext sc;
3720 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3721 uint16_t retcode[4]; /* Trampoline code. */
3724 struct rt_signal_frame {
3725 siginfo_t *pinfo;
3726 void *puc;
3727 siginfo_t info;
3728 struct ucontext uc;
3729 uint16_t retcode[4]; /* Trampoline code. */
3732 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3734 __put_user(env->regs[0], &sc->regs.r0);
3735 __put_user(env->regs[1], &sc->regs.r1);
3736 __put_user(env->regs[2], &sc->regs.r2);
3737 __put_user(env->regs[3], &sc->regs.r3);
3738 __put_user(env->regs[4], &sc->regs.r4);
3739 __put_user(env->regs[5], &sc->regs.r5);
3740 __put_user(env->regs[6], &sc->regs.r6);
3741 __put_user(env->regs[7], &sc->regs.r7);
3742 __put_user(env->regs[8], &sc->regs.r8);
3743 __put_user(env->regs[9], &sc->regs.r9);
3744 __put_user(env->regs[10], &sc->regs.r10);
3745 __put_user(env->regs[11], &sc->regs.r11);
3746 __put_user(env->regs[12], &sc->regs.r12);
3747 __put_user(env->regs[13], &sc->regs.r13);
3748 __put_user(env->regs[14], &sc->usp);
3749 __put_user(env->regs[15], &sc->regs.acr);
3750 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3751 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3752 __put_user(env->pc, &sc->regs.erp);
3755 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3757 __get_user(env->regs[0], &sc->regs.r0);
3758 __get_user(env->regs[1], &sc->regs.r1);
3759 __get_user(env->regs[2], &sc->regs.r2);
3760 __get_user(env->regs[3], &sc->regs.r3);
3761 __get_user(env->regs[4], &sc->regs.r4);
3762 __get_user(env->regs[5], &sc->regs.r5);
3763 __get_user(env->regs[6], &sc->regs.r6);
3764 __get_user(env->regs[7], &sc->regs.r7);
3765 __get_user(env->regs[8], &sc->regs.r8);
3766 __get_user(env->regs[9], &sc->regs.r9);
3767 __get_user(env->regs[10], &sc->regs.r10);
3768 __get_user(env->regs[11], &sc->regs.r11);
3769 __get_user(env->regs[12], &sc->regs.r12);
3770 __get_user(env->regs[13], &sc->regs.r13);
3771 __get_user(env->regs[14], &sc->usp);
3772 __get_user(env->regs[15], &sc->regs.acr);
3773 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3774 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3775 __get_user(env->pc, &sc->regs.erp);
3778 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3780 abi_ulong sp;
3781 /* Align the stack downwards to 4. */
3782 sp = (env->regs[R_SP] & ~3);
3783 return sp - framesize;
3786 static void setup_frame(int sig, struct target_sigaction *ka,
3787 target_sigset_t *set, CPUCRISState *env)
3789 struct target_signal_frame *frame;
3790 abi_ulong frame_addr;
3791 int i;
3793 frame_addr = get_sigframe(env, sizeof *frame);
3794 trace_user_setup_frame(env, frame_addr);
3795 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3796 goto badframe;
3799 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3800 * use this trampoline anymore but it sets it up for GDB.
3801 * In QEMU, using the trampoline simplifies things a bit so we use it.
3803 * This is movu.w __NR_sigreturn, r9; break 13;
3805 __put_user(0x9c5f, frame->retcode+0);
3806 __put_user(TARGET_NR_sigreturn,
3807 frame->retcode + 1);
3808 __put_user(0xe93d, frame->retcode + 2);
3810 /* Save the mask. */
3811 __put_user(set->sig[0], &frame->sc.oldmask);
3813 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3814 __put_user(set->sig[i], &frame->extramask[i - 1]);
3817 setup_sigcontext(&frame->sc, env);
3819 /* Move the stack and setup the arguments for the handler. */
3820 env->regs[R_SP] = frame_addr;
3821 env->regs[10] = sig;
3822 env->pc = (unsigned long) ka->_sa_handler;
3823 /* Link SRP so the guest returns through the trampoline. */
3824 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3826 unlock_user_struct(frame, frame_addr, 1);
3827 return;
3828 badframe:
3829 force_sig(TARGET_SIGSEGV);
3832 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3833 target_siginfo_t *info,
3834 target_sigset_t *set, CPUCRISState *env)
3836 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3839 long do_sigreturn(CPUCRISState *env)
3841 struct target_signal_frame *frame;
3842 abi_ulong frame_addr;
3843 target_sigset_t target_set;
3844 sigset_t set;
3845 int i;
3847 frame_addr = env->regs[R_SP];
3848 trace_user_do_sigreturn(env, frame_addr);
3849 /* Make sure the guest isn't playing games. */
3850 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
3851 goto badframe;
3854 /* Restore blocked signals */
3855 __get_user(target_set.sig[0], &frame->sc.oldmask);
3856 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3857 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3859 target_to_host_sigset_internal(&set, &target_set);
3860 set_sigmask(&set);
3862 restore_sigcontext(&frame->sc, env);
3863 unlock_user_struct(frame, frame_addr, 0);
3864 return -TARGET_QEMU_ESIGRETURN;
3865 badframe:
3866 force_sig(TARGET_SIGSEGV);
3869 long do_rt_sigreturn(CPUCRISState *env)
3871 trace_user_do_rt_sigreturn(env, 0);
3872 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3873 return -TARGET_ENOSYS;
3876 #elif defined(TARGET_OPENRISC)
3878 struct target_sigcontext {
3879 struct target_pt_regs regs;
3880 abi_ulong oldmask;
3881 abi_ulong usp;
3884 struct target_ucontext {
3885 abi_ulong tuc_flags;
3886 abi_ulong tuc_link;
3887 target_stack_t tuc_stack;
3888 struct target_sigcontext tuc_mcontext;
3889 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3892 struct target_rt_sigframe {
3893 abi_ulong pinfo;
3894 uint64_t puc;
3895 struct target_siginfo info;
3896 struct target_sigcontext sc;
3897 struct target_ucontext uc;
3898 unsigned char retcode[16]; /* trampoline code */
3901 /* This is the asm-generic/ucontext.h version */
3902 #if 0
3903 static int restore_sigcontext(CPUOpenRISCState *regs,
3904 struct target_sigcontext *sc)
3906 unsigned int err = 0;
3907 unsigned long old_usp;
3909 /* Alwys make any pending restarted system call return -EINTR */
3910 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3912 /* restore the regs from &sc->regs (same as sc, since regs is first)
3913 * (sc is already checked for VERIFY_READ since the sigframe was
3914 * checked in sys_sigreturn previously)
3917 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3918 goto badframe;
3921 /* make sure the U-flag is set so user-mode cannot fool us */
3923 regs->sr &= ~SR_SM;
3925 /* restore the old USP as it was before we stacked the sc etc.
3926 * (we cannot just pop the sigcontext since we aligned the sp and
3927 * stuff after pushing it)
3930 __get_user(old_usp, &sc->usp);
3931 phx_signal("old_usp 0x%lx", old_usp);
3933 __PHX__ REALLY /* ??? */
3934 wrusp(old_usp);
3935 regs->gpr[1] = old_usp;
3937 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3938 * after this completes, but we don't use that mechanism. maybe we can
3939 * use it now ?
3942 return err;
3944 badframe:
3945 return 1;
3947 #endif
3949 /* Set up a signal frame. */
3951 static void setup_sigcontext(struct target_sigcontext *sc,
3952 CPUOpenRISCState *regs,
3953 unsigned long mask)
3955 unsigned long usp = regs->gpr[1];
3957 /* copy the regs. they are first in sc so we can use sc directly */
3959 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3961 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3962 the signal handler. The frametype will be restored to its previous
3963 value in restore_sigcontext. */
3964 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3966 /* then some other stuff */
3967 __put_user(mask, &sc->oldmask);
3968 __put_user(usp, &sc->usp);
3971 static inline unsigned long align_sigframe(unsigned long sp)
3973 return sp & ~3UL;
3976 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3977 CPUOpenRISCState *regs,
3978 size_t frame_size)
3980 unsigned long sp = regs->gpr[1];
3981 int onsigstack = on_sig_stack(sp);
3983 /* redzone */
3984 /* This is the X/Open sanctioned signal stack switching. */
3985 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3986 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3989 sp = align_sigframe(sp - frame_size);
3992 * If we are on the alternate signal stack and would overflow it, don't.
3993 * Return an always-bogus address instead so we will die with SIGSEGV.
3996 if (onsigstack && !likely(on_sig_stack(sp))) {
3997 return -1L;
4000 return sp;
4003 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4004 target_siginfo_t *info,
4005 target_sigset_t *set, CPUOpenRISCState *env)
4007 int err = 0;
4008 abi_ulong frame_addr;
4009 unsigned long return_ip;
4010 struct target_rt_sigframe *frame;
4011 abi_ulong info_addr, uc_addr;
4013 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4014 trace_user_setup_rt_frame(env, frame_addr);
4015 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4016 goto give_sigsegv;
4019 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4020 __put_user(info_addr, &frame->pinfo);
4021 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4022 __put_user(uc_addr, &frame->puc);
4024 if (ka->sa_flags & SA_SIGINFO) {
4025 tswap_siginfo(&frame->info, info);
4028 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
4029 __put_user(0, &frame->uc.tuc_flags);
4030 __put_user(0, &frame->uc.tuc_link);
4031 __put_user(target_sigaltstack_used.ss_sp,
4032 &frame->uc.tuc_stack.ss_sp);
4033 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
4034 __put_user(target_sigaltstack_used.ss_size,
4035 &frame->uc.tuc_stack.ss_size);
4036 setup_sigcontext(&frame->sc, env, set->sig[0]);
4038 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4040 /* trampoline - the desired return ip is the retcode itself */
4041 return_ip = (unsigned long)&frame->retcode;
4042 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4043 __put_user(0xa960, (short *)(frame->retcode + 0));
4044 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4045 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4046 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4048 if (err) {
4049 goto give_sigsegv;
4052 /* TODO what is the current->exec_domain stuff and invmap ? */
4054 /* Set up registers for signal handler */
4055 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4056 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
4057 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
4058 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
4059 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
4061 /* actually move the usp to reflect the stacked frame */
4062 env->gpr[1] = (unsigned long)frame;
4064 return;
4066 give_sigsegv:
4067 unlock_user_struct(frame, frame_addr, 1);
4068 if (sig == TARGET_SIGSEGV) {
4069 ka->_sa_handler = TARGET_SIG_DFL;
4071 force_sig(TARGET_SIGSEGV);
4074 long do_sigreturn(CPUOpenRISCState *env)
4076 trace_user_do_sigreturn(env, 0);
4077 fprintf(stderr, "do_sigreturn: not implemented\n");
4078 return -TARGET_ENOSYS;
4081 long do_rt_sigreturn(CPUOpenRISCState *env)
4083 trace_user_do_rt_sigreturn(env, 0);
4084 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4085 return -TARGET_ENOSYS;
4087 /* TARGET_OPENRISC */
4089 #elif defined(TARGET_S390X)
4091 #define __NUM_GPRS 16
4092 #define __NUM_FPRS 16
4093 #define __NUM_ACRS 16
4095 #define S390_SYSCALL_SIZE 2
4096 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4098 #define _SIGCONTEXT_NSIG 64
4099 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4100 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4101 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4102 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4103 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4105 typedef struct {
4106 target_psw_t psw;
4107 target_ulong gprs[__NUM_GPRS];
4108 unsigned int acrs[__NUM_ACRS];
4109 } target_s390_regs_common;
4111 typedef struct {
4112 unsigned int fpc;
4113 double fprs[__NUM_FPRS];
4114 } target_s390_fp_regs;
4116 typedef struct {
4117 target_s390_regs_common regs;
4118 target_s390_fp_regs fpregs;
4119 } target_sigregs;
4121 struct target_sigcontext {
4122 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4123 target_sigregs *sregs;
4126 typedef struct {
4127 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4128 struct target_sigcontext sc;
4129 target_sigregs sregs;
4130 int signo;
4131 uint8_t retcode[S390_SYSCALL_SIZE];
4132 } sigframe;
4134 struct target_ucontext {
4135 target_ulong tuc_flags;
4136 struct target_ucontext *tuc_link;
4137 target_stack_t tuc_stack;
4138 target_sigregs tuc_mcontext;
4139 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4142 typedef struct {
4143 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4144 uint8_t retcode[S390_SYSCALL_SIZE];
4145 struct target_siginfo info;
4146 struct target_ucontext uc;
4147 } rt_sigframe;
4149 static inline abi_ulong
4150 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4152 abi_ulong sp;
4154 /* Default to using normal stack */
4155 sp = env->regs[15];
4157 /* This is the X/Open sanctioned signal stack switching. */
4158 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4159 if (!sas_ss_flags(sp)) {
4160 sp = target_sigaltstack_used.ss_sp +
4161 target_sigaltstack_used.ss_size;
4165 /* This is the legacy signal stack switching. */
4166 else if (/* FIXME !user_mode(regs) */ 0 &&
4167 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4168 ka->sa_restorer) {
4169 sp = (abi_ulong) ka->sa_restorer;
4172 return (sp - frame_size) & -8ul;
4175 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4177 int i;
4178 //save_access_regs(current->thread.acrs); FIXME
4180 /* Copy a 'clean' PSW mask to the user to avoid leaking
4181 information about whether PER is currently on. */
4182 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4183 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4184 for (i = 0; i < 16; i++) {
4185 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4187 for (i = 0; i < 16; i++) {
4188 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4191 * We have to store the fp registers to current->thread.fp_regs
4192 * to merge them with the emulated registers.
4194 //save_fp_regs(&current->thread.fp_regs); FIXME
4195 for (i = 0; i < 16; i++) {
4196 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4200 static void setup_frame(int sig, struct target_sigaction *ka,
4201 target_sigset_t *set, CPUS390XState *env)
4203 sigframe *frame;
4204 abi_ulong frame_addr;
4206 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4207 trace_user_setup_frame(env, frame_addr);
4208 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4209 goto give_sigsegv;
4212 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4214 save_sigregs(env, &frame->sregs);
4216 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4217 (abi_ulong *)&frame->sc.sregs);
4219 /* Set up to return from userspace. If provided, use a stub
4220 already in userspace. */
4221 if (ka->sa_flags & TARGET_SA_RESTORER) {
4222 env->regs[14] = (unsigned long)
4223 ka->sa_restorer | PSW_ADDR_AMODE;
4224 } else {
4225 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4226 | PSW_ADDR_AMODE;
4227 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4228 (uint16_t *)(frame->retcode));
4231 /* Set up backchain. */
4232 __put_user(env->regs[15], (abi_ulong *) frame);
4234 /* Set up registers for signal handler */
4235 env->regs[15] = frame_addr;
4236 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4238 env->regs[2] = sig; //map_signal(sig);
4239 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4241 /* We forgot to include these in the sigcontext.
4242 To avoid breaking binary compatibility, they are passed as args. */
4243 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4244 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4246 /* Place signal number on stack to allow backtrace from handler. */
4247 __put_user(env->regs[2], (int *) &frame->signo);
4248 unlock_user_struct(frame, frame_addr, 1);
4249 return;
4251 give_sigsegv:
4252 force_sig(TARGET_SIGSEGV);
4255 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4256 target_siginfo_t *info,
4257 target_sigset_t *set, CPUS390XState *env)
4259 int i;
4260 rt_sigframe *frame;
4261 abi_ulong frame_addr;
4263 frame_addr = get_sigframe(ka, env, sizeof *frame);
4264 trace_user_setup_rt_frame(env, frame_addr);
4265 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4266 goto give_sigsegv;
4269 tswap_siginfo(&frame->info, info);
4271 /* Create the ucontext. */
4272 __put_user(0, &frame->uc.tuc_flags);
4273 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4274 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4275 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4276 &frame->uc.tuc_stack.ss_flags);
4277 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4278 save_sigregs(env, &frame->uc.tuc_mcontext);
4279 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4280 __put_user((abi_ulong)set->sig[i],
4281 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4284 /* Set up to return from userspace. If provided, use a stub
4285 already in userspace. */
4286 if (ka->sa_flags & TARGET_SA_RESTORER) {
4287 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4288 } else {
4289 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4290 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4291 (uint16_t *)(frame->retcode));
4294 /* Set up backchain. */
4295 __put_user(env->regs[15], (abi_ulong *) frame);
4297 /* Set up registers for signal handler */
4298 env->regs[15] = frame_addr;
4299 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4301 env->regs[2] = sig; //map_signal(sig);
4302 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4303 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4304 return;
4306 give_sigsegv:
4307 force_sig(TARGET_SIGSEGV);
4310 static int
4311 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4313 int err = 0;
4314 int i;
4316 for (i = 0; i < 16; i++) {
4317 __get_user(env->regs[i], &sc->regs.gprs[i]);
4320 __get_user(env->psw.mask, &sc->regs.psw.mask);
4321 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4322 (unsigned long long)env->psw.addr);
4323 __get_user(env->psw.addr, &sc->regs.psw.addr);
4324 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4326 for (i = 0; i < 16; i++) {
4327 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4329 for (i = 0; i < 16; i++) {
4330 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4333 return err;
4336 long do_sigreturn(CPUS390XState *env)
4338 sigframe *frame;
4339 abi_ulong frame_addr = env->regs[15];
4340 target_sigset_t target_set;
4341 sigset_t set;
4343 trace_user_do_sigreturn(env, frame_addr);
4344 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4345 goto badframe;
4347 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4349 target_to_host_sigset_internal(&set, &target_set);
4350 set_sigmask(&set); /* ~_BLOCKABLE? */
4352 if (restore_sigregs(env, &frame->sregs)) {
4353 goto badframe;
4356 unlock_user_struct(frame, frame_addr, 0);
4357 return -TARGET_QEMU_ESIGRETURN;
4359 badframe:
4360 force_sig(TARGET_SIGSEGV);
4361 return 0;
4364 long do_rt_sigreturn(CPUS390XState *env)
4366 rt_sigframe *frame;
4367 abi_ulong frame_addr = env->regs[15];
4368 sigset_t set;
4370 trace_user_do_rt_sigreturn(env, frame_addr);
4371 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4372 goto badframe;
4374 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4376 set_sigmask(&set); /* ~_BLOCKABLE? */
4378 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4379 goto badframe;
4382 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4383 get_sp_from_cpustate(env)) == -EFAULT) {
4384 goto badframe;
4386 unlock_user_struct(frame, frame_addr, 0);
4387 return -TARGET_QEMU_ESIGRETURN;
4389 badframe:
4390 unlock_user_struct(frame, frame_addr, 0);
4391 force_sig(TARGET_SIGSEGV);
4392 return 0;
4395 #elif defined(TARGET_PPC)
4397 /* Size of dummy stack frame allocated when calling signal handler.
4398 See arch/powerpc/include/asm/ptrace.h. */
4399 #if defined(TARGET_PPC64)
4400 #define SIGNAL_FRAMESIZE 128
4401 #else
4402 #define SIGNAL_FRAMESIZE 64
4403 #endif
4405 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4406 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4407 struct target_mcontext {
4408 target_ulong mc_gregs[48];
4409 /* Includes fpscr. */
4410 uint64_t mc_fregs[33];
4411 target_ulong mc_pad[2];
4412 /* We need to handle Altivec and SPE at the same time, which no
4413 kernel needs to do. Fortunately, the kernel defines this bit to
4414 be Altivec-register-large all the time, rather than trying to
4415 twiddle it based on the specific platform. */
4416 union {
4417 /* SPE vector registers. One extra for SPEFSCR. */
4418 uint32_t spe[33];
4419 /* Altivec vector registers. The packing of VSCR and VRSAVE
4420 varies depending on whether we're PPC64 or not: PPC64 splits
4421 them apart; PPC32 stuffs them together. */
4422 #if defined(TARGET_PPC64)
4423 #define QEMU_NVRREG 34
4424 #else
4425 #define QEMU_NVRREG 33
4426 #endif
4427 ppc_avr_t altivec[QEMU_NVRREG];
4428 #undef QEMU_NVRREG
4429 } mc_vregs __attribute__((__aligned__(16)));
4432 /* See arch/powerpc/include/asm/sigcontext.h. */
4433 struct target_sigcontext {
4434 target_ulong _unused[4];
4435 int32_t signal;
4436 #if defined(TARGET_PPC64)
4437 int32_t pad0;
4438 #endif
4439 target_ulong handler;
4440 target_ulong oldmask;
4441 target_ulong regs; /* struct pt_regs __user * */
4442 #if defined(TARGET_PPC64)
4443 struct target_mcontext mcontext;
4444 #endif
4447 /* Indices for target_mcontext.mc_gregs, below.
4448 See arch/powerpc/include/asm/ptrace.h for details. */
4449 enum {
4450 TARGET_PT_R0 = 0,
4451 TARGET_PT_R1 = 1,
4452 TARGET_PT_R2 = 2,
4453 TARGET_PT_R3 = 3,
4454 TARGET_PT_R4 = 4,
4455 TARGET_PT_R5 = 5,
4456 TARGET_PT_R6 = 6,
4457 TARGET_PT_R7 = 7,
4458 TARGET_PT_R8 = 8,
4459 TARGET_PT_R9 = 9,
4460 TARGET_PT_R10 = 10,
4461 TARGET_PT_R11 = 11,
4462 TARGET_PT_R12 = 12,
4463 TARGET_PT_R13 = 13,
4464 TARGET_PT_R14 = 14,
4465 TARGET_PT_R15 = 15,
4466 TARGET_PT_R16 = 16,
4467 TARGET_PT_R17 = 17,
4468 TARGET_PT_R18 = 18,
4469 TARGET_PT_R19 = 19,
4470 TARGET_PT_R20 = 20,
4471 TARGET_PT_R21 = 21,
4472 TARGET_PT_R22 = 22,
4473 TARGET_PT_R23 = 23,
4474 TARGET_PT_R24 = 24,
4475 TARGET_PT_R25 = 25,
4476 TARGET_PT_R26 = 26,
4477 TARGET_PT_R27 = 27,
4478 TARGET_PT_R28 = 28,
4479 TARGET_PT_R29 = 29,
4480 TARGET_PT_R30 = 30,
4481 TARGET_PT_R31 = 31,
4482 TARGET_PT_NIP = 32,
4483 TARGET_PT_MSR = 33,
4484 TARGET_PT_ORIG_R3 = 34,
4485 TARGET_PT_CTR = 35,
4486 TARGET_PT_LNK = 36,
4487 TARGET_PT_XER = 37,
4488 TARGET_PT_CCR = 38,
4489 /* Yes, there are two registers with #39. One is 64-bit only. */
4490 TARGET_PT_MQ = 39,
4491 TARGET_PT_SOFTE = 39,
4492 TARGET_PT_TRAP = 40,
4493 TARGET_PT_DAR = 41,
4494 TARGET_PT_DSISR = 42,
4495 TARGET_PT_RESULT = 43,
4496 TARGET_PT_REGS_COUNT = 44
4500 struct target_ucontext {
4501 target_ulong tuc_flags;
4502 target_ulong tuc_link; /* struct ucontext __user * */
4503 struct target_sigaltstack tuc_stack;
4504 #if !defined(TARGET_PPC64)
4505 int32_t tuc_pad[7];
4506 target_ulong tuc_regs; /* struct mcontext __user *
4507 points to uc_mcontext field */
4508 #endif
4509 target_sigset_t tuc_sigmask;
4510 #if defined(TARGET_PPC64)
4511 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4512 struct target_sigcontext tuc_sigcontext;
4513 #else
4514 int32_t tuc_maskext[30];
4515 int32_t tuc_pad2[3];
4516 struct target_mcontext tuc_mcontext;
4517 #endif
4520 /* See arch/powerpc/kernel/signal_32.c. */
4521 struct target_sigframe {
4522 struct target_sigcontext sctx;
4523 struct target_mcontext mctx;
4524 int32_t abigap[56];
4527 #if defined(TARGET_PPC64)
4529 #define TARGET_TRAMP_SIZE 6
4531 struct target_rt_sigframe {
4532 /* sys_rt_sigreturn requires the ucontext be the first field */
4533 struct target_ucontext uc;
4534 target_ulong _unused[2];
4535 uint32_t trampoline[TARGET_TRAMP_SIZE];
4536 target_ulong pinfo; /* struct siginfo __user * */
4537 target_ulong puc; /* void __user * */
4538 struct target_siginfo info;
4539 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4540 char abigap[288];
4541 } __attribute__((aligned(16)));
4543 #else
4545 struct target_rt_sigframe {
4546 struct target_siginfo info;
4547 struct target_ucontext uc;
4548 int32_t abigap[56];
4551 #endif
4553 #if defined(TARGET_PPC64)
4555 struct target_func_ptr {
4556 target_ulong entry;
4557 target_ulong toc;
4560 #endif
4562 /* We use the mc_pad field for the signal return trampoline. */
4563 #define tramp mc_pad
4565 /* See arch/powerpc/kernel/signal.c. */
4566 static target_ulong get_sigframe(struct target_sigaction *ka,
4567 CPUPPCState *env,
4568 int frame_size)
4570 target_ulong oldsp;
4572 oldsp = env->gpr[1];
4574 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4575 (sas_ss_flags(oldsp) == 0)) {
4576 oldsp = (target_sigaltstack_used.ss_sp
4577 + target_sigaltstack_used.ss_size);
4580 return (oldsp - frame_size) & ~0xFUL;
4583 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4585 target_ulong msr = env->msr;
4586 int i;
4587 target_ulong ccr = 0;
4589 /* In general, the kernel attempts to be intelligent about what it
4590 needs to save for Altivec/FP/SPE registers. We don't care that
4591 much, so we just go ahead and save everything. */
4593 /* Save general registers. */
4594 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4595 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4597 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4598 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4599 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4600 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4602 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4603 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4605 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4607 /* Save Altivec registers if necessary. */
4608 if (env->insns_flags & PPC_ALTIVEC) {
4609 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4610 ppc_avr_t *avr = &env->avr[i];
4611 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4613 __put_user(avr->u64[0], &vreg->u64[0]);
4614 __put_user(avr->u64[1], &vreg->u64[1]);
4616 /* Set MSR_VR in the saved MSR value to indicate that
4617 frame->mc_vregs contains valid data. */
4618 msr |= MSR_VR;
4619 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4620 &frame->mc_vregs.altivec[32].u32[3]);
4623 /* Save floating point registers. */
4624 if (env->insns_flags & PPC_FLOAT) {
4625 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4626 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4628 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4631 /* Save SPE registers. The kernel only saves the high half. */
4632 if (env->insns_flags & PPC_SPE) {
4633 #if defined(TARGET_PPC64)
4634 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4635 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4637 #else
4638 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4639 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4641 #endif
4642 /* Set MSR_SPE in the saved MSR value to indicate that
4643 frame->mc_vregs contains valid data. */
4644 msr |= MSR_SPE;
4645 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4648 /* Store MSR. */
4649 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4652 static void encode_trampoline(int sigret, uint32_t *tramp)
4654 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4655 if (sigret) {
4656 __put_user(0x38000000 | sigret, &tramp[0]);
4657 __put_user(0x44000002, &tramp[1]);
4661 static void restore_user_regs(CPUPPCState *env,
4662 struct target_mcontext *frame, int sig)
4664 target_ulong save_r2 = 0;
4665 target_ulong msr;
4666 target_ulong ccr;
4668 int i;
4670 if (!sig) {
4671 save_r2 = env->gpr[2];
4674 /* Restore general registers. */
4675 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4676 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4678 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4679 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4680 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4681 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4682 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4684 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4685 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4688 if (!sig) {
4689 env->gpr[2] = save_r2;
4691 /* Restore MSR. */
4692 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4694 /* If doing signal return, restore the previous little-endian mode. */
4695 if (sig)
4696 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
4698 /* Restore Altivec registers if necessary. */
4699 if (env->insns_flags & PPC_ALTIVEC) {
4700 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4701 ppc_avr_t *avr = &env->avr[i];
4702 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4704 __get_user(avr->u64[0], &vreg->u64[0]);
4705 __get_user(avr->u64[1], &vreg->u64[1]);
4707 /* Set MSR_VEC in the saved MSR value to indicate that
4708 frame->mc_vregs contains valid data. */
4709 __get_user(env->spr[SPR_VRSAVE],
4710 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4713 /* Restore floating point registers. */
4714 if (env->insns_flags & PPC_FLOAT) {
4715 uint64_t fpscr;
4716 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4717 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4719 __get_user(fpscr, &frame->mc_fregs[32]);
4720 env->fpscr = (uint32_t) fpscr;
4723 /* Save SPE registers. The kernel only saves the high half. */
4724 if (env->insns_flags & PPC_SPE) {
4725 #if defined(TARGET_PPC64)
4726 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4727 uint32_t hi;
4729 __get_user(hi, &frame->mc_vregs.spe[i]);
4730 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4732 #else
4733 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4734 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4736 #endif
4737 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4741 static void setup_frame(int sig, struct target_sigaction *ka,
4742 target_sigset_t *set, CPUPPCState *env)
4744 struct target_sigframe *frame;
4745 struct target_sigcontext *sc;
4746 target_ulong frame_addr, newsp;
4747 int err = 0;
4748 #if defined(TARGET_PPC64)
4749 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4750 #endif
4752 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4753 trace_user_setup_frame(env, frame_addr);
4754 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4755 goto sigsegv;
4756 sc = &frame->sctx;
4758 __put_user(ka->_sa_handler, &sc->handler);
4759 __put_user(set->sig[0], &sc->oldmask);
4760 #if TARGET_ABI_BITS == 64
4761 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4762 #else
4763 __put_user(set->sig[1], &sc->_unused[3]);
4764 #endif
4765 __put_user(h2g(&frame->mctx), &sc->regs);
4766 __put_user(sig, &sc->signal);
4768 /* Save user regs. */
4769 save_user_regs(env, &frame->mctx);
4771 /* Construct the trampoline code on the stack. */
4772 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4774 /* The kernel checks for the presence of a VDSO here. We don't
4775 emulate a vdso, so use a sigreturn system call. */
4776 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4778 /* Turn off all fp exceptions. */
4779 env->fpscr = 0;
4781 /* Create a stack frame for the caller of the handler. */
4782 newsp = frame_addr - SIGNAL_FRAMESIZE;
4783 err |= put_user(env->gpr[1], newsp, target_ulong);
4785 if (err)
4786 goto sigsegv;
4788 /* Set up registers for signal handler. */
4789 env->gpr[1] = newsp;
4790 env->gpr[3] = sig;
4791 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4793 #if defined(TARGET_PPC64)
4794 if (get_ppc64_abi(image) < 2) {
4795 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4796 struct target_func_ptr *handler =
4797 (struct target_func_ptr *)g2h(ka->_sa_handler);
4798 env->nip = tswapl(handler->entry);
4799 env->gpr[2] = tswapl(handler->toc);
4800 } else {
4801 /* ELFv2 PPC64 function pointers are entry points, but R12
4802 * must also be set */
4803 env->nip = tswapl((target_ulong) ka->_sa_handler);
4804 env->gpr[12] = env->nip;
4806 #else
4807 env->nip = (target_ulong) ka->_sa_handler;
4808 #endif
4810 /* Signal handlers are entered in big-endian mode. */
4811 env->msr &= ~(1ull << MSR_LE);
4813 unlock_user_struct(frame, frame_addr, 1);
4814 return;
4816 sigsegv:
4817 unlock_user_struct(frame, frame_addr, 1);
4818 force_sig(TARGET_SIGSEGV);
4821 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4822 target_siginfo_t *info,
4823 target_sigset_t *set, CPUPPCState *env)
4825 struct target_rt_sigframe *rt_sf;
4826 uint32_t *trampptr = 0;
4827 struct target_mcontext *mctx = 0;
4828 target_ulong rt_sf_addr, newsp = 0;
4829 int i, err = 0;
4830 #if defined(TARGET_PPC64)
4831 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4832 #endif
4834 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4835 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4836 goto sigsegv;
4838 tswap_siginfo(&rt_sf->info, info);
4840 __put_user(0, &rt_sf->uc.tuc_flags);
4841 __put_user(0, &rt_sf->uc.tuc_link);
4842 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4843 &rt_sf->uc.tuc_stack.ss_sp);
4844 __put_user(sas_ss_flags(env->gpr[1]),
4845 &rt_sf->uc.tuc_stack.ss_flags);
4846 __put_user(target_sigaltstack_used.ss_size,
4847 &rt_sf->uc.tuc_stack.ss_size);
4848 #if !defined(TARGET_PPC64)
4849 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4850 &rt_sf->uc.tuc_regs);
4851 #endif
4852 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4853 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4856 #if defined(TARGET_PPC64)
4857 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4858 trampptr = &rt_sf->trampoline[0];
4859 #else
4860 mctx = &rt_sf->uc.tuc_mcontext;
4861 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4862 #endif
4864 save_user_regs(env, mctx);
4865 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4867 /* The kernel checks for the presence of a VDSO here. We don't
4868 emulate a vdso, so use a sigreturn system call. */
4869 env->lr = (target_ulong) h2g(trampptr);
4871 /* Turn off all fp exceptions. */
4872 env->fpscr = 0;
4874 /* Create a stack frame for the caller of the handler. */
4875 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4876 err |= put_user(env->gpr[1], newsp, target_ulong);
4878 if (err)
4879 goto sigsegv;
4881 /* Set up registers for signal handler. */
4882 env->gpr[1] = newsp;
4883 env->gpr[3] = (target_ulong) sig;
4884 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4885 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4886 env->gpr[6] = (target_ulong) h2g(rt_sf);
4888 #if defined(TARGET_PPC64)
4889 if (get_ppc64_abi(image) < 2) {
4890 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4891 struct target_func_ptr *handler =
4892 (struct target_func_ptr *)g2h(ka->_sa_handler);
4893 env->nip = tswapl(handler->entry);
4894 env->gpr[2] = tswapl(handler->toc);
4895 } else {
4896 /* ELFv2 PPC64 function pointers are entry points, but R12
4897 * must also be set */
4898 env->nip = tswapl((target_ulong) ka->_sa_handler);
4899 env->gpr[12] = env->nip;
4901 #else
4902 env->nip = (target_ulong) ka->_sa_handler;
4903 #endif
4905 /* Signal handlers are entered in big-endian mode. */
4906 env->msr &= ~(1ull << MSR_LE);
4908 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4909 return;
4911 sigsegv:
4912 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4913 force_sig(TARGET_SIGSEGV);
4917 long do_sigreturn(CPUPPCState *env)
4919 struct target_sigcontext *sc = NULL;
4920 struct target_mcontext *sr = NULL;
4921 target_ulong sr_addr = 0, sc_addr;
4922 sigset_t blocked;
4923 target_sigset_t set;
4925 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4926 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4927 goto sigsegv;
4929 #if defined(TARGET_PPC64)
4930 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4931 #else
4932 __get_user(set.sig[0], &sc->oldmask);
4933 __get_user(set.sig[1], &sc->_unused[3]);
4934 #endif
4935 target_to_host_sigset_internal(&blocked, &set);
4936 set_sigmask(&blocked);
4938 __get_user(sr_addr, &sc->regs);
4939 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4940 goto sigsegv;
4941 restore_user_regs(env, sr, 1);
4943 unlock_user_struct(sr, sr_addr, 1);
4944 unlock_user_struct(sc, sc_addr, 1);
4945 return -TARGET_QEMU_ESIGRETURN;
4947 sigsegv:
4948 unlock_user_struct(sr, sr_addr, 1);
4949 unlock_user_struct(sc, sc_addr, 1);
4950 force_sig(TARGET_SIGSEGV);
4951 return 0;
4954 /* See arch/powerpc/kernel/signal_32.c. */
4955 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4957 struct target_mcontext *mcp;
4958 target_ulong mcp_addr;
4959 sigset_t blocked;
4960 target_sigset_t set;
4962 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4963 sizeof (set)))
4964 return 1;
4966 #if defined(TARGET_PPC64)
4967 mcp_addr = h2g(ucp) +
4968 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4969 #else
4970 __get_user(mcp_addr, &ucp->tuc_regs);
4971 #endif
4973 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4974 return 1;
4976 target_to_host_sigset_internal(&blocked, &set);
4977 set_sigmask(&blocked);
4978 restore_user_regs(env, mcp, sig);
4980 unlock_user_struct(mcp, mcp_addr, 1);
4981 return 0;
4984 long do_rt_sigreturn(CPUPPCState *env)
4986 struct target_rt_sigframe *rt_sf = NULL;
4987 target_ulong rt_sf_addr;
4989 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4990 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4991 goto sigsegv;
4993 if (do_setcontext(&rt_sf->uc, env, 1))
4994 goto sigsegv;
4996 do_sigaltstack(rt_sf_addr
4997 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4998 0, env->gpr[1]);
5000 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5001 return -TARGET_QEMU_ESIGRETURN;
5003 sigsegv:
5004 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5005 force_sig(TARGET_SIGSEGV);
5006 return 0;
5009 #elif defined(TARGET_M68K)
5011 struct target_sigcontext {
5012 abi_ulong sc_mask;
5013 abi_ulong sc_usp;
5014 abi_ulong sc_d0;
5015 abi_ulong sc_d1;
5016 abi_ulong sc_a0;
5017 abi_ulong sc_a1;
5018 unsigned short sc_sr;
5019 abi_ulong sc_pc;
5022 struct target_sigframe
5024 abi_ulong pretcode;
5025 int sig;
5026 int code;
5027 abi_ulong psc;
5028 char retcode[8];
5029 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5030 struct target_sigcontext sc;
5033 typedef int target_greg_t;
5034 #define TARGET_NGREG 18
5035 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5037 typedef struct target_fpregset {
5038 int f_fpcntl[3];
5039 int f_fpregs[8*3];
5040 } target_fpregset_t;
5042 struct target_mcontext {
5043 int version;
5044 target_gregset_t gregs;
5045 target_fpregset_t fpregs;
5048 #define TARGET_MCONTEXT_VERSION 2
5050 struct target_ucontext {
5051 abi_ulong tuc_flags;
5052 abi_ulong tuc_link;
5053 target_stack_t tuc_stack;
5054 struct target_mcontext tuc_mcontext;
5055 abi_long tuc_filler[80];
5056 target_sigset_t tuc_sigmask;
5059 struct target_rt_sigframe
5061 abi_ulong pretcode;
5062 int sig;
5063 abi_ulong pinfo;
5064 abi_ulong puc;
5065 char retcode[8];
5066 struct target_siginfo info;
5067 struct target_ucontext uc;
5070 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5071 abi_ulong mask)
5073 __put_user(mask, &sc->sc_mask);
5074 __put_user(env->aregs[7], &sc->sc_usp);
5075 __put_user(env->dregs[0], &sc->sc_d0);
5076 __put_user(env->dregs[1], &sc->sc_d1);
5077 __put_user(env->aregs[0], &sc->sc_a0);
5078 __put_user(env->aregs[1], &sc->sc_a1);
5079 __put_user(env->sr, &sc->sc_sr);
5080 __put_user(env->pc, &sc->sc_pc);
5083 static void
5084 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5086 int temp;
5088 __get_user(env->aregs[7], &sc->sc_usp);
5089 __get_user(env->dregs[0], &sc->sc_d0);
5090 __get_user(env->dregs[1], &sc->sc_d1);
5091 __get_user(env->aregs[0], &sc->sc_a0);
5092 __get_user(env->aregs[1], &sc->sc_a1);
5093 __get_user(env->pc, &sc->sc_pc);
5094 __get_user(temp, &sc->sc_sr);
5095 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5099 * Determine which stack to use..
5101 static inline abi_ulong
5102 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5103 size_t frame_size)
5105 unsigned long sp;
5107 sp = regs->aregs[7];
5109 /* This is the X/Open sanctioned signal stack switching. */
5110 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5111 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5114 return ((sp - frame_size) & -8UL);
5117 static void setup_frame(int sig, struct target_sigaction *ka,
5118 target_sigset_t *set, CPUM68KState *env)
5120 struct target_sigframe *frame;
5121 abi_ulong frame_addr;
5122 abi_ulong retcode_addr;
5123 abi_ulong sc_addr;
5124 int i;
5126 frame_addr = get_sigframe(ka, env, sizeof *frame);
5127 trace_user_setup_frame(env, frame_addr);
5128 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5129 goto give_sigsegv;
5132 __put_user(sig, &frame->sig);
5134 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5135 __put_user(sc_addr, &frame->psc);
5137 setup_sigcontext(&frame->sc, env, set->sig[0]);
5139 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5140 __put_user(set->sig[i], &frame->extramask[i - 1]);
5143 /* Set up to return from userspace. */
5145 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5146 __put_user(retcode_addr, &frame->pretcode);
5148 /* moveq #,d0; trap #0 */
5150 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5151 (uint32_t *)(frame->retcode));
5153 /* Set up to return from userspace */
5155 env->aregs[7] = frame_addr;
5156 env->pc = ka->_sa_handler;
5158 unlock_user_struct(frame, frame_addr, 1);
5159 return;
5161 give_sigsegv:
5162 force_sig(TARGET_SIGSEGV);
5165 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5166 CPUM68KState *env)
5168 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5170 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5171 __put_user(env->dregs[0], &gregs[0]);
5172 __put_user(env->dregs[1], &gregs[1]);
5173 __put_user(env->dregs[2], &gregs[2]);
5174 __put_user(env->dregs[3], &gregs[3]);
5175 __put_user(env->dregs[4], &gregs[4]);
5176 __put_user(env->dregs[5], &gregs[5]);
5177 __put_user(env->dregs[6], &gregs[6]);
5178 __put_user(env->dregs[7], &gregs[7]);
5179 __put_user(env->aregs[0], &gregs[8]);
5180 __put_user(env->aregs[1], &gregs[9]);
5181 __put_user(env->aregs[2], &gregs[10]);
5182 __put_user(env->aregs[3], &gregs[11]);
5183 __put_user(env->aregs[4], &gregs[12]);
5184 __put_user(env->aregs[5], &gregs[13]);
5185 __put_user(env->aregs[6], &gregs[14]);
5186 __put_user(env->aregs[7], &gregs[15]);
5187 __put_user(env->pc, &gregs[16]);
5188 __put_user(env->sr, &gregs[17]);
5190 return 0;
5193 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5194 struct target_ucontext *uc)
5196 int temp;
5197 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5199 __get_user(temp, &uc->tuc_mcontext.version);
5200 if (temp != TARGET_MCONTEXT_VERSION)
5201 goto badframe;
5203 /* restore passed registers */
5204 __get_user(env->dregs[0], &gregs[0]);
5205 __get_user(env->dregs[1], &gregs[1]);
5206 __get_user(env->dregs[2], &gregs[2]);
5207 __get_user(env->dregs[3], &gregs[3]);
5208 __get_user(env->dregs[4], &gregs[4]);
5209 __get_user(env->dregs[5], &gregs[5]);
5210 __get_user(env->dregs[6], &gregs[6]);
5211 __get_user(env->dregs[7], &gregs[7]);
5212 __get_user(env->aregs[0], &gregs[8]);
5213 __get_user(env->aregs[1], &gregs[9]);
5214 __get_user(env->aregs[2], &gregs[10]);
5215 __get_user(env->aregs[3], &gregs[11]);
5216 __get_user(env->aregs[4], &gregs[12]);
5217 __get_user(env->aregs[5], &gregs[13]);
5218 __get_user(env->aregs[6], &gregs[14]);
5219 __get_user(env->aregs[7], &gregs[15]);
5220 __get_user(env->pc, &gregs[16]);
5221 __get_user(temp, &gregs[17]);
5222 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5224 return 0;
5226 badframe:
5227 return 1;
5230 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5231 target_siginfo_t *info,
5232 target_sigset_t *set, CPUM68KState *env)
5234 struct target_rt_sigframe *frame;
5235 abi_ulong frame_addr;
5236 abi_ulong retcode_addr;
5237 abi_ulong info_addr;
5238 abi_ulong uc_addr;
5239 int err = 0;
5240 int i;
5242 frame_addr = get_sigframe(ka, env, sizeof *frame);
5243 trace_user_setup_rt_frame(env, frame_addr);
5244 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5245 goto give_sigsegv;
5248 __put_user(sig, &frame->sig);
5250 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5251 __put_user(info_addr, &frame->pinfo);
5253 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5254 __put_user(uc_addr, &frame->puc);
5256 tswap_siginfo(&frame->info, info);
5258 /* Create the ucontext */
5260 __put_user(0, &frame->uc.tuc_flags);
5261 __put_user(0, &frame->uc.tuc_link);
5262 __put_user(target_sigaltstack_used.ss_sp,
5263 &frame->uc.tuc_stack.ss_sp);
5264 __put_user(sas_ss_flags(env->aregs[7]),
5265 &frame->uc.tuc_stack.ss_flags);
5266 __put_user(target_sigaltstack_used.ss_size,
5267 &frame->uc.tuc_stack.ss_size);
5268 err |= target_rt_setup_ucontext(&frame->uc, env);
5270 if (err)
5271 goto give_sigsegv;
5273 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5274 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5277 /* Set up to return from userspace. */
5279 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5280 __put_user(retcode_addr, &frame->pretcode);
5282 /* moveq #,d0; notb d0; trap #0 */
5284 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5285 (uint32_t *)(frame->retcode + 0));
5286 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5288 if (err)
5289 goto give_sigsegv;
5291 /* Set up to return from userspace */
5293 env->aregs[7] = frame_addr;
5294 env->pc = ka->_sa_handler;
5296 unlock_user_struct(frame, frame_addr, 1);
5297 return;
5299 give_sigsegv:
5300 unlock_user_struct(frame, frame_addr, 1);
5301 force_sig(TARGET_SIGSEGV);
5304 long do_sigreturn(CPUM68KState *env)
5306 struct target_sigframe *frame;
5307 abi_ulong frame_addr = env->aregs[7] - 4;
5308 target_sigset_t target_set;
5309 sigset_t set;
5310 int i;
5312 trace_user_do_sigreturn(env, frame_addr);
5313 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5314 goto badframe;
5316 /* set blocked signals */
5318 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5320 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5321 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5324 target_to_host_sigset_internal(&set, &target_set);
5325 set_sigmask(&set);
5327 /* restore registers */
5329 restore_sigcontext(env, &frame->sc);
5331 unlock_user_struct(frame, frame_addr, 0);
5332 return -TARGET_QEMU_ESIGRETURN;
5334 badframe:
5335 force_sig(TARGET_SIGSEGV);
5336 return 0;
5339 long do_rt_sigreturn(CPUM68KState *env)
5341 struct target_rt_sigframe *frame;
5342 abi_ulong frame_addr = env->aregs[7] - 4;
5343 target_sigset_t target_set;
5344 sigset_t set;
5346 trace_user_do_rt_sigreturn(env, frame_addr);
5347 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5348 goto badframe;
5350 target_to_host_sigset_internal(&set, &target_set);
5351 set_sigmask(&set);
5353 /* restore registers */
5355 if (target_rt_restore_ucontext(env, &frame->uc))
5356 goto badframe;
5358 if (do_sigaltstack(frame_addr +
5359 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5360 0, get_sp_from_cpustate(env)) == -EFAULT)
5361 goto badframe;
5363 unlock_user_struct(frame, frame_addr, 0);
5364 return -TARGET_QEMU_ESIGRETURN;
5366 badframe:
5367 unlock_user_struct(frame, frame_addr, 0);
5368 force_sig(TARGET_SIGSEGV);
5369 return 0;
5372 #elif defined(TARGET_ALPHA)
5374 struct target_sigcontext {
5375 abi_long sc_onstack;
5376 abi_long sc_mask;
5377 abi_long sc_pc;
5378 abi_long sc_ps;
5379 abi_long sc_regs[32];
5380 abi_long sc_ownedfp;
5381 abi_long sc_fpregs[32];
5382 abi_ulong sc_fpcr;
5383 abi_ulong sc_fp_control;
5384 abi_ulong sc_reserved1;
5385 abi_ulong sc_reserved2;
5386 abi_ulong sc_ssize;
5387 abi_ulong sc_sbase;
5388 abi_ulong sc_traparg_a0;
5389 abi_ulong sc_traparg_a1;
5390 abi_ulong sc_traparg_a2;
5391 abi_ulong sc_fp_trap_pc;
5392 abi_ulong sc_fp_trigger_sum;
5393 abi_ulong sc_fp_trigger_inst;
5396 struct target_ucontext {
5397 abi_ulong tuc_flags;
5398 abi_ulong tuc_link;
5399 abi_ulong tuc_osf_sigmask;
5400 target_stack_t tuc_stack;
5401 struct target_sigcontext tuc_mcontext;
5402 target_sigset_t tuc_sigmask;
5405 struct target_sigframe {
5406 struct target_sigcontext sc;
5407 unsigned int retcode[3];
5410 struct target_rt_sigframe {
5411 target_siginfo_t info;
5412 struct target_ucontext uc;
5413 unsigned int retcode[3];
5416 #define INSN_MOV_R30_R16 0x47fe0410
5417 #define INSN_LDI_R0 0x201f0000
5418 #define INSN_CALLSYS 0x00000083
5420 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5421 abi_ulong frame_addr, target_sigset_t *set)
5423 int i;
5425 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5426 __put_user(set->sig[0], &sc->sc_mask);
5427 __put_user(env->pc, &sc->sc_pc);
5428 __put_user(8, &sc->sc_ps);
5430 for (i = 0; i < 31; ++i) {
5431 __put_user(env->ir[i], &sc->sc_regs[i]);
5433 __put_user(0, &sc->sc_regs[31]);
5435 for (i = 0; i < 31; ++i) {
5436 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5438 __put_user(0, &sc->sc_fpregs[31]);
5439 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5441 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5442 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5443 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5446 static void restore_sigcontext(CPUAlphaState *env,
5447 struct target_sigcontext *sc)
5449 uint64_t fpcr;
5450 int i;
5452 __get_user(env->pc, &sc->sc_pc);
5454 for (i = 0; i < 31; ++i) {
5455 __get_user(env->ir[i], &sc->sc_regs[i]);
5457 for (i = 0; i < 31; ++i) {
5458 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5461 __get_user(fpcr, &sc->sc_fpcr);
5462 cpu_alpha_store_fpcr(env, fpcr);
5465 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5466 CPUAlphaState *env,
5467 unsigned long framesize)
5469 abi_ulong sp = env->ir[IR_SP];
5471 /* This is the X/Open sanctioned signal stack switching. */
5472 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5473 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5475 return (sp - framesize) & -32;
5478 static void setup_frame(int sig, struct target_sigaction *ka,
5479 target_sigset_t *set, CPUAlphaState *env)
5481 abi_ulong frame_addr, r26;
5482 struct target_sigframe *frame;
5483 int err = 0;
5485 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5486 trace_user_setup_frame(env, frame_addr);
5487 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5488 goto give_sigsegv;
5491 setup_sigcontext(&frame->sc, env, frame_addr, set);
5493 if (ka->sa_restorer) {
5494 r26 = ka->sa_restorer;
5495 } else {
5496 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5497 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5498 &frame->retcode[1]);
5499 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5500 /* imb() */
5501 r26 = frame_addr;
5504 unlock_user_struct(frame, frame_addr, 1);
5506 if (err) {
5507 give_sigsegv:
5508 if (sig == TARGET_SIGSEGV) {
5509 ka->_sa_handler = TARGET_SIG_DFL;
5511 force_sig(TARGET_SIGSEGV);
5514 env->ir[IR_RA] = r26;
5515 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5516 env->ir[IR_A0] = sig;
5517 env->ir[IR_A1] = 0;
5518 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5519 env->ir[IR_SP] = frame_addr;
5522 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5523 target_siginfo_t *info,
5524 target_sigset_t *set, CPUAlphaState *env)
5526 abi_ulong frame_addr, r26;
5527 struct target_rt_sigframe *frame;
5528 int i, err = 0;
5530 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5531 trace_user_setup_rt_frame(env, frame_addr);
5532 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5533 goto give_sigsegv;
5536 tswap_siginfo(&frame->info, info);
5538 __put_user(0, &frame->uc.tuc_flags);
5539 __put_user(0, &frame->uc.tuc_link);
5540 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5541 __put_user(target_sigaltstack_used.ss_sp,
5542 &frame->uc.tuc_stack.ss_sp);
5543 __put_user(sas_ss_flags(env->ir[IR_SP]),
5544 &frame->uc.tuc_stack.ss_flags);
5545 __put_user(target_sigaltstack_used.ss_size,
5546 &frame->uc.tuc_stack.ss_size);
5547 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5548 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5549 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5552 if (ka->sa_restorer) {
5553 r26 = ka->sa_restorer;
5554 } else {
5555 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5556 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5557 &frame->retcode[1]);
5558 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5559 /* imb(); */
5560 r26 = frame_addr;
5563 if (err) {
5564 give_sigsegv:
5565 if (sig == TARGET_SIGSEGV) {
5566 ka->_sa_handler = TARGET_SIG_DFL;
5568 force_sig(TARGET_SIGSEGV);
5571 env->ir[IR_RA] = r26;
5572 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5573 env->ir[IR_A0] = sig;
5574 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5575 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5576 env->ir[IR_SP] = frame_addr;
5579 long do_sigreturn(CPUAlphaState *env)
5581 struct target_sigcontext *sc;
5582 abi_ulong sc_addr = env->ir[IR_A0];
5583 target_sigset_t target_set;
5584 sigset_t set;
5586 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5587 goto badframe;
5590 target_sigemptyset(&target_set);
5591 __get_user(target_set.sig[0], &sc->sc_mask);
5593 target_to_host_sigset_internal(&set, &target_set);
5594 set_sigmask(&set);
5596 restore_sigcontext(env, sc);
5597 unlock_user_struct(sc, sc_addr, 0);
5598 return -TARGET_QEMU_ESIGRETURN;
5600 badframe:
5601 force_sig(TARGET_SIGSEGV);
5604 long do_rt_sigreturn(CPUAlphaState *env)
5606 abi_ulong frame_addr = env->ir[IR_A0];
5607 struct target_rt_sigframe *frame;
5608 sigset_t set;
5610 trace_user_do_rt_sigreturn(env, frame_addr);
5611 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5612 goto badframe;
5614 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5615 set_sigmask(&set);
5617 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5618 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5619 uc.tuc_stack),
5620 0, env->ir[IR_SP]) == -EFAULT) {
5621 goto badframe;
5624 unlock_user_struct(frame, frame_addr, 0);
5625 return -TARGET_QEMU_ESIGRETURN;
5628 badframe:
5629 unlock_user_struct(frame, frame_addr, 0);
5630 force_sig(TARGET_SIGSEGV);
5633 #elif defined(TARGET_TILEGX)
5635 struct target_sigcontext {
5636 union {
5637 /* General-purpose registers. */
5638 abi_ulong gregs[56];
5639 struct {
5640 abi_ulong __gregs[53];
5641 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
5642 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
5643 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
5646 abi_ulong pc; /* Program counter. */
5647 abi_ulong ics; /* In Interrupt Critical Section? */
5648 abi_ulong faultnum; /* Fault number. */
5649 abi_ulong pad[5];
5652 struct target_ucontext {
5653 abi_ulong tuc_flags;
5654 abi_ulong tuc_link;
5655 target_stack_t tuc_stack;
5656 struct target_sigcontext tuc_mcontext;
5657 target_sigset_t tuc_sigmask; /* mask last for extensibility */
5660 struct target_rt_sigframe {
5661 unsigned char save_area[16]; /* caller save area */
5662 struct target_siginfo info;
5663 struct target_ucontext uc;
5664 abi_ulong retcode[2];
5667 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
5668 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
5671 static void setup_sigcontext(struct target_sigcontext *sc,
5672 CPUArchState *env, int signo)
5674 int i;
5676 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5677 __put_user(env->regs[i], &sc->gregs[i]);
5680 __put_user(env->pc, &sc->pc);
5681 __put_user(0, &sc->ics);
5682 __put_user(signo, &sc->faultnum);
5685 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
5687 int i;
5689 for (i = 0; i < TILEGX_R_COUNT; ++i) {
5690 __get_user(env->regs[i], &sc->gregs[i]);
5693 __get_user(env->pc, &sc->pc);
5696 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
5697 size_t frame_size)
5699 unsigned long sp = env->regs[TILEGX_R_SP];
5701 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
5702 return -1UL;
5705 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
5706 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5709 sp -= frame_size;
5710 sp &= -16UL;
5711 return sp;
5714 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5715 target_siginfo_t *info,
5716 target_sigset_t *set, CPUArchState *env)
5718 abi_ulong frame_addr;
5719 struct target_rt_sigframe *frame;
5720 unsigned long restorer;
5722 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5723 trace_user_setup_rt_frame(env, frame_addr);
5724 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5725 goto give_sigsegv;
5728 /* Always write at least the signal number for the stack backtracer. */
5729 if (ka->sa_flags & TARGET_SA_SIGINFO) {
5730 /* At sigreturn time, restore the callee-save registers too. */
5731 tswap_siginfo(&frame->info, info);
5732 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: we can skip it? */
5733 } else {
5734 __put_user(info->si_signo, &frame->info.si_signo);
5737 /* Create the ucontext. */
5738 __put_user(0, &frame->uc.tuc_flags);
5739 __put_user(0, &frame->uc.tuc_link);
5740 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
5741 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
5742 &frame->uc.tuc_stack.ss_flags);
5743 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
5744 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
5746 if (ka->sa_flags & TARGET_SA_RESTORER) {
5747 restorer = (unsigned long) ka->sa_restorer;
5748 } else {
5749 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
5750 __put_user(INSN_SWINT1, &frame->retcode[1]);
5751 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5753 env->pc = (unsigned long) ka->_sa_handler;
5754 env->regs[TILEGX_R_SP] = (unsigned long) frame;
5755 env->regs[TILEGX_R_LR] = restorer;
5756 env->regs[0] = (unsigned long) sig;
5757 env->regs[1] = (unsigned long) &frame->info;
5758 env->regs[2] = (unsigned long) &frame->uc;
5759 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: we can skip it? */
5761 unlock_user_struct(frame, frame_addr, 1);
5762 return;
5764 give_sigsegv:
5765 if (sig == TARGET_SIGSEGV) {
5766 ka->_sa_handler = TARGET_SIG_DFL;
5768 force_sig(TARGET_SIGSEGV /* , current */);
5771 long do_rt_sigreturn(CPUTLGState *env)
5773 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
5774 struct target_rt_sigframe *frame;
5775 sigset_t set;
5777 trace_user_do_rt_sigreturn(env, frame_addr);
5778 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5779 goto badframe;
5781 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5782 set_sigmask(&set);
5784 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5785 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5786 uc.tuc_stack),
5787 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
5788 goto badframe;
5791 unlock_user_struct(frame, frame_addr, 0);
5792 return -TARGET_QEMU_ESIGRETURN;
5795 badframe:
5796 unlock_user_struct(frame, frame_addr, 0);
5797 force_sig(TARGET_SIGSEGV);
5800 #else
5802 static void setup_frame(int sig, struct target_sigaction *ka,
5803 target_sigset_t *set, CPUArchState *env)
5805 fprintf(stderr, "setup_frame: not implemented\n");
5808 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5809 target_siginfo_t *info,
5810 target_sigset_t *set, CPUArchState *env)
5812 fprintf(stderr, "setup_rt_frame: not implemented\n");
5815 long do_sigreturn(CPUArchState *env)
5817 fprintf(stderr, "do_sigreturn: not implemented\n");
5818 return -TARGET_ENOSYS;
5821 long do_rt_sigreturn(CPUArchState *env)
5823 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5824 return -TARGET_ENOSYS;
5827 #endif
5829 static void handle_pending_signal(CPUArchState *cpu_env, int sig)
5831 CPUState *cpu = ENV_GET_CPU(cpu_env);
5832 abi_ulong handler;
5833 sigset_t set;
5834 target_sigset_t target_old_set;
5835 struct target_sigaction *sa;
5836 TaskState *ts = cpu->opaque;
5837 struct emulated_sigtable *k = &ts->sigtab[sig - 1];
5839 trace_user_handle_signal(cpu_env, sig);
5840 /* dequeue signal */
5841 k->pending = 0;
5843 sig = gdb_handlesig(cpu, sig);
5844 if (!sig) {
5845 sa = NULL;
5846 handler = TARGET_SIG_IGN;
5847 } else {
5848 sa = &sigact_table[sig - 1];
5849 handler = sa->_sa_handler;
5852 if (handler == TARGET_SIG_DFL) {
5853 /* default handler : ignore some signal. The other are job control or fatal */
5854 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5855 kill(getpid(),SIGSTOP);
5856 } else if (sig != TARGET_SIGCHLD &&
5857 sig != TARGET_SIGURG &&
5858 sig != TARGET_SIGWINCH &&
5859 sig != TARGET_SIGCONT) {
5860 force_sig(sig);
5862 } else if (handler == TARGET_SIG_IGN) {
5863 /* ignore sig */
5864 } else if (handler == TARGET_SIG_ERR) {
5865 force_sig(sig);
5866 } else {
5867 /* compute the blocked signals during the handler execution */
5868 sigset_t *blocked_set;
5870 target_to_host_sigset(&set, &sa->sa_mask);
5871 /* SA_NODEFER indicates that the current signal should not be
5872 blocked during the handler */
5873 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5874 sigaddset(&set, target_to_host_signal(sig));
5876 /* save the previous blocked signal state to restore it at the
5877 end of the signal execution (see do_sigreturn) */
5878 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
5880 /* block signals in the handler */
5881 blocked_set = ts->in_sigsuspend ?
5882 &ts->sigsuspend_mask : &ts->signal_mask;
5883 sigorset(&ts->signal_mask, blocked_set, &set);
5884 ts->in_sigsuspend = 0;
5886 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5887 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5889 CPUX86State *env = cpu_env;
5890 if (env->eflags & VM_MASK)
5891 save_v86_state(env);
5893 #endif
5894 /* prepare the stack frame of the virtual CPU */
5895 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
5896 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
5897 /* These targets do not have traditional signals. */
5898 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5899 #else
5900 if (sa->sa_flags & TARGET_SA_SIGINFO)
5901 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
5902 else
5903 setup_frame(sig, sa, &target_old_set, cpu_env);
5904 #endif
5905 if (sa->sa_flags & TARGET_SA_RESETHAND) {
5906 sa->_sa_handler = TARGET_SIG_DFL;
5911 void process_pending_signals(CPUArchState *cpu_env)
5913 CPUState *cpu = ENV_GET_CPU(cpu_env);
5914 int sig;
5915 TaskState *ts = cpu->opaque;
5916 sigset_t set;
5917 sigset_t *blocked_set;
5919 while (atomic_read(&ts->signal_pending)) {
5920 /* FIXME: This is not threadsafe. */
5921 sigfillset(&set);
5922 sigprocmask(SIG_SETMASK, &set, 0);
5924 sig = ts->sync_signal.pending;
5925 if (sig) {
5926 /* Synchronous signals are forced,
5927 * see force_sig_info() and callers in Linux
5928 * Note that not all of our queue_signal() calls in QEMU correspond
5929 * to force_sig_info() calls in Linux (some are send_sig_info()).
5930 * However it seems like a kernel bug to me to allow the process
5931 * to block a synchronous signal since it could then just end up
5932 * looping round and round indefinitely.
5934 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
5935 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
5936 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
5937 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
5940 handle_pending_signal(cpu_env, sig);
5943 for (sig = 1; sig <= TARGET_NSIG; sig++) {
5944 blocked_set = ts->in_sigsuspend ?
5945 &ts->sigsuspend_mask : &ts->signal_mask;
5947 if (ts->sigtab[sig - 1].pending &&
5948 (!sigismember(blocked_set,
5949 target_to_host_signal_table[sig]))) {
5950 handle_pending_signal(cpu_env, sig);
5951 /* Restart scan from the beginning */
5952 sig = 1;
5956 /* if no signal is pending, unblock signals and recheck (the act
5957 * of unblocking might cause us to take another host signal which
5958 * will set signal_pending again).
5960 atomic_set(&ts->signal_pending, 0);
5961 ts->in_sigsuspend = 0;
5962 set = ts->signal_mask;
5963 sigdelset(&set, SIGSEGV);
5964 sigdelset(&set, SIGBUS);
5965 sigprocmask(SIG_SETMASK, &set, 0);
5967 ts->in_sigsuspend = 0;