/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT, */
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};
static uint8_t target_to_host_signal_table[_NSIG];
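/* The inverse mapping is not written out here: signal_init() below fills
 * target_to_host_signal_table[] by inverting host_to_target_signal_table[]
 * at startup. */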
static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
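/* Note: block_signals() returns the previous signal_pending value; a
 * non-zero result means a signal was already pending, and callers such
 * as do_sigprocmask() below back out with -TARGET_ERESTARTSYS so the
 * guest syscall is restarted after the signal has been delivered. */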
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}
/* XXX: we only support the case where POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return (1);
    default:
        return (0);
    }
}
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i))
            sigaction(host_sig, &act, NULL);
    }
}

#ifndef TARGET_UNICORE32
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
static void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
static void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif
/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
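/* queue_signal() handles synchronous, CPU-generated signals: the siginfo
 * goes into ts->sync_signal.  Asynchronous host signals are instead
 * recorded per-signal in ts->sigtab[] by host_signal_handler() below;
 * both paths set ts->signal_pending so that the main loop delivers the
 * signal in process_pending_signals(). */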
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
/* do_sigaction() returns target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG ||
        sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
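/* Note that the host SIGSEGV and SIGBUS handlers installed in signal_init()
 * are deliberately never replaced above: QEMU relies on them to detect guest
 * memory faults, so for those signals only the guest-visible sigact_table
 * entry changes. */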
#if defined(TARGET_I386)

/* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */

struct target_fpreg {
    uint16_t significand[4];
    uint16_t exponent;
};

struct target_fpxreg {
    uint16_t significand[4];
    uint16_t exponent;
    uint16_t padding[3];
};

struct target_xmmreg {
    uint32_t element[4];
};

struct target_fpstate_32 {
    /* Regular FPU environment */
    uint32_t cw;
    uint32_t sw;
    uint32_t tag;
    uint32_t ipoff;
    uint32_t cssel;
    uint32_t dataoff;
    uint32_t datasel;
    struct target_fpreg st[8];
    uint16_t status;
    uint16_t magic;          /* 0xffff = regular FPU data only */

    /* FXSR FPU environment */
    uint32_t _fxsr_env[6];   /* FXSR FPU env is ignored */
    uint32_t mxcsr;
    uint32_t reserved;
    struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
    struct target_xmmreg xmm[8];
    uint32_t padding[56];
};

struct target_fpstate_64 {
    /* FXSAVE format */
    uint16_t cw;
    uint16_t sw;
    uint16_t twd;
    uint16_t fop;
    uint64_t rip;
    uint64_t rdp;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint32_t st_space[32];
    uint32_t xmm_space[64];
    uint32_t reserved[24];
};

#ifndef TARGET_X86_64
# define target_fpstate target_fpstate_32
#else
# define target_fpstate target_fpstate_64
#endif
struct target_sigcontext_32 {
    uint16_t gs, __gsh;
    uint16_t fs, __fsh;
    uint16_t es, __esh;
    uint16_t ds, __dsh;
    uint32_t edi;
    uint32_t esi;
    uint32_t ebp;
    uint32_t esp;
    uint32_t ebx;
    uint32_t edx;
    uint32_t ecx;
    uint32_t eax;
    uint32_t trapno;
    uint32_t err;
    uint32_t eip;
    uint16_t cs, __csh;
    uint32_t eflags;
    uint32_t esp_at_signal;
    uint16_t ss, __ssh;
    uint32_t fpstate; /* pointer */
    uint32_t oldmask;
    uint32_t cr2;
};

struct target_sigcontext_64 {
    uint64_t r8;
    uint64_t r9;
    uint64_t r10;
    uint64_t r11;
    uint64_t r12;
    uint64_t r13;
    uint64_t r14;
    uint64_t r15;

    uint64_t rdi;
    uint64_t rsi;
    uint64_t rbp;
    uint64_t rbx;
    uint64_t rdx;
    uint64_t rax;
    uint64_t rcx;
    uint64_t rsp;
    uint64_t rip;

    uint64_t eflags;

    uint16_t cs;
    uint16_t gs;
    uint16_t fs;
    uint16_t ss;

    uint64_t err;
    uint64_t trapno;
    uint64_t oldmask;
    uint64_t cr2;

    uint64_t fpstate; /* pointer */
    uint64_t padding[8];
};

#ifndef TARGET_X86_64
# define target_sigcontext target_sigcontext_32
#else
# define target_sigcontext target_sigcontext_64
#endif

/* see Linux/include/uapi/asm-generic/ucontext.h */
struct target_ucontext {
    abi_ulong         tuc_flags;
    abi_ulong         tuc_link;
    target_stack_t    tuc_stack;
    struct target_sigcontext tuc_mcontext;
    target_sigset_t   tuc_sigmask;  /* mask last for extensibility */
};

#ifndef TARGET_X86_64
struct sigframe {
    abi_ulong pretcode;
    int sig;
    struct target_sigcontext sc;
    struct target_fpstate fpstate;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    char retcode[8];
};

struct rt_sigframe {
    abi_ulong pretcode;
    int sig;
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext uc;
    struct target_fpstate fpstate;
    char retcode[8];
};

#else

struct rt_sigframe {
    abi_ulong pretcode;
    struct target_ucontext uc;
    struct target_siginfo info;
    struct target_fpstate fpstate;
};

#endif
/*
 * Set up a signal frame.
 */

/* XXX: save x87 state */
static void setup_sigcontext(struct target_sigcontext *sc,
        struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
        abi_ulong fpstate_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
#ifndef TARGET_X86_64
    uint16_t magic;

    /* already locked in setup_frame() */
    __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
    __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
    __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
    __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
    __put_user(env->regs[R_EDI], &sc->edi);
    __put_user(env->regs[R_ESI], &sc->esi);
    __put_user(env->regs[R_EBP], &sc->ebp);
    __put_user(env->regs[R_ESP], &sc->esp);
    __put_user(env->regs[R_EBX], &sc->ebx);
    __put_user(env->regs[R_EDX], &sc->edx);
    __put_user(env->regs[R_ECX], &sc->ecx);
    __put_user(env->regs[R_EAX], &sc->eax);
    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->eip);
    __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
    __put_user(env->eflags, &sc->eflags);
    __put_user(env->regs[R_ESP], &sc->esp_at_signal);
    __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);

    cpu_x86_fsave(env, fpstate_addr, 1);
    fpstate->status = fpstate->sw;
    magic = 0xffff;
    __put_user(magic, &fpstate->magic);
    __put_user(fpstate_addr, &sc->fpstate);

    /* non-iBCS2 extensions.. */
    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);
#else
    __put_user(env->regs[R_EDI], &sc->rdi);
    __put_user(env->regs[R_ESI], &sc->rsi);
    __put_user(env->regs[R_EBP], &sc->rbp);
    __put_user(env->regs[R_ESP], &sc->rsp);
    __put_user(env->regs[R_EBX], &sc->rbx);
    __put_user(env->regs[R_EDX], &sc->rdx);
    __put_user(env->regs[R_ECX], &sc->rcx);
    __put_user(env->regs[R_EAX], &sc->rax);

    __put_user(env->regs[8], &sc->r8);
    __put_user(env->regs[9], &sc->r9);
    __put_user(env->regs[10], &sc->r10);
    __put_user(env->regs[11], &sc->r11);
    __put_user(env->regs[12], &sc->r12);
    __put_user(env->regs[13], &sc->r13);
    __put_user(env->regs[14], &sc->r14);
    __put_user(env->regs[15], &sc->r15);

    __put_user(cs->exception_index, &sc->trapno);
    __put_user(env->error_code, &sc->err);
    __put_user(env->eip, &sc->rip);

    __put_user(env->eflags, &sc->eflags);
    __put_user(env->segs[R_CS].selector, &sc->cs);
    __put_user((uint16_t)0, &sc->gs);
    __put_user((uint16_t)0, &sc->fs);
    __put_user(env->segs[R_SS].selector, &sc->ss);

    __put_user(mask, &sc->oldmask);
    __put_user(env->cr[2], &sc->cr2);

    /* fpstate_addr must be 16 byte aligned for fxsave */
    assert(!(fpstate_addr & 0xf));

    cpu_x86_fxsave(env, fpstate_addr);
    __put_user(fpstate_addr, &sc->fpstate);
#endif
}
/*
 * Determine which stack to use..
 */

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
{
    unsigned long esp;

    /* Default to using normal stack */
    esp = env->regs[R_ESP];
#ifdef TARGET_X86_64
    esp -= 128; /* this is the redzone */
#endif

    /* This is the X/Open sanctioned signal stack switching.  */
    if (ka->sa_flags & TARGET_SA_ONSTACK) {
        if (sas_ss_flags(esp) == 0) {
            esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
        }
    } else {
#ifndef TARGET_X86_64
        /* This is the legacy signal stack switching. */
        if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
            !(ka->sa_flags & TARGET_SA_RESTORER) &&
            ka->sa_restorer) {
            esp = (unsigned long) ka->sa_restorer;
        }
#endif
    }

#ifndef TARGET_X86_64
    return (esp - frame_size) & -8ul;
#else
    return ((esp - frame_size) & (~15ul)) - 8;
#endif
}
#ifndef TARGET_X86_64
/* compare linux/arch/i386/kernel/signal.c:setup_frame() */
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(sig, &frame->sig);

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
            frame_addr + offsetof(struct sigframe, fpstate));

    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->extramask[i - 1]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
#endif
/* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info,
                           target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
#ifndef TARGET_X86_64
    abi_ulong addr;
#endif
    struct rt_sigframe *frame;
    int i;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_rt_frame(env, frame_addr);

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    /* These fields are only in rt_sigframe on 32 bit */
#ifndef TARGET_X86_64
    __put_user(sig, &frame->sig);
    addr = frame_addr + offsetof(struct rt_sigframe, info);
    __put_user(addr, &frame->pinfo);
    addr = frame_addr + offsetof(struct rt_sigframe, uc);
    __put_user(addr, &frame->puc);
#endif
    if (ka->sa_flags & TARGET_SA_SIGINFO) {
        tswap_siginfo(&frame->info, info);
    }

    /* Create the ucontext.  */
    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);
    __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
            set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
#ifndef TARGET_X86_64
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        addr = frame_addr + offsetof(struct rt_sigframe, retcode);
        __put_user(addr, &frame->pretcode);
        /* This is movl $,%eax ; int $0x80 */
        __put_user(0xb8, (char *)(frame->retcode+0));
        __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+5));
    }
#else
    /* XXX: Would be slightly better to return -EFAULT here if test fails
       assert(ka->sa_flags & TARGET_SA_RESTORER); */
    __put_user(ka->sa_restorer, &frame->pretcode);
#endif

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

#ifndef TARGET_X86_64
    env->regs[R_EAX] = sig;
    env->regs[R_EDX] = (unsigned long)&frame->info;
    env->regs[R_ECX] = (unsigned long)&frame->uc;
#else
    env->regs[R_EAX] = 0;
    env->regs[R_EDI] = sig;
    env->regs[R_ESI] = (unsigned long)&frame->info;
    env->regs[R_EDX] = (unsigned long)&frame->uc;
#endif

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);

    return;

give_sigsegv:
    force_sigsegv(sig);
}
static int
restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
{
    unsigned int err = 0;
    abi_ulong fpstate_addr;
    unsigned int tmpflags;

#ifndef TARGET_X86_64
    cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
    cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
    cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
    cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));

    env->regs[R_EDI] = tswapl(sc->edi);
    env->regs[R_ESI] = tswapl(sc->esi);
    env->regs[R_EBP] = tswapl(sc->ebp);
    env->regs[R_ESP] = tswapl(sc->esp);
    env->regs[R_EBX] = tswapl(sc->ebx);
    env->regs[R_EDX] = tswapl(sc->edx);
    env->regs[R_ECX] = tswapl(sc->ecx);
    env->regs[R_EAX] = tswapl(sc->eax);

    env->eip = tswapl(sc->eip);
#else
    env->regs[8] = tswapl(sc->r8);
    env->regs[9] = tswapl(sc->r9);
    env->regs[10] = tswapl(sc->r10);
    env->regs[11] = tswapl(sc->r11);
    env->regs[12] = tswapl(sc->r12);
    env->regs[13] = tswapl(sc->r13);
    env->regs[14] = tswapl(sc->r14);
    env->regs[15] = tswapl(sc->r15);

    env->regs[R_EDI] = tswapl(sc->rdi);
    env->regs[R_ESI] = tswapl(sc->rsi);
    env->regs[R_EBP] = tswapl(sc->rbp);
    env->regs[R_EBX] = tswapl(sc->rbx);
    env->regs[R_EDX] = tswapl(sc->rdx);
    env->regs[R_EAX] = tswapl(sc->rax);
    env->regs[R_ECX] = tswapl(sc->rcx);
    env->regs[R_ESP] = tswapl(sc->rsp);

    env->eip = tswapl(sc->rip);
#endif

    cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
    cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);

    tmpflags = tswapl(sc->eflags);
    env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
    // regs->orig_eax = -1; /* disable syscall checks */

    fpstate_addr = tswapl(sc->fpstate);
    if (fpstate_addr != 0) {
        if (!access_ok(VERIFY_READ, fpstate_addr,
                       sizeof(struct target_fpstate)))
            goto badframe;
#ifndef TARGET_X86_64
        cpu_x86_frstor(env, fpstate_addr, 1);
#else
        cpu_x86_fxrstor(env, fpstate_addr);
#endif
    }

    return err;
badframe:
    return 1;
}
/* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
#ifndef TARGET_X86_64
long do_sigreturn(CPUX86State *env)
{
    struct sigframe *frame;
    abi_ulong frame_addr = env->regs[R_ESP] - 8;
    target_sigset_t target_set;
    sigset_t set;
    int i;

    trace_user_do_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    /* set blocked signals */
    __get_user(target_set.sig[0], &frame->sc.oldmask);
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        __get_user(target_set.sig[i], &frame->extramask[i - 1]);
    }

    target_to_host_sigset_internal(&set, &target_set);
    set_sigmask(&set);

    /* restore registers */
    if (restore_sigcontext(env, &frame->sc))
        goto badframe;
    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif
long do_rt_sigreturn(CPUX86State *env)
{
    abi_ulong frame_addr;
    struct rt_sigframe *frame;
    sigset_t set;

    frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
    trace_user_do_rt_sigreturn(env, frame_addr);
    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
        goto badframe;
    target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
    set_sigmask(&set);

    if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
                       get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#elif defined(TARGET_AARCH64)

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};

/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

/*
 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
 * user space as it will change with the addition of new context. User space
 * should check the magic/size information.
 */
struct target_aux_context {
    struct target_fpsimd_context fpsimd;
    /* additional context to be added before "end" */
    struct target_aarch64_ctx end;
};

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};
static int target_setup_sigframe(struct target_rt_sigframe *sf,
                                 CPUARMState *env, target_sigset_t *set)
{
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;

    /* set up the stack frame for unwinding */
    __put_user(env->xregs[29], &sf->fp);
    __put_user(env->xregs[30], &sf->lr);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
        __put_user(q[1], &aux->fpsimd.vregs[i * 2]);
#else
        __put_user(q[0], &aux->fpsimd.vregs[i * 2]);
        __put_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
    __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
    __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
    __put_user(sizeof(struct target_fpsimd_context),
               &aux->fpsimd.head.size);

    /* set the "end" magic */
    __put_user(0, &aux->end.magic);
    __put_user(0, &aux->end.size);

    return 0;
}
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    sigset_t set;
    int i;
    struct target_aux_context *aux =
        (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
    uint32_t magic, size, fpsr, fpcr;
    uint64_t pstate;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);

    __get_user(magic, &aux->fpsimd.head.magic);
    __get_user(size, &aux->fpsimd.head.size);

    if (magic != TARGET_FPSIMD_MAGIC
        || size != sizeof(struct target_fpsimd_context)) {
        return 1;
    }

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(q[0], &aux->fpsimd.vregs[i * 2 + 1]);
        __get_user(q[1], &aux->fpsimd.vregs[i * 2]);
#else
        __get_user(q[0], &aux->fpsimd.vregs[i * 2]);
        __get_user(q[1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
    }
    __get_user(fpsr, &aux->fpsimd.fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &aux->fpsimd.fpcr);
    vfp_set_fpcr(env, fpcr);

    return 0;
}

static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
{
    abi_ulong sp;

    sp = env->xregs[31];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }

    sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;

    return sp;
}
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    struct target_rt_sigframe *frame;
    abi_ulong frame_addr, return_addr;

    frame_addr = get_sigframe(ka, env);
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
        goto give_sigsegv;
    }

    __put_user(0, &frame->uc.tuc_flags);
    __put_user(0, &frame->uc.tuc_link);

    __put_user(target_sigaltstack_used.ss_sp,
               &frame->uc.tuc_stack.ss_sp);
    __put_user(sas_ss_flags(env->xregs[31]),
               &frame->uc.tuc_stack.ss_flags);
    __put_user(target_sigaltstack_used.ss_size,
               &frame->uc.tuc_stack.ss_size);
    target_setup_sigframe(frame, env, set);
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /*
         * mov x8,#__NR_rt_sigreturn; svc #0
         * Since these are instructions they need to be put as little-endian
         * regardless of target default or current CPU endianness.
         */
        __put_user_e(0xd2801168, &frame->tramp[0], le);
        __put_user_e(0xd4000001, &frame->tramp[1], le);
        return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(usig);
}

static void setup_rt_frame(int sig, struct target_sigaction *ka,
                           target_siginfo_t *info, target_sigset_t *set,
                           CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}
1685 struct target_sigcontext {
1686 abi_ulong trap_no;
1687 abi_ulong error_code;
1688 abi_ulong oldmask;
1689 abi_ulong arm_r0;
1690 abi_ulong arm_r1;
1691 abi_ulong arm_r2;
1692 abi_ulong arm_r3;
1693 abi_ulong arm_r4;
1694 abi_ulong arm_r5;
1695 abi_ulong arm_r6;
1696 abi_ulong arm_r7;
1697 abi_ulong arm_r8;
1698 abi_ulong arm_r9;
1699 abi_ulong arm_r10;
1700 abi_ulong arm_fp;
1701 abi_ulong arm_ip;
1702 abi_ulong arm_sp;
1703 abi_ulong arm_lr;
1704 abi_ulong arm_pc;
1705 abi_ulong arm_cpsr;
1706 abi_ulong fault_address;
1709 struct target_ucontext_v1 {
1710 abi_ulong tuc_flags;
1711 abi_ulong tuc_link;
1712 target_stack_t tuc_stack;
1713 struct target_sigcontext tuc_mcontext;
1714 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1717 struct target_ucontext_v2 {
1718 abi_ulong tuc_flags;
1719 abi_ulong tuc_link;
1720 target_stack_t tuc_stack;
1721 struct target_sigcontext tuc_mcontext;
1722 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1723 char __unused[128 - sizeof(target_sigset_t)];
1724 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1727 struct target_user_vfp {
1728 uint64_t fpregs[32];
1729 abi_ulong fpscr;
1732 struct target_user_vfp_exc {
1733 abi_ulong fpexc;
1734 abi_ulong fpinst;
1735 abi_ulong fpinst2;
1738 struct target_vfp_sigframe {
1739 abi_ulong magic;
1740 abi_ulong size;
1741 struct target_user_vfp ufp;
1742 struct target_user_vfp_exc ufp_exc;
1743 } __attribute__((__aligned__(8)));
1745 struct target_iwmmxt_sigframe {
1746 abi_ulong magic;
1747 abi_ulong size;
1748 uint64_t regs[16];
1749 /* Note that not all the coprocessor control registers are stored here */
1750 uint32_t wcssf;
1751 uint32_t wcasf;
1752 uint32_t wcgr0;
1753 uint32_t wcgr1;
1754 uint32_t wcgr2;
1755 uint32_t wcgr3;
1756 } __attribute__((__aligned__(8)));
1758 #define TARGET_VFP_MAGIC 0x56465001
1759 #define TARGET_IWMMXT_MAGIC 0x12ef842a
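/* These magic values tag the VFP and iWMMXt coprocessor frames written
 * into tuc_regspace below; they are intended to match the VFP_MAGIC and
 * IWMMXT_MAGIC values the Linux kernel uses for the same frames. */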
struct sigframe_v1
{
    struct target_sigcontext sc;
    abi_ulong extramask[TARGET_NSIG_WORDS-1];
    abi_ulong retcode;
};

struct sigframe_v2
{
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v1
{
    abi_ulong pinfo;
    abi_ulong puc;
    struct target_siginfo info;
    struct target_ucontext_v1 uc;
    abi_ulong retcode;
};

struct rt_sigframe_v2
{
    struct target_siginfo info;
    struct target_ucontext_v2 uc;
    abi_ulong retcode;
};

#define TARGET_CONFIG_CPU_32 1

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))

static const abi_ulong retcodes[4] = {
    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
    SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
};

static inline int valid_user_regs(CPUARMState *regs)
{
    return 1;
}
static void
setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
                 CPUARMState *env, abi_ulong mask)
{
    __put_user(env->regs[0], &sc->arm_r0);
    __put_user(env->regs[1], &sc->arm_r1);
    __put_user(env->regs[2], &sc->arm_r2);
    __put_user(env->regs[3], &sc->arm_r3);
    __put_user(env->regs[4], &sc->arm_r4);
    __put_user(env->regs[5], &sc->arm_r5);
    __put_user(env->regs[6], &sc->arm_r6);
    __put_user(env->regs[7], &sc->arm_r7);
    __put_user(env->regs[8], &sc->arm_r8);
    __put_user(env->regs[9], &sc->arm_r9);
    __put_user(env->regs[10], &sc->arm_r10);
    __put_user(env->regs[11], &sc->arm_fp);
    __put_user(env->regs[12], &sc->arm_ip);
    __put_user(env->regs[13], &sc->arm_sp);
    __put_user(env->regs[14], &sc->arm_lr);
    __put_user(env->regs[15], &sc->arm_pc);
#ifdef TARGET_CONFIG_CPU_32
    __put_user(cpsr_read(env), &sc->arm_cpsr);
#endif

    __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
    __put_user(/* current->thread.error_code */ 0, &sc->error_code);
    __put_user(/* current->thread.address */ 0, &sc->fault_address);
    __put_user(mask, &sc->oldmask);
}

static inline abi_ulong
get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
{
    unsigned long sp = regs->regs[13];

    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
    }
    /*
     * ATPCS B01 mandates 8-byte alignment
     */
    return (sp - framesize) & ~7;
}

static void
setup_return(CPUARMState *env, struct target_sigaction *ka,
             abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
{
    abi_ulong handler = ka->_sa_handler;
    abi_ulong retcode;
    int thumb = handler & 1;
    uint32_t cpsr = cpsr_read(env);

    cpsr &= ~CPSR_IT;
    if (thumb) {
        cpsr |= CPSR_T;
    } else {
        cpsr &= ~CPSR_T;
    }

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        retcode = ka->sa_restorer;
    } else {
        unsigned int idx = thumb;

        if (ka->sa_flags & TARGET_SA_SIGINFO) {
            idx += 2;
        }

        __put_user(retcodes[idx], rc);

        retcode = rc_addr + thumb;
    }

    env->regs[0] = usig;
    env->regs[13] = frame_addr;
    env->regs[14] = retcode;
    env->regs[15] = handler & (thumb ? ~1 : ~3);
    cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
}
static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
{
    int i;
    struct target_vfp_sigframe *vfpframe;
    vfpframe = (struct target_vfp_sigframe *)regspace;
    __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
    __put_user(sizeof(*vfpframe), &vfpframe->size);
    for (i = 0; i < 32; i++) {
        __put_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
    }
    __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
    __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
    __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
    return (abi_ulong *)(vfpframe + 1);
}

static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
                                           CPUARMState *env)
{
    int i;
    struct target_iwmmxt_sigframe *iwmmxtframe;
    iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
    __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
    __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
    for (i = 0; i < 16; i++) {
        __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
    }
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
    __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
    return (abi_ulong *)(iwmmxtframe + 1);
}
static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
                              target_sigset_t *set, CPUARMState *env)
{
    struct target_sigaltstack stack;
    int i;
    abi_ulong *regspace;

    /* Clear all the bits of the ucontext we don't use.  */
    memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));

    memset(&stack, 0, sizeof(stack));
    __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
    __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
    memcpy(&uc->tuc_stack, &stack, sizeof(stack));

    setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
    /* Save coprocessor signal frame.  */
    regspace = uc->tuc_regspace;
    if (arm_feature(env, ARM_FEATURE_VFP)) {
        regspace = setup_sigframe_v2_vfp(regspace, env);
    }
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        regspace = setup_sigframe_v2_iwmmxt(regspace, env);
    }

    /* Write terminating magic word */
    __put_user(0, regspace);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
    }
}
1971 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1972 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1973 target_sigset_t *set, CPUARMState *regs)
1975 struct sigframe_v1 *frame;
1976 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1977 int i;
1979 trace_user_setup_frame(regs, frame_addr);
1980 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1981 goto sigsegv;
1984 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1986 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1987 __put_user(set->sig[i], &frame->extramask[i - 1]);
1990 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1991 frame_addr + offsetof(struct sigframe_v1, retcode));
1993 unlock_user_struct(frame, frame_addr, 1);
1994 return;
1995 sigsegv:
1996 force_sigsegv(usig);
1999 static void setup_frame_v2(int usig, struct target_sigaction *ka,
2000 target_sigset_t *set, CPUARMState *regs)
2002 struct sigframe_v2 *frame;
2003 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2005 trace_user_setup_frame(regs, frame_addr);
2006 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2007 goto sigsegv;
2010 setup_sigframe_v2(&frame->uc, set, regs);
2012 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
2013 frame_addr + offsetof(struct sigframe_v2, retcode));
2015 unlock_user_struct(frame, frame_addr, 1);
2016 return;
2017 sigsegv:
2018 force_sigsegv(usig);
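/*
 * 0x020612 is kernel version 2.6.18 as packed by get_osversion():
 * 2.6.18 and newer guests get the v2 (ucontext based) frame layout,
 * older ones keep the v1 layout.
 */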
2021 static void setup_frame(int usig, struct target_sigaction *ka,
2022 target_sigset_t *set, CPUARMState *regs)
2024 if (get_osversion() >= 0x020612) {
2025 setup_frame_v2(usig, ka, set, regs);
2026 } else {
2027 setup_frame_v1(usig, ka, set, regs);
2031 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
2032 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
2033 target_siginfo_t *info,
2034 target_sigset_t *set, CPUARMState *env)
2036 struct rt_sigframe_v1 *frame;
2037 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2038 struct target_sigaltstack stack;
2039 int i;
2040 abi_ulong info_addr, uc_addr;
2042 trace_user_setup_rt_frame(env, frame_addr);
2043 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2044 goto sigsegv;
2047 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
2048 __put_user(info_addr, &frame->pinfo);
2049 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
2050 __put_user(uc_addr, &frame->puc);
2051 tswap_siginfo(&frame->info, info);
2053 /* Clear all the bits of the ucontext we don't use. */
2054 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
2056 memset(&stack, 0, sizeof(stack));
2057 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
2058 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
2059 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
2060 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
2062 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
2063 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2064 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
2067 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2068 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
2070 env->regs[1] = info_addr;
2071 env->regs[2] = uc_addr;
2073 unlock_user_struct(frame, frame_addr, 1);
2074 return;
2075 sigsegv:
2076 force_sigsegv(usig);
2079 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
2080 target_siginfo_t *info,
2081 target_sigset_t *set, CPUARMState *env)
2083 struct rt_sigframe_v2 *frame;
2084 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2085 abi_ulong info_addr, uc_addr;
2087 trace_user_setup_rt_frame(env, frame_addr);
2088 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2089 goto sigsegv;
2092 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
2093 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
2094 tswap_siginfo(&frame->info, info);
2096 setup_sigframe_v2(&frame->uc, set, env);
2098 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2099 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
2101 env->regs[1] = info_addr;
2102 env->regs[2] = uc_addr;
2104 unlock_user_struct(frame, frame_addr, 1);
2105 return;
2106 sigsegv:
2107 force_sigsegv(usig);
2110 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2111 target_siginfo_t *info,
2112 target_sigset_t *set, CPUARMState *env)
2114 if (get_osversion() >= 0x020612) {
2115 setup_rt_frame_v2(usig, ka, info, set, env);
2116 } else {
2117 setup_rt_frame_v1(usig, ka, info, set, env);
2121 static int
2122 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
2124 int err = 0;
2125 uint32_t cpsr;
2127 __get_user(env->regs[0], &sc->arm_r0);
2128 __get_user(env->regs[1], &sc->arm_r1);
2129 __get_user(env->regs[2], &sc->arm_r2);
2130 __get_user(env->regs[3], &sc->arm_r3);
2131 __get_user(env->regs[4], &sc->arm_r4);
2132 __get_user(env->regs[5], &sc->arm_r5);
2133 __get_user(env->regs[6], &sc->arm_r6);
2134 __get_user(env->regs[7], &sc->arm_r7);
2135 __get_user(env->regs[8], &sc->arm_r8);
2136 __get_user(env->regs[9], &sc->arm_r9);
2137 __get_user(env->regs[10], &sc->arm_r10);
2138 __get_user(env->regs[11], &sc->arm_fp);
2139 __get_user(env->regs[12], &sc->arm_ip);
2140 __get_user(env->regs[13], &sc->arm_sp);
2141 __get_user(env->regs[14], &sc->arm_lr);
2142 __get_user(env->regs[15], &sc->arm_pc);
2143 #ifdef TARGET_CONFIG_CPU_32
2144 __get_user(cpsr, &sc->arm_cpsr);
2145 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
2146 #endif
2148 err |= !valid_user_regs(env);
2150 return err;
2153 static long do_sigreturn_v1(CPUARMState *env)
2155 abi_ulong frame_addr;
2156 struct sigframe_v1 *frame = NULL;
2157 target_sigset_t set;
2158 sigset_t host_set;
2159 int i;
2162 * Since we stacked the signal on a 64-bit boundary,
2163 * 'sp' should be 8-byte aligned here. If it's not,
2164 * the user is trying to mess with us.
2166 frame_addr = env->regs[13];
2167 trace_user_do_sigreturn(env, frame_addr);
2168 if (frame_addr & 7) {
2169 goto badframe;
2172 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2173 goto badframe;
2176 __get_user(set.sig[0], &frame->sc.oldmask);
2177 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2178 __get_user(set.sig[i], &frame->extramask[i - 1]);
2181 target_to_host_sigset_internal(&host_set, &set);
2182 set_sigmask(&host_set);
2184 if (restore_sigcontext(env, &frame->sc)) {
2185 goto badframe;
2188 #if 0
2189 /* Send SIGTRAP if we're single-stepping */
2190 if (ptrace_cancel_bpt(current))
2191 send_sig(SIGTRAP, current, 1);
2192 #endif
2193 unlock_user_struct(frame, frame_addr, 0);
2194 return -TARGET_QEMU_ESIGRETURN;
2196 badframe:
2197 force_sig(TARGET_SIGSEGV);
2198 return -TARGET_QEMU_ESIGRETURN;
2201 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
2203 int i;
2204 abi_ulong magic, sz;
2205 uint32_t fpscr, fpexc;
2206 struct target_vfp_sigframe *vfpframe;
2207 vfpframe = (struct target_vfp_sigframe *)regspace;
2209 __get_user(magic, &vfpframe->magic);
2210 __get_user(sz, &vfpframe->size);
2211 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
2212 return 0;
2214 for (i = 0; i < 32; i++) {
2215 __get_user(*aa32_vfp_dreg(env, i), &vfpframe->ufp.fpregs[i]);
2217 __get_user(fpscr, &vfpframe->ufp.fpscr);
2218 vfp_set_fpscr(env, fpscr);
2219 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
2220 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
2221 * and the exception flag is cleared
2223 fpexc |= (1 << 30);                  /* set FPEXC.EN to enable VFP */
2224 fpexc &= ~((1 << 31) | (1 << 28));   /* clear FPEXC.EX and FPEXC.FP2V */
2225 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2226 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2227 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2228 return (abi_ulong*)(vfpframe + 1);
2231 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2232 abi_ulong *regspace)
2234 int i;
2235 abi_ulong magic, sz;
2236 struct target_iwmmxt_sigframe *iwmmxtframe;
2237 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2239 __get_user(magic, &iwmmxtframe->magic);
2240 __get_user(sz, &iwmmxtframe->size);
2241 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2242 return 0;
2244 for (i = 0; i < 16; i++) {
2245 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2247 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2248 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2249 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2250 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2251 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2252 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2253 return (abi_ulong*)(iwmmxtframe + 1);
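/*
 * Common v2 sigreturn path: restore the signal mask, the CPU state
 * from tuc_mcontext, any coprocessor records and the signal stack
 * settings.  Returns non-zero if anything in the frame looks bad.
 */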
2256 static int do_sigframe_return_v2(CPUARMState *env,
2257 target_ulong context_addr,
2258 struct target_ucontext_v2 *uc)
2260 sigset_t host_set;
2261 abi_ulong *regspace;
2263 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2264 set_sigmask(&host_set);
2266 if (restore_sigcontext(env, &uc->tuc_mcontext))
2267 return 1;
2269 /* Restore coprocessor signal frame */
2270 regspace = uc->tuc_regspace;
2271 if (arm_feature(env, ARM_FEATURE_VFP)) {
2272 regspace = restore_sigframe_v2_vfp(env, regspace);
2273 if (!regspace) {
2274 return 1;
2277 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2278 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2279 if (!regspace) {
2280 return 1;
2284 if (do_sigaltstack(context_addr
2285 + offsetof(struct target_ucontext_v2, tuc_stack),
2286 0, get_sp_from_cpustate(env)) == -EFAULT) {
2287 return 1;
2290 #if 0
2291 /* Send SIGTRAP if we're single-stepping */
2292 if (ptrace_cancel_bpt(current))
2293 send_sig(SIGTRAP, current, 1);
2294 #endif
2296 return 0;
2299 static long do_sigreturn_v2(CPUARMState *env)
2301 abi_ulong frame_addr;
2302 struct sigframe_v2 *frame = NULL;
2305 * Since we stacked the signal on a 64-bit boundary,
2306 * 'sp' should be 8-byte aligned here. If it's not,
2307 * the user is trying to mess with us.
2309 frame_addr = env->regs[13];
2310 trace_user_do_sigreturn(env, frame_addr);
2311 if (frame_addr & 7) {
2312 goto badframe;
2315 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2316 goto badframe;
2319 if (do_sigframe_return_v2(env,
2320 frame_addr
2321 + offsetof(struct sigframe_v2, uc),
2322 &frame->uc)) {
2323 goto badframe;
2326 unlock_user_struct(frame, frame_addr, 0);
2327 return -TARGET_QEMU_ESIGRETURN;
2329 badframe:
2330 unlock_user_struct(frame, frame_addr, 0);
2331 force_sig(TARGET_SIGSEGV);
2332 return -TARGET_QEMU_ESIGRETURN;
2335 long do_sigreturn(CPUARMState *env)
2337 if (get_osversion() >= 0x020612) {
2338 return do_sigreturn_v2(env);
2339 } else {
2340 return do_sigreturn_v1(env);
2344 static long do_rt_sigreturn_v1(CPUARMState *env)
2346 abi_ulong frame_addr;
2347 struct rt_sigframe_v1 *frame = NULL;
2348 sigset_t host_set;
2351 * Since we stacked the signal on a 64-bit boundary,
2352 * 'sp' should be 8-byte aligned here. If it's not,
2353 * the user is trying to mess with us.
2355 frame_addr = env->regs[13];
2356 trace_user_do_rt_sigreturn(env, frame_addr);
2357 if (frame_addr & 7) {
2358 goto badframe;
2361 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2362 goto badframe;
2365 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2366 set_sigmask(&host_set);
2368 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2369 goto badframe;
2372 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2373 goto badframe;
2375 #if 0
2376 /* Send SIGTRAP if we're single-stepping */
2377 if (ptrace_cancel_bpt(current))
2378 send_sig(SIGTRAP, current, 1);
2379 #endif
2380 unlock_user_struct(frame, frame_addr, 0);
2381 return -TARGET_QEMU_ESIGRETURN;
2383 badframe:
2384 unlock_user_struct(frame, frame_addr, 0);
2385 force_sig(TARGET_SIGSEGV);
2386 return -TARGET_QEMU_ESIGRETURN;
2389 static long do_rt_sigreturn_v2(CPUARMState *env)
2391 abi_ulong frame_addr;
2392 struct rt_sigframe_v2 *frame = NULL;
2395 * Since we stacked the signal on a 64-bit boundary,
2396 * 'sp' should be 8-byte aligned here. If it's not,
2397 * the user is trying to mess with us.
2399 frame_addr = env->regs[13];
2400 trace_user_do_rt_sigreturn(env, frame_addr);
2401 if (frame_addr & 7) {
2402 goto badframe;
2405 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2406 goto badframe;
2409 if (do_sigframe_return_v2(env,
2410 frame_addr
2411 + offsetof(struct rt_sigframe_v2, uc),
2412 &frame->uc)) {
2413 goto badframe;
2416 unlock_user_struct(frame, frame_addr, 0);
2417 return -TARGET_QEMU_ESIGRETURN;
2419 badframe:
2420 unlock_user_struct(frame, frame_addr, 0);
2421 force_sig(TARGET_SIGSEGV);
2422 return -TARGET_QEMU_ESIGRETURN;
2425 long do_rt_sigreturn(CPUARMState *env)
2427 if (get_osversion() >= 0x020612) {
2428 return do_rt_sigreturn_v2(env);
2429 } else {
2430 return do_rt_sigreturn_v1(env);
2434 #elif defined(TARGET_SPARC)
2436 #define __SUNOS_MAXWIN 31
2438 /* This is what SunOS does, so shall I. */
2439 struct target_sigcontext {
2440 abi_ulong sigc_onstack; /* state to restore */
2442 abi_ulong sigc_mask; /* sigmask to restore */
2443 abi_ulong sigc_sp; /* stack pointer */
2444 abi_ulong sigc_pc; /* program counter */
2445 abi_ulong sigc_npc; /* next program counter */
2446 abi_ulong sigc_psr; /* for condition codes etc */
2447 abi_ulong sigc_g1; /* User uses these two registers */
2448 abi_ulong sigc_o0; /* within the trampoline code. */
2450 /* Now comes information regarding the user's window set
2451 * at the time of the signal.
2453 abi_ulong sigc_oswins; /* outstanding windows */
2455 /* stack ptrs for each regwin buf */
2456 char *sigc_spbuf[__SUNOS_MAXWIN];
2458 /* Windows to restore after signal */
2459 struct {
2460 abi_ulong locals[8];
2461 abi_ulong ins[8];
2462 } sigc_wbuf[__SUNOS_MAXWIN];
2464 /* A Sparc stack frame */
2465 struct sparc_stackf {
2466 abi_ulong locals[8];
2467 abi_ulong ins[8];
2468 /* It's simpler to treat fp and callers_pc as elements of ins[]
2469 * since we never need to access them ourselves.
2471 char *structptr;
2472 abi_ulong xargs[6];
2473 abi_ulong xxargs[1];
2476 typedef struct {
2477 struct {
2478 abi_ulong psr;
2479 abi_ulong pc;
2480 abi_ulong npc;
2481 abi_ulong y;
2482 abi_ulong u_regs[16]; /* globals and ins */
2483 } si_regs;
2484 int si_mask;
2485 } __siginfo_t;
2487 typedef struct {
2488 abi_ulong si_float_regs[32];
2489 unsigned long si_fsr;
2490 unsigned long si_fpqdepth;
2491 struct {
2492 unsigned long *insn_addr;
2493 unsigned long insn;
2494 } si_fpqueue [16];
2495 } qemu_siginfo_fpu_t;
2498 struct target_signal_frame {
2499 struct sparc_stackf ss;
2500 __siginfo_t info;
2501 abi_ulong fpu_save;
2502 abi_ulong insns[2] __attribute__ ((aligned (8)));
2503 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2504 abi_ulong extra_size; /* Should be 0 */
2505 qemu_siginfo_fpu_t fpu_state;
2507 struct target_rt_signal_frame {
2508 struct sparc_stackf ss;
2509 siginfo_t info;
2510 abi_ulong regs[20];
2511 sigset_t mask;
2512 abi_ulong fpu_save;
2513 unsigned int insns[2];
2514 stack_t stack;
2515 unsigned int extra_size; /* Should be 0 */
2516 qemu_siginfo_fpu_t fpu_state;
2519 #define UREG_O0 16
2520 #define UREG_O6 22
2521 #define UREG_I0 0
2522 #define UREG_I1 1
2523 #define UREG_I2 2
2524 #define UREG_I3 3
2525 #define UREG_I4 4
2526 #define UREG_I5 5
2527 #define UREG_I6 6
2528 #define UREG_I7 7
2529 #define UREG_L0 8
2530 #define UREG_FP UREG_I6
2531 #define UREG_SP UREG_O6
2533 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2534 CPUSPARCState *env,
2535 unsigned long framesize)
2537 abi_ulong sp;
2539 sp = env->regwptr[UREG_FP];
2541 /* This is the X/Open sanctioned signal stack switching. */
2542 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2543 if (!on_sig_stack(sp)
2544 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2545 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2548 return sp - framesize;
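/*
 * Capture the interrupted state into the __siginfo_t area: %psr,
 * pc/npc, %y, the eight globals and the eight in registers (taken
 * from the current window via regwptr), plus the old signal mask.
 */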
2551 static int
2552 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2554 int err = 0, i;
2556 __put_user(env->psr, &si->si_regs.psr);
2557 __put_user(env->pc, &si->si_regs.pc);
2558 __put_user(env->npc, &si->si_regs.npc);
2559 __put_user(env->y, &si->si_regs.y);
2560 for (i=0; i < 8; i++) {
2561 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2563 for (i=0; i < 8; i++) {
2564 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2566 __put_user(mask, &si->si_mask);
2567 return err;
2570 #if 0
2571 static int
2572 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2573 CPUSPARCState *env, unsigned long mask)
2575 int err = 0;
2577 __put_user(mask, &sc->sigc_mask);
2578 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2579 __put_user(env->pc, &sc->sigc_pc);
2580 __put_user(env->npc, &sc->sigc_npc);
2581 __put_user(env->psr, &sc->sigc_psr);
2582 __put_user(env->gregs[1], &sc->sigc_g1);
2583 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2585 return err;
2587 #endif
2588 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2590 static void setup_frame(int sig, struct target_sigaction *ka,
2591 target_sigset_t *set, CPUSPARCState *env)
2593 abi_ulong sf_addr;
2594 struct target_signal_frame *sf;
2595 int sigframe_size, err, i;
2597 /* 1. Make sure everything is clean */
2598 //synchronize_user_stack();
2600 sigframe_size = NF_ALIGNEDSZ;
2601 sf_addr = get_sigframe(ka, env, sigframe_size);
2602 trace_user_setup_frame(env, sf_addr);
2604 sf = lock_user(VERIFY_WRITE, sf_addr,
2605 sizeof(struct target_signal_frame), 0);
2606 if (!sf) {
2607 goto sigsegv;
2609 #if 0
2610 if (invalid_frame_pointer(sf, sigframe_size))
2611 goto sigill_and_return;
2612 #endif
2613 /* 2. Save the current process state */
2614 err = setup___siginfo(&sf->info, env, set->sig[0]);
2615 __put_user(0, &sf->extra_size);
2617 //save_fpu_state(regs, &sf->fpu_state);
2618 //__put_user(&sf->fpu_state, &sf->fpu_save);
2620 __put_user(set->sig[0], &sf->info.si_mask);
2621 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2622 __put_user(set->sig[i + 1], &sf->extramask[i]);
2625 for (i = 0; i < 8; i++) {
2626 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2628 for (i = 0; i < 8; i++) {
2629 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2631 if (err)
2632 goto sigsegv;
2634 /* 3. signal handler back-trampoline and parameters */
2635 env->regwptr[UREG_FP] = sf_addr;
2636 env->regwptr[UREG_I0] = sig;
2637 env->regwptr[UREG_I1] = sf_addr +
2638 offsetof(struct target_signal_frame, info);
2639 env->regwptr[UREG_I2] = sf_addr +
2640 offsetof(struct target_signal_frame, info);
2642 /* 4. signal handler */
2643 env->pc = ka->_sa_handler;
2644 env->npc = (env->pc + 4);
2645 /* 5. return to kernel instructions */
2646 if (ka->sa_restorer) {
2647 env->regwptr[UREG_I7] = ka->sa_restorer;
2648 } else {
2649 uint32_t val32;
2651 env->regwptr[UREG_I7] = sf_addr +
2652 offsetof(struct target_signal_frame, insns) - 2 * 4;
2654 /* mov __NR_sigreturn, %g1 */
2655 val32 = 0x821020d8;
2656 __put_user(val32, &sf->insns[0]);
2658 /* t 0x10 */
2659 val32 = 0x91d02010;
2660 __put_user(val32, &sf->insns[1]);
2661 if (err)
2662 goto sigsegv;
2664 /* Flush instruction space. */
2665 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2666 // tb_flush(env);
2668 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2669 return;
2670 #if 0
2671 sigill_and_return:
2672 force_sig(TARGET_SIGILL);
2673 #endif
2674 sigsegv:
2675 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2676 force_sigsegv(sig);
2679 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2680 target_siginfo_t *info,
2681 target_sigset_t *set, CPUSPARCState *env)
2683 fprintf(stderr, "setup_rt_frame: not implemented\n");
2686 long do_sigreturn(CPUSPARCState *env)
2688 abi_ulong sf_addr;
2689 struct target_signal_frame *sf;
2690 uint32_t up_psr, pc, npc;
2691 target_sigset_t set;
2692 sigset_t host_set;
2693 int err=0, i;
2695 sf_addr = env->regwptr[UREG_FP];
2696 trace_user_do_sigreturn(env, sf_addr);
2697 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2698 goto segv_and_exit;
2701 /* 1. Make sure we are not getting garbage from the user */
2703 if (sf_addr & 3)
2704 goto segv_and_exit;
2706 __get_user(pc, &sf->info.si_regs.pc);
2707 __get_user(npc, &sf->info.si_regs.npc);
2709 if ((pc | npc) & 3) {
2710 goto segv_and_exit;
2713 /* 2. Restore the state */
2714 __get_user(up_psr, &sf->info.si_regs.psr);
2716 /* User can only change condition codes and FPU enabling in %psr. */
2717 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2718 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2720 env->pc = pc;
2721 env->npc = npc;
2722 __get_user(env->y, &sf->info.si_regs.y);
2723 for (i=0; i < 8; i++) {
2724 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2726 for (i=0; i < 8; i++) {
2727 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2730 /* FIXME: implement FPU save/restore:
2731 * __get_user(fpu_save, &sf->fpu_save);
2732 * if (fpu_save)
2733 * err |= restore_fpu_state(env, fpu_save);
2736 /* This is pretty much atomic; no amount of locking would prevent
2737 * the races which exist anyway.
2739 __get_user(set.sig[0], &sf->info.si_mask);
2740 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2741 __get_user(set.sig[i], &sf->extramask[i - 1]);
2744 target_to_host_sigset_internal(&host_set, &set);
2745 set_sigmask(&host_set);
2747 if (err) {
2748 goto segv_and_exit;
2750 unlock_user_struct(sf, sf_addr, 0);
2751 return -TARGET_QEMU_ESIGRETURN;
2753 segv_and_exit:
2754 unlock_user_struct(sf, sf_addr, 0);
2755 force_sig(TARGET_SIGSEGV);
2756 return -TARGET_QEMU_ESIGRETURN;
2759 long do_rt_sigreturn(CPUSPARCState *env)
2761 trace_user_do_rt_sigreturn(env, 0);
2762 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2763 return -TARGET_ENOSYS;
2766 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2767 #define SPARC_MC_TSTATE 0
2768 #define SPARC_MC_PC 1
2769 #define SPARC_MC_NPC 2
2770 #define SPARC_MC_Y 3
2771 #define SPARC_MC_G1 4
2772 #define SPARC_MC_G2 5
2773 #define SPARC_MC_G3 6
2774 #define SPARC_MC_G4 7
2775 #define SPARC_MC_G5 8
2776 #define SPARC_MC_G6 9
2777 #define SPARC_MC_G7 10
2778 #define SPARC_MC_O0 11
2779 #define SPARC_MC_O1 12
2780 #define SPARC_MC_O2 13
2781 #define SPARC_MC_O3 14
2782 #define SPARC_MC_O4 15
2783 #define SPARC_MC_O5 16
2784 #define SPARC_MC_O6 17
2785 #define SPARC_MC_O7 18
2786 #define SPARC_MC_NGREG 19
2788 typedef abi_ulong target_mc_greg_t;
2789 typedef target_mc_greg_t target_mc_gregset_t[SPARC_MC_NGREG];
2791 struct target_mc_fq {
2792 abi_ulong *mcfq_addr;
2793 uint32_t mcfq_insn;
2796 struct target_mc_fpu {
2797 union {
2798 uint32_t sregs[32];
2799 uint64_t dregs[32];
2800 //uint128_t qregs[16];
2801 } mcfpu_fregs;
2802 abi_ulong mcfpu_fsr;
2803 abi_ulong mcfpu_fprs;
2804 abi_ulong mcfpu_gsr;
2805 struct target_mc_fq *mcfpu_fq;
2806 unsigned char mcfpu_qcnt;
2807 unsigned char mcfpu_qentsz;
2808 unsigned char mcfpu_enab;
2810 typedef struct target_mc_fpu target_mc_fpu_t;
2812 typedef struct {
2813 target_mc_gregset_t mc_gregs;
2814 target_mc_greg_t mc_fp;
2815 target_mc_greg_t mc_i7;
2816 target_mc_fpu_t mc_fpregs;
2817 } target_mcontext_t;
2819 struct target_ucontext {
2820 struct target_ucontext *tuc_link;
2821 abi_ulong tuc_flags;
2822 target_sigset_t tuc_sigmask;
2823 target_mcontext_t tuc_mcontext;
2826 /* A V9 register window */
2827 struct target_reg_window {
2828 abi_ulong locals[8];
2829 abi_ulong ins[8];
2832 #define TARGET_STACK_BIAS 2047
2834 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2835 void sparc64_set_context(CPUSPARCState *env)
2837 abi_ulong ucp_addr;
2838 struct target_ucontext *ucp;
2839 target_mc_gregset_t *grp;
2840 abi_ulong pc, npc, tstate;
2841 abi_ulong fp, i7, w_addr;
2842 unsigned int i;
2844 ucp_addr = env->regwptr[UREG_I0];
2845 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2846 goto do_sigsegv;
2848 grp = &ucp->tuc_mcontext.mc_gregs;
2849 __get_user(pc, &((*grp)[SPARC_MC_PC]));
2850 __get_user(npc, &((*grp)[SPARC_MC_NPC]));
2851 if ((pc | npc) & 3) {
2852 goto do_sigsegv;
2854 if (env->regwptr[UREG_I1]) {
2855 target_sigset_t target_set;
2856 sigset_t set;
2858 if (TARGET_NSIG_WORDS == 1) {
2859 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2860 } else {
2861 abi_ulong *src, *dst;
2862 src = ucp->tuc_sigmask.sig;
2863 dst = target_set.sig;
2864 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2865 __get_user(*dst, src);
2868 target_to_host_sigset_internal(&set, &target_set);
2869 set_sigmask(&set);
2871 env->pc = pc;
2872 env->npc = npc;
2873 __get_user(env->y, &((*grp)[SPARC_MC_Y]));
2874 __get_user(tstate, &((*grp)[SPARC_MC_TSTATE]));
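/* Unpack tstate: ASI from bits 31:24, the condition codes (CCR)
   from bits 39:32, and the current window pointer from the low
   five bits. */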
2875 env->asi = (tstate >> 24) & 0xff;
2876 cpu_put_ccr(env, tstate >> 32);
2877 cpu_put_cwp64(env, tstate & 0x1f);
2878 __get_user(env->gregs[1], (&(*grp)[SPARC_MC_G1]));
2879 __get_user(env->gregs[2], (&(*grp)[SPARC_MC_G2]));
2880 __get_user(env->gregs[3], (&(*grp)[SPARC_MC_G3]));
2881 __get_user(env->gregs[4], (&(*grp)[SPARC_MC_G4]));
2882 __get_user(env->gregs[5], (&(*grp)[SPARC_MC_G5]));
2883 __get_user(env->gregs[6], (&(*grp)[SPARC_MC_G6]));
2884 __get_user(env->gregs[7], (&(*grp)[SPARC_MC_G7]));
2885 __get_user(env->regwptr[UREG_I0], (&(*grp)[SPARC_MC_O0]));
2886 __get_user(env->regwptr[UREG_I1], (&(*grp)[SPARC_MC_O1]));
2887 __get_user(env->regwptr[UREG_I2], (&(*grp)[SPARC_MC_O2]));
2888 __get_user(env->regwptr[UREG_I3], (&(*grp)[SPARC_MC_O3]));
2889 __get_user(env->regwptr[UREG_I4], (&(*grp)[SPARC_MC_O4]));
2890 __get_user(env->regwptr[UREG_I5], (&(*grp)[SPARC_MC_O5]));
2891 __get_user(env->regwptr[UREG_I6], (&(*grp)[SPARC_MC_O6]));
2892 __get_user(env->regwptr[UREG_I7], (&(*grp)[SPARC_MC_O7]));
2894 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2895 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2897 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2898 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2899 abi_ulong) != 0) {
2900 goto do_sigsegv;
2902 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2903 abi_ulong) != 0) {
2904 goto do_sigsegv;
2906 /* FIXME this does not match how the kernel handles the FPU in
2907 * its sparc64_set_context implementation. In particular the FPU
2908 * is only restored if fenab is non-zero in:
2909 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2911 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
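/* The saved FP state is viewed as 64 single-precision slots: even
   indices map to the upper half and odd indices to the lower half
   of each 64-bit fpr. */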
2913 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2914 for (i = 0; i < 64; i++, src++) {
2915 if (i & 1) {
2916 __get_user(env->fpr[i/2].l.lower, src);
2917 } else {
2918 __get_user(env->fpr[i/2].l.upper, src);
2922 __get_user(env->fsr,
2923 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2924 __get_user(env->gsr,
2925 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2926 unlock_user_struct(ucp, ucp_addr, 0);
2927 return;
2928 do_sigsegv:
2929 unlock_user_struct(ucp, ucp_addr, 0);
2930 force_sig(TARGET_SIGSEGV);
2933 void sparc64_get_context(CPUSPARCState *env)
2935 abi_ulong ucp_addr;
2936 struct target_ucontext *ucp;
2937 target_mc_gregset_t *grp;
2938 target_mcontext_t *mcp;
2939 abi_ulong fp, i7, w_addr;
2940 int err;
2941 unsigned int i;
2942 target_sigset_t target_set;
2943 sigset_t set;
2945 ucp_addr = env->regwptr[UREG_I0];
2946 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2947 goto do_sigsegv;
2950 mcp = &ucp->tuc_mcontext;
2951 grp = &mcp->mc_gregs;
2953 /* Skip over the trap instruction, first. */
2954 env->pc = env->npc;
2955 env->npc += 4;
2957 /* If we're only reading the signal mask then do_sigprocmask()
2958 * is guaranteed not to fail, which is important because we don't
2959 * have any way to signal a failure or restart this operation since
2960 * this is not a normal syscall.
2962 err = do_sigprocmask(0, NULL, &set);
2963 assert(err == 0);
2964 host_to_target_sigset_internal(&target_set, &set);
2965 if (TARGET_NSIG_WORDS == 1) {
2966 __put_user(target_set.sig[0],
2967 (abi_ulong *)&ucp->tuc_sigmask);
2968 } else {
2969 abi_ulong *src, *dst;
2970 src = target_set.sig;
2971 dst = ucp->tuc_sigmask.sig;
2972 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2973 __put_user(*src, dst);
2975 if (err)
2976 goto do_sigsegv;
2979 /* XXX: tstate must be saved properly */
2980 // __put_user(env->tstate, &((*grp)[SPARC_MC_TSTATE]));
2981 __put_user(env->pc, &((*grp)[SPARC_MC_PC]));
2982 __put_user(env->npc, &((*grp)[SPARC_MC_NPC]));
2983 __put_user(env->y, &((*grp)[SPARC_MC_Y]));
2984 __put_user(env->gregs[1], &((*grp)[SPARC_MC_G1]));
2985 __put_user(env->gregs[2], &((*grp)[SPARC_MC_G2]));
2986 __put_user(env->gregs[3], &((*grp)[SPARC_MC_G3]));
2987 __put_user(env->gregs[4], &((*grp)[SPARC_MC_G4]));
2988 __put_user(env->gregs[5], &((*grp)[SPARC_MC_G5]));
2989 __put_user(env->gregs[6], &((*grp)[SPARC_MC_G6]));
2990 __put_user(env->gregs[7], &((*grp)[SPARC_MC_G7]));
2991 __put_user(env->regwptr[UREG_I0], &((*grp)[SPARC_MC_O0]));
2992 __put_user(env->regwptr[UREG_I1], &((*grp)[SPARC_MC_O1]));
2993 __put_user(env->regwptr[UREG_I2], &((*grp)[SPARC_MC_O2]));
2994 __put_user(env->regwptr[UREG_I3], &((*grp)[SPARC_MC_O3]));
2995 __put_user(env->regwptr[UREG_I4], &((*grp)[SPARC_MC_O4]));
2996 __put_user(env->regwptr[UREG_I5], &((*grp)[SPARC_MC_O5]));
2997 __put_user(env->regwptr[UREG_I6], &((*grp)[SPARC_MC_O6]));
2998 __put_user(env->regwptr[UREG_I7], &((*grp)[SPARC_MC_O7]));
3000 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
3001 fp = i7 = 0;
3002 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
3003 abi_ulong) != 0) {
3004 goto do_sigsegv;
3006 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
3007 abi_ulong) != 0) {
3008 goto do_sigsegv;
3010 __put_user(fp, &(mcp->mc_fp));
3011 __put_user(i7, &(mcp->mc_i7));
3014 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
3015 for (i = 0; i < 64; i++, dst++) {
3016 if (i & 1) {
3017 __put_user(env->fpr[i/2].l.lower, dst);
3018 } else {
3019 __put_user(env->fpr[i/2].l.upper, dst);
3023 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
3024 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
3025 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
3027 if (err)
3028 goto do_sigsegv;
3029 unlock_user_struct(ucp, ucp_addr, 1);
3030 return;
3031 do_sigsegv:
3032 unlock_user_struct(ucp, ucp_addr, 1);
3033 force_sig(TARGET_SIGSEGV);
3035 #endif
3036 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
3038 # if defined(TARGET_ABI_MIPSO32)
3039 struct target_sigcontext {
3040 uint32_t sc_regmask; /* Unused */
3041 uint32_t sc_status;
3042 uint64_t sc_pc;
3043 uint64_t sc_regs[32];
3044 uint64_t sc_fpregs[32];
3045 uint32_t sc_ownedfp; /* Unused */
3046 uint32_t sc_fpc_csr;
3047 uint32_t sc_fpc_eir; /* Unused */
3048 uint32_t sc_used_math;
3049 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
3050 uint32_t pad0;
3051 uint64_t sc_mdhi;
3052 uint64_t sc_mdlo;
3053 target_ulong sc_hi1; /* Was sc_cause */
3054 target_ulong sc_lo1; /* Was sc_badvaddr */
3055 target_ulong sc_hi2; /* Was sc_sigset[4] */
3056 target_ulong sc_lo2;
3057 target_ulong sc_hi3;
3058 target_ulong sc_lo3;
3060 # else /* N32 || N64 */
3061 struct target_sigcontext {
3062 uint64_t sc_regs[32];
3063 uint64_t sc_fpregs[32];
3064 uint64_t sc_mdhi;
3065 uint64_t sc_hi1;
3066 uint64_t sc_hi2;
3067 uint64_t sc_hi3;
3068 uint64_t sc_mdlo;
3069 uint64_t sc_lo1;
3070 uint64_t sc_lo2;
3071 uint64_t sc_lo3;
3072 uint64_t sc_pc;
3073 uint32_t sc_fpc_csr;
3074 uint32_t sc_used_math;
3075 uint32_t sc_dsp;
3076 uint32_t sc_reserved;
3078 # endif /* O32 */
3080 struct sigframe {
3081 uint32_t sf_ass[4]; /* argument save space for o32 */
3082 uint32_t sf_code[2]; /* signal trampoline */
3083 struct target_sigcontext sf_sc;
3084 target_sigset_t sf_mask;
3087 struct target_ucontext {
3088 target_ulong tuc_flags;
3089 target_ulong tuc_link;
3090 target_stack_t tuc_stack;
3091 target_ulong pad0;
3092 struct target_sigcontext tuc_mcontext;
3093 target_sigset_t tuc_sigmask;
3096 struct target_rt_sigframe {
3097 uint32_t rs_ass[4]; /* argument save space for o32 */
3098 uint32_t rs_code[2]; /* signal trampoline */
3099 struct target_siginfo rs_info;
3100 struct target_ucontext rs_uc;
3103 /* Install trampoline to jump back from signal handler */
3104 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
3106 int err = 0;
3109 * Set up the return code ...
3111 * li v0, __NR__foo_sigreturn
3112 * syscall
3115 __put_user(0x24020000 + syscall, tramp + 0);
3116 __put_user(0x0000000c, tramp + 1);
3117 return err;
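/*
 * Save the CPU state the handler may clobber: the resume PC (via
 * exception_resume_pc()), GPRs 1..31 with $zero stored as 0, all the
 * HI/LO pairs (the extra pairs are the DSP accumulators), the
 * DSPControl value and the FP registers.
 */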
3120 static inline void setup_sigcontext(CPUMIPSState *regs,
3121 struct target_sigcontext *sc)
3123 int i;
3125 __put_user(exception_resume_pc(regs), &sc->sc_pc);
3126 regs->hflags &= ~MIPS_HFLAG_BMASK;
3128 __put_user(0, &sc->sc_regs[0]);
3129 for (i = 1; i < 32; ++i) {
3130 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3133 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3134 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3136 /* Rather than checking for dsp existence, always copy. The storage
3137 would just be garbage otherwise. */
3138 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
3139 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
3140 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
3141 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
3142 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
3143 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
3145 uint32_t dsp = cpu_rddsp(0x3ff, regs);
3146 __put_user(dsp, &sc->sc_dsp);
3149 __put_user(1, &sc->sc_used_math);
3151 for (i = 0; i < 32; ++i) {
3152 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3156 static inline void
3157 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
3159 int i;
3161 __get_user(regs->CP0_EPC, &sc->sc_pc);
3163 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3164 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3166 for (i = 1; i < 32; ++i) {
3167 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3170 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
3171 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
3172 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
3173 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
3174 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
3175 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
3177 uint32_t dsp;
3178 __get_user(dsp, &sc->sc_dsp);
3179 cpu_wrdsp(dsp, 0x3ff, regs);
3182 for (i = 0; i < 32; ++i) {
3183 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3188 * Determine which stack to use..
3190 static inline abi_ulong
3191 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3193 unsigned long sp;
3195 /* Default to using normal stack */
3196 sp = regs->active_tc.gpr[29];
3199 * The FPU emulator may have its own trampoline active just
3200 * above the user stack, 16 bytes before the next lowest
3201 * 16-byte boundary. Try to avoid trashing it.
3203 sp -= 32;
3205 /* This is the X/Open sanctioned signal stack switching. */
3206 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3207 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3210 return (sp - frame_size) & ~7;
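/*
 * On MIPS16/microMIPS-capable CPUs bit 0 of the PC selects the
 * compressed ISA mode; mirror it into hflags and strip it from the
 * architectural PC.
 */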
3213 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3215 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3216 env->hflags &= ~MIPS_HFLAG_M16;
3217 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3218 env->active_tc.PC &= ~(target_ulong) 1;
3222 # if defined(TARGET_ABI_MIPSO32)
3223 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
3224 static void setup_frame(int sig, struct target_sigaction * ka,
3225 target_sigset_t *set, CPUMIPSState *regs)
3227 struct sigframe *frame;
3228 abi_ulong frame_addr;
3229 int i;
3231 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
3232 trace_user_setup_frame(regs, frame_addr);
3233 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3234 goto give_sigsegv;
3237 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3239 setup_sigcontext(regs, &frame->sf_sc);
3241 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3242 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3246 * Arguments to signal handler:
3248 * a0 = signal number
3249 * a1 = 0 (should be cause)
3250 * a2 = pointer to struct sigcontext
3252 * $25 and PC point to the signal handler, $29 points to the
3253 * struct sigframe.
3255 regs->active_tc.gpr[ 4] = sig;
3256 regs->active_tc.gpr[ 5] = 0;
3257 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3258 regs->active_tc.gpr[29] = frame_addr;
3259 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3260 /* The original kernel code sets CP0_EPC to the handler
3261 * since it returns to userland using eret;
3262 * we cannot do that here, so we must set PC directly */
3263 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3264 mips_set_hflags_isa_mode_from_pc(regs);
3265 unlock_user_struct(frame, frame_addr, 1);
3266 return;
3268 give_sigsegv:
3269 force_sigsegv(sig);
3272 long do_sigreturn(CPUMIPSState *regs)
3274 struct sigframe *frame;
3275 abi_ulong frame_addr;
3276 sigset_t blocked;
3277 target_sigset_t target_set;
3278 int i;
3280 frame_addr = regs->active_tc.gpr[29];
3281 trace_user_do_sigreturn(regs, frame_addr);
3282 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3283 goto badframe;
3285 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3286 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3289 target_to_host_sigset_internal(&blocked, &target_set);
3290 set_sigmask(&blocked);
3292 restore_sigcontext(regs, &frame->sf_sc);
3294 #if 0
3296 * Don't let your children do this ...
3298 __asm__ __volatile__(
3299 "move\t$29, %0\n\t"
3300 "j\tsyscall_exit"
3301 :/* no outputs */
3302 :"r" (&regs));
3303 /* Unreached */
3304 #endif
3306 regs->active_tc.PC = regs->CP0_EPC;
3307 mips_set_hflags_isa_mode_from_pc(regs);
3308 /* I am not sure this is right, but it seems to work;
3309 * maybe a problem with nested signals ? */
3310 regs->CP0_EPC = 0;
3311 return -TARGET_QEMU_ESIGRETURN;
3313 badframe:
3314 force_sig(TARGET_SIGSEGV);
3315 return -TARGET_QEMU_ESIGRETURN;
3317 # endif /* O32 */
3319 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3320 target_siginfo_t *info,
3321 target_sigset_t *set, CPUMIPSState *env)
3323 struct target_rt_sigframe *frame;
3324 abi_ulong frame_addr;
3325 int i;
3327 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3328 trace_user_setup_rt_frame(env, frame_addr);
3329 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3330 goto give_sigsegv;
3333 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3335 tswap_siginfo(&frame->rs_info, info);
3337 __put_user(0, &frame->rs_uc.tuc_flags);
3338 __put_user(0, &frame->rs_uc.tuc_link);
3339 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3340 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3341 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3342 &frame->rs_uc.tuc_stack.ss_flags);
3344 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3346 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3347 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3351 * Arguments to signal handler:
3353 * a0 = signal number
3354 * a1 = pointer to siginfo_t
3355 * a2 = pointer to ucontext_t
3357 * $25 and PC point to the signal handler, $29 points to the
3358 * struct sigframe.
3360 env->active_tc.gpr[ 4] = sig;
3361 env->active_tc.gpr[ 5] = frame_addr
3362 + offsetof(struct target_rt_sigframe, rs_info);
3363 env->active_tc.gpr[ 6] = frame_addr
3364 + offsetof(struct target_rt_sigframe, rs_uc);
3365 env->active_tc.gpr[29] = frame_addr;
3366 env->active_tc.gpr[31] = frame_addr
3367 + offsetof(struct target_rt_sigframe, rs_code);
3368 /* The original kernel code sets CP0_EPC to the handler
3369 * since it returns to userland using eret;
3370 * we cannot do that here, so we must set PC directly */
3371 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3372 mips_set_hflags_isa_mode_from_pc(env);
3373 unlock_user_struct(frame, frame_addr, 1);
3374 return;
3376 give_sigsegv:
3377 unlock_user_struct(frame, frame_addr, 1);
3378 force_sigsegv(sig);
3381 long do_rt_sigreturn(CPUMIPSState *env)
3383 struct target_rt_sigframe *frame;
3384 abi_ulong frame_addr;
3385 sigset_t blocked;
3387 frame_addr = env->active_tc.gpr[29];
3388 trace_user_do_rt_sigreturn(env, frame_addr);
3389 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3390 goto badframe;
3393 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3394 set_sigmask(&blocked);
3396 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3398 if (do_sigaltstack(frame_addr +
3399 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3400 0, get_sp_from_cpustate(env)) == -EFAULT)
3401 goto badframe;
3403 env->active_tc.PC = env->CP0_EPC;
3404 mips_set_hflags_isa_mode_from_pc(env);
3405 /* I am not sure this is right, but it seems to work;
3406 * maybe a problem with nested signals ? */
3407 env->CP0_EPC = 0;
3408 return -TARGET_QEMU_ESIGRETURN;
3410 badframe:
3411 force_sig(TARGET_SIGSEGV);
3412 return -TARGET_QEMU_ESIGRETURN;
3415 #elif defined(TARGET_SH4)
3418 * code and data structures from linux kernel:
3419 * include/asm-sh/sigcontext.h
3420 * arch/sh/kernel/signal.c
3423 struct target_sigcontext {
3424 target_ulong oldmask;
3426 /* CPU registers */
3427 target_ulong sc_gregs[16];
3428 target_ulong sc_pc;
3429 target_ulong sc_pr;
3430 target_ulong sc_sr;
3431 target_ulong sc_gbr;
3432 target_ulong sc_mach;
3433 target_ulong sc_macl;
3435 /* FPU registers */
3436 target_ulong sc_fpregs[16];
3437 target_ulong sc_xfpregs[16];
3438 unsigned int sc_fpscr;
3439 unsigned int sc_fpul;
3440 unsigned int sc_ownedfp;
3443 struct target_sigframe
3445 struct target_sigcontext sc;
3446 target_ulong extramask[TARGET_NSIG_WORDS-1];
3447 uint16_t retcode[3];
3451 struct target_ucontext {
3452 target_ulong tuc_flags;
3453 struct target_ucontext *tuc_link;
3454 target_stack_t tuc_stack;
3455 struct target_sigcontext tuc_mcontext;
3456 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3459 struct target_rt_sigframe
3461 struct target_siginfo info;
3462 struct target_ucontext uc;
3463 uint16_t retcode[3];
3467 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3468 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3470 static abi_ulong get_sigframe(struct target_sigaction *ka,
3471 unsigned long sp, size_t frame_size)
3473 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3474 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3477 return (sp - frame_size) & -8ul;
3480 /* Notice when we're in the middle of a gUSA region and reset.
3481 Note that this will only occur for !parallel_cpus, as we will
3482 translate such sequences differently in a parallel context. */
3483 static void unwind_gusa(CPUSH4State *regs)
3485 /* If the stack pointer is sufficiently negative, and we haven't
3486 completed the sequence, then reset to the entry to the region. */
3487 /* ??? The SH4 kernel checks for an address above 0xC0000000.
3488 However, the page mappings in qemu linux-user aren't as restricted
3489 and we wind up with the normal stack mapped above 0xF0000000.
3490 That said, there is no reason why the kernel should be allowing
3491 a gUSA region that spans 1GB. Use a tighter check here, for what
3492 can actually be enabled by the immediate move. */
3493 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
3494 /* Reset the PC to before the gUSA region, as computed from
3495 R0 = region end, SP = -(region size), plus one more for the
3496 insn that actually initializes SP to the region size. */
3497 regs->pc = regs->gregs[0] + regs->gregs[15] - 2;
3499 /* Reset the SP to the saved version in R1. */
3500 regs->gregs[15] = regs->gregs[1];
3504 static void setup_sigcontext(struct target_sigcontext *sc,
3505 CPUSH4State *regs, unsigned long mask)
3507 int i;
3509 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3510 COPY(gregs[0]); COPY(gregs[1]);
3511 COPY(gregs[2]); COPY(gregs[3]);
3512 COPY(gregs[4]); COPY(gregs[5]);
3513 COPY(gregs[6]); COPY(gregs[7]);
3514 COPY(gregs[8]); COPY(gregs[9]);
3515 COPY(gregs[10]); COPY(gregs[11]);
3516 COPY(gregs[12]); COPY(gregs[13]);
3517 COPY(gregs[14]); COPY(gregs[15]);
3518 COPY(gbr); COPY(mach);
3519 COPY(macl); COPY(pr);
3520 COPY(sr); COPY(pc);
3521 #undef COPY
3523 for (i=0; i<16; i++) {
3524 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3526 __put_user(regs->fpscr, &sc->sc_fpscr);
3527 __put_user(regs->fpul, &sc->sc_fpul);
3529 /* non-iBCS2 extensions.. */
3530 __put_user(mask, &sc->oldmask);
3533 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3535 int i;
3537 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3538 COPY(gregs[0]); COPY(gregs[1]);
3539 COPY(gregs[2]); COPY(gregs[3]);
3540 COPY(gregs[4]); COPY(gregs[5]);
3541 COPY(gregs[6]); COPY(gregs[7]);
3542 COPY(gregs[8]); COPY(gregs[9]);
3543 COPY(gregs[10]); COPY(gregs[11]);
3544 COPY(gregs[12]); COPY(gregs[13]);
3545 COPY(gregs[14]); COPY(gregs[15]);
3546 COPY(gbr); COPY(mach);
3547 COPY(macl); COPY(pr);
3548 COPY(sr); COPY(pc);
3549 #undef COPY
3551 for (i=0; i<16; i++) {
3552 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3554 __get_user(regs->fpscr, &sc->sc_fpscr);
3555 __get_user(regs->fpul, &sc->sc_fpul);
3557 regs->tra = -1; /* disable syscall checks */
3558 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3561 static void setup_frame(int sig, struct target_sigaction *ka,
3562 target_sigset_t *set, CPUSH4State *regs)
3564 struct target_sigframe *frame;
3565 abi_ulong frame_addr;
3566 int i;
3568 unwind_gusa(regs);
3570 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3571 trace_user_setup_frame(regs, frame_addr);
3572 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3573 goto give_sigsegv;
3576 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3578 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3579 __put_user(set->sig[i + 1], &frame->extramask[i]);
3582 /* Set up to return from userspace. If provided, use a stub
3583 already in userspace. */
3584 if (ka->sa_flags & TARGET_SA_RESTORER) {
3585 regs->pr = (unsigned long) ka->sa_restorer;
3586 } else {
3587 /* Generate return code (system call to sigreturn) */
3588 abi_ulong retcode_addr = frame_addr +
3589 offsetof(struct target_sigframe, retcode);
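/* MOVW(2) loads R3 with the syscall number stored as data in
   retcode[2]; TRAP_NOARG ("trapa") then enters the kernel with the
   number in R3. */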
3590 __put_user(MOVW(2), &frame->retcode[0]);
3591 __put_user(TRAP_NOARG, &frame->retcode[1]);
3592 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3593 regs->pr = (unsigned long) retcode_addr;
3596 /* Set up registers for signal handler */
3597 regs->gregs[15] = frame_addr;
3598 regs->gregs[4] = sig; /* Arg for signal handler */
3599 regs->gregs[5] = 0;
3600 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3601 regs->pc = (unsigned long) ka->_sa_handler;
3602 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3604 unlock_user_struct(frame, frame_addr, 1);
3605 return;
3607 give_sigsegv:
3608 unlock_user_struct(frame, frame_addr, 1);
3609 force_sigsegv(sig);
3612 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3613 target_siginfo_t *info,
3614 target_sigset_t *set, CPUSH4State *regs)
3616 struct target_rt_sigframe *frame;
3617 abi_ulong frame_addr;
3618 int i;
3620 unwind_gusa(regs);
3622 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3623 trace_user_setup_rt_frame(regs, frame_addr);
3624 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3625 goto give_sigsegv;
3628 tswap_siginfo(&frame->info, info);
3630 /* Create the ucontext. */
3631 __put_user(0, &frame->uc.tuc_flags);
3632 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3633 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3634 &frame->uc.tuc_stack.ss_sp);
3635 __put_user(sas_ss_flags(regs->gregs[15]),
3636 &frame->uc.tuc_stack.ss_flags);
3637 __put_user(target_sigaltstack_used.ss_size,
3638 &frame->uc.tuc_stack.ss_size);
3639 setup_sigcontext(&frame->uc.tuc_mcontext,
3640 regs, set->sig[0]);
3641 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3642 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3645 /* Set up to return from userspace. If provided, use a stub
3646 already in userspace. */
3647 if (ka->sa_flags & TARGET_SA_RESTORER) {
3648 regs->pr = (unsigned long) ka->sa_restorer;
3649 } else {
3650 /* Generate return code (system call to sigreturn) */
3651 abi_ulong retcode_addr = frame_addr +
3652 offsetof(struct target_rt_sigframe, retcode);
3653 __put_user(MOVW(2), &frame->retcode[0]);
3654 __put_user(TRAP_NOARG, &frame->retcode[1]);
3655 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3656 regs->pr = (unsigned long) retcode_addr;
3659 /* Set up registers for signal handler */
3660 regs->gregs[15] = frame_addr;
3661 regs->gregs[4] = sig; /* Arg for signal handler */
3662 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3663 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3664 regs->pc = (unsigned long) ka->_sa_handler;
3665 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3667 unlock_user_struct(frame, frame_addr, 1);
3668 return;
3670 give_sigsegv:
3671 unlock_user_struct(frame, frame_addr, 1);
3672 force_sigsegv(sig);
3675 long do_sigreturn(CPUSH4State *regs)
3677 struct target_sigframe *frame;
3678 abi_ulong frame_addr;
3679 sigset_t blocked;
3680 target_sigset_t target_set;
3681 int i;
3682 int err = 0;
3684 frame_addr = regs->gregs[15];
3685 trace_user_do_sigreturn(regs, frame_addr);
3686 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3687 goto badframe;
3690 __get_user(target_set.sig[0], &frame->sc.oldmask);
3691 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3692 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3695 if (err)
3696 goto badframe;
3698 target_to_host_sigset_internal(&blocked, &target_set);
3699 set_sigmask(&blocked);
3701 restore_sigcontext(regs, &frame->sc);
3703 unlock_user_struct(frame, frame_addr, 0);
3704 return -TARGET_QEMU_ESIGRETURN;
3706 badframe:
3707 unlock_user_struct(frame, frame_addr, 0);
3708 force_sig(TARGET_SIGSEGV);
3709 return -TARGET_QEMU_ESIGRETURN;
3712 long do_rt_sigreturn(CPUSH4State *regs)
3714 struct target_rt_sigframe *frame;
3715 abi_ulong frame_addr;
3716 sigset_t blocked;
3718 frame_addr = regs->gregs[15];
3719 trace_user_do_rt_sigreturn(regs, frame_addr);
3720 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3721 goto badframe;
3724 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3725 set_sigmask(&blocked);
3727 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3729 if (do_sigaltstack(frame_addr +
3730 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3731 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3732 goto badframe;
3735 unlock_user_struct(frame, frame_addr, 0);
3736 return -TARGET_QEMU_ESIGRETURN;
3738 badframe:
3739 unlock_user_struct(frame, frame_addr, 0);
3740 force_sig(TARGET_SIGSEGV);
3741 return -TARGET_QEMU_ESIGRETURN;
3743 #elif defined(TARGET_MICROBLAZE)
3745 struct target_sigcontext {
3746 struct target_pt_regs regs; /* needs to be first */
3747 uint32_t oldmask;
3750 struct target_stack_t {
3751 abi_ulong ss_sp;
3752 int ss_flags;
3753 unsigned int ss_size;
3756 struct target_ucontext {
3757 abi_ulong tuc_flags;
3758 abi_ulong tuc_link;
3759 struct target_stack_t tuc_stack;
3760 struct target_sigcontext tuc_mcontext;
3761 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3764 /* Signal frames. */
3765 struct target_signal_frame {
3766 struct target_ucontext uc;
3767 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3768 uint32_t tramp[2];
3771 struct rt_signal_frame {
3772 siginfo_t info;
3773 ucontext_t uc;
3774 uint32_t tramp[2];
3777 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3779 __put_user(env->regs[0], &sc->regs.r0);
3780 __put_user(env->regs[1], &sc->regs.r1);
3781 __put_user(env->regs[2], &sc->regs.r2);
3782 __put_user(env->regs[3], &sc->regs.r3);
3783 __put_user(env->regs[4], &sc->regs.r4);
3784 __put_user(env->regs[5], &sc->regs.r5);
3785 __put_user(env->regs[6], &sc->regs.r6);
3786 __put_user(env->regs[7], &sc->regs.r7);
3787 __put_user(env->regs[8], &sc->regs.r8);
3788 __put_user(env->regs[9], &sc->regs.r9);
3789 __put_user(env->regs[10], &sc->regs.r10);
3790 __put_user(env->regs[11], &sc->regs.r11);
3791 __put_user(env->regs[12], &sc->regs.r12);
3792 __put_user(env->regs[13], &sc->regs.r13);
3793 __put_user(env->regs[14], &sc->regs.r14);
3794 __put_user(env->regs[15], &sc->regs.r15);
3795 __put_user(env->regs[16], &sc->regs.r16);
3796 __put_user(env->regs[17], &sc->regs.r17);
3797 __put_user(env->regs[18], &sc->regs.r18);
3798 __put_user(env->regs[19], &sc->regs.r19);
3799 __put_user(env->regs[20], &sc->regs.r20);
3800 __put_user(env->regs[21], &sc->regs.r21);
3801 __put_user(env->regs[22], &sc->regs.r22);
3802 __put_user(env->regs[23], &sc->regs.r23);
3803 __put_user(env->regs[24], &sc->regs.r24);
3804 __put_user(env->regs[25], &sc->regs.r25);
3805 __put_user(env->regs[26], &sc->regs.r26);
3806 __put_user(env->regs[27], &sc->regs.r27);
3807 __put_user(env->regs[28], &sc->regs.r28);
3808 __put_user(env->regs[29], &sc->regs.r29);
3809 __put_user(env->regs[30], &sc->regs.r30);
3810 __put_user(env->regs[31], &sc->regs.r31);
3811 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3814 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3816 __get_user(env->regs[0], &sc->regs.r0);
3817 __get_user(env->regs[1], &sc->regs.r1);
3818 __get_user(env->regs[2], &sc->regs.r2);
3819 __get_user(env->regs[3], &sc->regs.r3);
3820 __get_user(env->regs[4], &sc->regs.r4);
3821 __get_user(env->regs[5], &sc->regs.r5);
3822 __get_user(env->regs[6], &sc->regs.r6);
3823 __get_user(env->regs[7], &sc->regs.r7);
3824 __get_user(env->regs[8], &sc->regs.r8);
3825 __get_user(env->regs[9], &sc->regs.r9);
3826 __get_user(env->regs[10], &sc->regs.r10);
3827 __get_user(env->regs[11], &sc->regs.r11);
3828 __get_user(env->regs[12], &sc->regs.r12);
3829 __get_user(env->regs[13], &sc->regs.r13);
3830 __get_user(env->regs[14], &sc->regs.r14);
3831 __get_user(env->regs[15], &sc->regs.r15);
3832 __get_user(env->regs[16], &sc->regs.r16);
3833 __get_user(env->regs[17], &sc->regs.r17);
3834 __get_user(env->regs[18], &sc->regs.r18);
3835 __get_user(env->regs[19], &sc->regs.r19);
3836 __get_user(env->regs[20], &sc->regs.r20);
3837 __get_user(env->regs[21], &sc->regs.r21);
3838 __get_user(env->regs[22], &sc->regs.r22);
3839 __get_user(env->regs[23], &sc->regs.r23);
3840 __get_user(env->regs[24], &sc->regs.r24);
3841 __get_user(env->regs[25], &sc->regs.r25);
3842 __get_user(env->regs[26], &sc->regs.r26);
3843 __get_user(env->regs[27], &sc->regs.r27);
3844 __get_user(env->regs[28], &sc->regs.r28);
3845 __get_user(env->regs[29], &sc->regs.r29);
3846 __get_user(env->regs[30], &sc->regs.r30);
3847 __get_user(env->regs[31], &sc->regs.r31);
3848 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3851 static abi_ulong get_sigframe(struct target_sigaction *ka,
3852 CPUMBState *env, int frame_size)
3854 abi_ulong sp = env->regs[1];
3856 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3857 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3860 return ((sp - frame_size) & -8UL);
3863 static void setup_frame(int sig, struct target_sigaction *ka,
3864 target_sigset_t *set, CPUMBState *env)
3866 struct target_signal_frame *frame;
3867 abi_ulong frame_addr;
3868 int i;
3870 frame_addr = get_sigframe(ka, env, sizeof *frame);
3871 trace_user_setup_frame(env, frame_addr);
3872 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3873 goto badframe;
3875 /* Save the mask. */
3876 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3878 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3879 __put_user(set->sig[i], &frame->extramask[i - 1]);
3882 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3884 /* Set up to return from userspace. If provided, use a stub
3885 already in userspace. */
3886 /* The -8 offset caters for the "rtsd r15, 8" return offset */
3887 if (ka->sa_flags & TARGET_SA_RESTORER) {
3888 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3889 } else {
3890 uint32_t t;
3891 /* Note, these encodings are _big endian_! */
3892 /* addi r12, r0, __NR_sigreturn */
3893 t = 0x31800000UL | TARGET_NR_sigreturn;
3894 __put_user(t, frame->tramp + 0);
3895 /* brki r14, 0x8 */
3896 t = 0xb9cc0008UL;
3897 __put_user(t, frame->tramp + 1);
3899 /* Return from sighandler will jump to the tramp.
3900 Negative 8 offset because return is rtsd r15, 8 */
3901 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3902 - 8;
3905 /* Set up registers for signal handler */
3906 env->regs[1] = frame_addr;
3907 /* Signal handler args: */
3908 env->regs[5] = sig; /* Arg 0: signum */
3909 env->regs[6] = 0;
3910 /* arg 1: sigcontext */
3911 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3913 /* Offset of 4 to handle microblaze rtid r14, 0 */
3914 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3916 unlock_user_struct(frame, frame_addr, 1);
3917 return;
3918 badframe:
3919 force_sigsegv(sig);
3922 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3923 target_siginfo_t *info,
3924 target_sigset_t *set, CPUMBState *env)
3926 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3929 long do_sigreturn(CPUMBState *env)
3931 struct target_signal_frame *frame;
3932 abi_ulong frame_addr;
3933 target_sigset_t target_set;
3934 sigset_t set;
3935 int i;
3937 frame_addr = env->regs[R_SP];
3938 trace_user_do_sigreturn(env, frame_addr);
3939 /* Make sure the guest isn't playing games. */
3940 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3941 goto badframe;
3943 /* Restore blocked signals */
3944 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3945 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3946 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3948 target_to_host_sigset_internal(&set, &target_set);
3949 set_sigmask(&set);
3951 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3952 /* We got here through a sigreturn syscall; our path back is via an
3953 rtb insn, so set up r14 for that. */
3954 env->regs[14] = env->sregs[SR_PC];
3956 unlock_user_struct(frame, frame_addr, 0);
3957 return -TARGET_QEMU_ESIGRETURN;
3958 badframe:
3959 force_sig(TARGET_SIGSEGV);
3960 return -TARGET_QEMU_ESIGRETURN;
3963 long do_rt_sigreturn(CPUMBState *env)
3965 trace_user_do_rt_sigreturn(env, 0);
3966 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3967 return -TARGET_ENOSYS;
3970 #elif defined(TARGET_CRIS)
3972 struct target_sigcontext {
3973 struct target_pt_regs regs; /* needs to be first */
3974 uint32_t oldmask;
3975 uint32_t usp; /* usp before stacking this gunk on it */
3978 /* Signal frames. */
3979 struct target_signal_frame {
3980 struct target_sigcontext sc;
3981 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3982 uint16_t retcode[4]; /* Trampoline code. */
3985 struct rt_signal_frame {
3986 siginfo_t *pinfo;
3987 void *puc;
3988 siginfo_t info;
3989 ucontext_t uc;
3990 uint16_t retcode[4]; /* Trampoline code. */
3993 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3995 __put_user(env->regs[0], &sc->regs.r0);
3996 __put_user(env->regs[1], &sc->regs.r1);
3997 __put_user(env->regs[2], &sc->regs.r2);
3998 __put_user(env->regs[3], &sc->regs.r3);
3999 __put_user(env->regs[4], &sc->regs.r4);
4000 __put_user(env->regs[5], &sc->regs.r5);
4001 __put_user(env->regs[6], &sc->regs.r6);
4002 __put_user(env->regs[7], &sc->regs.r7);
4003 __put_user(env->regs[8], &sc->regs.r8);
4004 __put_user(env->regs[9], &sc->regs.r9);
4005 __put_user(env->regs[10], &sc->regs.r10);
4006 __put_user(env->regs[11], &sc->regs.r11);
4007 __put_user(env->regs[12], &sc->regs.r12);
4008 __put_user(env->regs[13], &sc->regs.r13);
4009 __put_user(env->regs[14], &sc->usp);
4010 __put_user(env->regs[15], &sc->regs.acr);
4011 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
4012 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
4013 __put_user(env->pc, &sc->regs.erp);
4016 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
4018 __get_user(env->regs[0], &sc->regs.r0);
4019 __get_user(env->regs[1], &sc->regs.r1);
4020 __get_user(env->regs[2], &sc->regs.r2);
4021 __get_user(env->regs[3], &sc->regs.r3);
4022 __get_user(env->regs[4], &sc->regs.r4);
4023 __get_user(env->regs[5], &sc->regs.r5);
4024 __get_user(env->regs[6], &sc->regs.r6);
4025 __get_user(env->regs[7], &sc->regs.r7);
4026 __get_user(env->regs[8], &sc->regs.r8);
4027 __get_user(env->regs[9], &sc->regs.r9);
4028 __get_user(env->regs[10], &sc->regs.r10);
4029 __get_user(env->regs[11], &sc->regs.r11);
4030 __get_user(env->regs[12], &sc->regs.r12);
4031 __get_user(env->regs[13], &sc->regs.r13);
4032 __get_user(env->regs[14], &sc->usp);
4033 __get_user(env->regs[15], &sc->regs.acr);
4034 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
4035 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
4036 __get_user(env->pc, &sc->regs.erp);
4039 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4041 abi_ulong sp;
4042 /* Align the stack downwards to 4. */
4043 sp = (env->regs[R_SP] & ~3);
4044 return sp - framesize;
4047 static void setup_frame(int sig, struct target_sigaction *ka,
4048 target_sigset_t *set, CPUCRISState *env)
4050 struct target_signal_frame *frame;
4051 abi_ulong frame_addr;
4052 int i;
4054 frame_addr = get_sigframe(env, sizeof *frame);
4055 trace_user_setup_frame(env, frame_addr);
4056 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4057 goto badframe;
4060 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
4061 * use this trampoline anymore but it sets it up for GDB.
4062 * In QEMU, using the trampoline simplifies things a bit so we use it.
4064 * This is movu.w __NR_sigreturn, r9; break 13;
4066 __put_user(0x9c5f, frame->retcode+0);
4067 __put_user(TARGET_NR_sigreturn,
4068 frame->retcode + 1);
4069 __put_user(0xe93d, frame->retcode + 2);
4071 /* Save the mask. */
4072 __put_user(set->sig[0], &frame->sc.oldmask);
4074 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4075 __put_user(set->sig[i], &frame->extramask[i - 1]);
4078 setup_sigcontext(&frame->sc, env);
4080 /* Move the stack and setup the arguments for the handler. */
4081 env->regs[R_SP] = frame_addr;
4082 env->regs[10] = sig;
4083 env->pc = (unsigned long) ka->_sa_handler;
4084 /* Link SRP so the guest returns through the trampoline. */
4085 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
4087 unlock_user_struct(frame, frame_addr, 1);
4088 return;
4089 badframe:
4090 force_sigsegv(sig);
4093 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4094 target_siginfo_t *info,
4095 target_sigset_t *set, CPUCRISState *env)
4097 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
4100 long do_sigreturn(CPUCRISState *env)
4102 struct target_signal_frame *frame;
4103 abi_ulong frame_addr;
4104 target_sigset_t target_set;
4105 sigset_t set;
4106 int i;
4108 frame_addr = env->regs[R_SP];
4109 trace_user_do_sigreturn(env, frame_addr);
4110 /* Make sure the guest isn't playing games. */
4111 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
4112 goto badframe;
4115 /* Restore blocked signals */
4116 __get_user(target_set.sig[0], &frame->sc.oldmask);
4117 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4118 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4120 target_to_host_sigset_internal(&set, &target_set);
4121 set_sigmask(&set);
4123 restore_sigcontext(&frame->sc, env);
4124 unlock_user_struct(frame, frame_addr, 0);
4125 return -TARGET_QEMU_ESIGRETURN;
4126 badframe:
4127 force_sig(TARGET_SIGSEGV);
4128 return -TARGET_QEMU_ESIGRETURN;
4131 long do_rt_sigreturn(CPUCRISState *env)
4133 trace_user_do_rt_sigreturn(env, 0);
4134 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
4135 return -TARGET_ENOSYS;
4138 #elif defined(TARGET_NIOS2)
4140 #define MCONTEXT_VERSION 2
4142 struct target_sigcontext {
4143 int version;
4144 unsigned long gregs[32];
4147 struct target_ucontext {
4148 abi_ulong tuc_flags;
4149 abi_ulong tuc_link;
4150 target_stack_t tuc_stack;
4151 struct target_sigcontext tuc_mcontext;
4152 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4155 struct target_rt_sigframe {
4156 struct target_siginfo info;
4157 struct target_ucontext uc;
4160 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4162 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4163 #ifdef CONFIG_STACK_GROWSUP
4164 return target_sigaltstack_used.ss_sp;
4165 #else
4166 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4167 #endif
4169 return sp;
4172 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
4174 unsigned long *gregs = uc->tuc_mcontext.gregs;
4176 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
4177 __put_user(env->regs[1], &gregs[0]);
4178 __put_user(env->regs[2], &gregs[1]);
4179 __put_user(env->regs[3], &gregs[2]);
4180 __put_user(env->regs[4], &gregs[3]);
4181 __put_user(env->regs[5], &gregs[4]);
4182 __put_user(env->regs[6], &gregs[5]);
4183 __put_user(env->regs[7], &gregs[6]);
4184 __put_user(env->regs[8], &gregs[7]);
4185 __put_user(env->regs[9], &gregs[8]);
4186 __put_user(env->regs[10], &gregs[9]);
4187 __put_user(env->regs[11], &gregs[10]);
4188 __put_user(env->regs[12], &gregs[11]);
4189 __put_user(env->regs[13], &gregs[12]);
4190 __put_user(env->regs[14], &gregs[13]);
4191 __put_user(env->regs[15], &gregs[14]);
4192 __put_user(env->regs[16], &gregs[15]);
4193 __put_user(env->regs[17], &gregs[16]);
4194 __put_user(env->regs[18], &gregs[17]);
4195 __put_user(env->regs[19], &gregs[18]);
4196 __put_user(env->regs[20], &gregs[19]);
4197 __put_user(env->regs[21], &gregs[20]);
4198 __put_user(env->regs[22], &gregs[21]);
4199 __put_user(env->regs[23], &gregs[22]);
4200 __put_user(env->regs[R_RA], &gregs[23]);
4201 __put_user(env->regs[R_FP], &gregs[24]);
4202 __put_user(env->regs[R_GP], &gregs[25]);
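/* Note: nothing is stored in gregs[26]; the restore path below reads and
   discards that slot. */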
4203 __put_user(env->regs[R_EA], &gregs[27]);
4204 __put_user(env->regs[R_SP], &gregs[28]);
4206 return 0;
4209 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
4210 int *pr2)
4212 int temp;
4213 abi_ulong off, frame_addr = env->regs[R_SP];
4214 unsigned long *gregs = uc->tuc_mcontext.gregs;
4215 int err;
4217 /* Always make any pending restarted system calls return -EINTR */
4218 /* current->restart_block.fn = do_no_restart_syscall; */
4220 __get_user(temp, &uc->tuc_mcontext.version);
4221 if (temp != MCONTEXT_VERSION) {
4222 return 1;
4225 /* restore passed registers */
4226 __get_user(env->regs[1], &gregs[0]);
4227 __get_user(env->regs[2], &gregs[1]);
4228 __get_user(env->regs[3], &gregs[2]);
4229 __get_user(env->regs[4], &gregs[3]);
4230 __get_user(env->regs[5], &gregs[4]);
4231 __get_user(env->regs[6], &gregs[5]);
4232 __get_user(env->regs[7], &gregs[6]);
4233 __get_user(env->regs[8], &gregs[7]);
4234 __get_user(env->regs[9], &gregs[8]);
4235 __get_user(env->regs[10], &gregs[9]);
4236 __get_user(env->regs[11], &gregs[10]);
4237 __get_user(env->regs[12], &gregs[11]);
4238 __get_user(env->regs[13], &gregs[12]);
4239 __get_user(env->regs[14], &gregs[13]);
4240 __get_user(env->regs[15], &gregs[14]);
4241 __get_user(env->regs[16], &gregs[15]);
4242 __get_user(env->regs[17], &gregs[16]);
4243 __get_user(env->regs[18], &gregs[17]);
4244 __get_user(env->regs[19], &gregs[18]);
4245 __get_user(env->regs[20], &gregs[19]);
4246 __get_user(env->regs[21], &gregs[20]);
4247 __get_user(env->regs[22], &gregs[21]);
4248 __get_user(env->regs[23], &gregs[22]);
4249 /* gregs[23] is handled below */
4250 /* TODO: verify whether this should be settable */
4251 __get_user(env->regs[R_FP], &gregs[24]);
4252 /* TODO: verify whether this should be settable */
4253 __get_user(env->regs[R_GP], &gregs[25]);
4254 /* Not really necessary; there are no user-settable bits */
4255 __get_user(temp, &gregs[26]);
4256 __get_user(env->regs[R_EA], &gregs[27]);
4258 __get_user(env->regs[R_RA], &gregs[23]);
4259 __get_user(env->regs[R_SP], &gregs[28]);
4261 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
4262 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
4263 if (err == -EFAULT) {
4264 return 1;
4267 *pr2 = env->regs[2];
4268 return 0;
4271 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
4272 size_t frame_size)
4274 unsigned long usp;
4276 /* Default to using normal stack. */
4277 usp = env->regs[R_SP];
4279 /* This is the X/Open sanctioned signal stack switching. */
4280 usp = sigsp(usp, ka);
4282 /* TODO: verify whether 32-bit or 64-bit alignment is required */
4283 return (void *)((usp - frame_size) & -8UL);
4286 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4287 target_siginfo_t *info,
4288 target_sigset_t *set,
4289 CPUNios2State *env)
4291 struct target_rt_sigframe *frame;
4292 int i, err = 0;
4294 frame = get_sigframe(ka, env, sizeof(*frame));
4296 if (ka->sa_flags & SA_SIGINFO) {
4297 tswap_siginfo(&frame->info, info);
4300 /* Create the ucontext. */
4301 __put_user(0, &frame->uc.tuc_flags);
4302 __put_user(0, &frame->uc.tuc_link);
4303 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4304 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4305 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4306 err |= rt_setup_ucontext(&frame->uc, env);
4307 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4308 __put_user((abi_ulong)set->sig[i],
4309 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4312 if (err) {
4313 goto give_sigsegv;
4316 /* Set up to return from userspace; jump to fixed address sigreturn
4317 trampoline on kuser page. */
4318 env->regs[R_RA] = (unsigned long) (0x1044);
4320 /* Set up registers for signal handler */
4321 env->regs[R_SP] = (unsigned long) frame;
4322 env->regs[4] = (unsigned long) sig;
4323 env->regs[5] = (unsigned long) &frame->info;
4324 env->regs[6] = (unsigned long) &frame->uc;
4325 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4326 return;
4328 give_sigsegv:
4329 if (sig == TARGET_SIGSEGV) {
4330 ka->_sa_handler = TARGET_SIG_DFL;
4332 force_sigsegv(sig);
4333 return;
4336 long do_sigreturn(CPUNios2State *env)
4338 trace_user_do_sigreturn(env, 0);
4339 fprintf(stderr, "do_sigreturn: not implemented\n");
4340 return -TARGET_ENOSYS;
4343 long do_rt_sigreturn(CPUNios2State *env)
4345 /* TODO: verify that we can follow the stack back */
4346 abi_ulong frame_addr = env->regs[R_SP];
4347 struct target_rt_sigframe *frame;
4348 sigset_t set;
4349 int rval;
4351 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4352 goto badframe;
4355 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4356 do_sigprocmask(SIG_SETMASK, &set, NULL);
4358 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4359 goto badframe;
4362 unlock_user_struct(frame, frame_addr, 0);
4363 return rval;
4365 badframe:
4366 unlock_user_struct(frame, frame_addr, 0);
4367 force_sig(TARGET_SIGSEGV);
4368 return 0;
4370 /* TARGET_NIOS2 */
4372 #elif defined(TARGET_OPENRISC)
4374 struct target_sigcontext {
4375 struct target_pt_regs regs;
4376 abi_ulong oldmask;
4377 abi_ulong usp;
4380 struct target_ucontext {
4381 abi_ulong tuc_flags;
4382 abi_ulong tuc_link;
4383 target_stack_t tuc_stack;
4384 struct target_sigcontext tuc_mcontext;
4385 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4388 struct target_rt_sigframe {
4389 abi_ulong pinfo;
4390 uint64_t puc;
4391 struct target_siginfo info;
4392 struct target_sigcontext sc;
4393 struct target_ucontext uc;
4394 unsigned char retcode[16]; /* trampoline code */
4397 /* This is the asm-generic/ucontext.h version */
4398 #if 0
4399 static int restore_sigcontext(CPUOpenRISCState *regs,
4400 struct target_sigcontext *sc)
4402 unsigned int err = 0;
4403 unsigned long old_usp;
4405 /* Always make any pending restarted system call return -EINTR */
4406 current_thread_info()->restart_block.fn = do_no_restart_syscall;
4408 /* restore the regs from &sc->regs (same as sc, since regs is first)
4409 * (sc is already checked for VERIFY_READ since the sigframe was
4410 * checked in sys_sigreturn previously)
4413 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
4414 goto badframe;
4417 /* make sure the U-flag is set so user-mode cannot fool us */
4419 regs->sr &= ~SR_SM;
4421 /* restore the old USP as it was before we stacked the sc etc.
4422 * (we cannot just pop the sigcontext since we aligned the sp and
4423 * stuff after pushing it)
4426 __get_user(old_usp, &sc->usp);
4427 phx_signal("old_usp 0x%lx", old_usp);
4429 __PHX__ REALLY /* ??? */
4430 wrusp(old_usp);
4431 regs->gpr[1] = old_usp;
4433 /* TODO: the other ports use regs->orig_XX to disable syscall checks
4434 * after this completes, but we don't use that mechanism. Maybe we can
4435 * use it now?
4438 return err;
4440 badframe:
4441 return 1;
4443 #endif
4445 /* Set up a signal frame. */
4447 static void setup_sigcontext(struct target_sigcontext *sc,
4448 CPUOpenRISCState *regs,
4449 unsigned long mask)
4451 unsigned long usp = cpu_get_gpr(regs, 1);
4453 /* copy the regs. they are first in sc so we can use sc directly */
4455 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
4457 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
4458 the signal handler. The frametype will be restored to its previous
4459 value in restore_sigcontext. */
4460 /*regs->frametype = CRIS_FRAME_NORMAL;*/
4462 /* then some other stuff */
4463 __put_user(mask, &sc->oldmask);
4464 __put_user(usp, &sc->usp);
4467 static inline unsigned long align_sigframe(unsigned long sp)
4469 return sp & ~3UL;
4472 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
4473 CPUOpenRISCState *regs,
4474 size_t frame_size)
4476 unsigned long sp = cpu_get_gpr(regs, 1);
4477 int onsigstack = on_sig_stack(sp);
4479 /* redzone */
4480 /* This is the X/Open sanctioned signal stack switching. */
4481 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
4482 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4485 sp = align_sigframe(sp - frame_size);
4488 * If we are on the alternate signal stack and would overflow it, don't.
4489 * Return an always-bogus address instead so we will die with SIGSEGV.
4492 if (onsigstack && !likely(on_sig_stack(sp))) {
4493 return -1L;
4496 return sp;
4499 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4500 target_siginfo_t *info,
4501 target_sigset_t *set, CPUOpenRISCState *env)
4503 int err = 0;
4504 abi_ulong frame_addr;
4505 unsigned long return_ip;
4506 struct target_rt_sigframe *frame;
4507 abi_ulong info_addr, uc_addr;
4509 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4510 trace_user_setup_rt_frame(env, frame_addr);
4511 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4512 goto give_sigsegv;
4515 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4516 __put_user(info_addr, &frame->pinfo);
4517 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4518 __put_user(uc_addr, &frame->puc);
4520 if (ka->sa_flags & SA_SIGINFO) {
4521 tswap_siginfo(&frame->info, info);
4524 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4525 __put_user(0, &frame->uc.tuc_flags);
4526 __put_user(0, &frame->uc.tuc_link);
4527 __put_user(target_sigaltstack_used.ss_sp,
4528 &frame->uc.tuc_stack.ss_sp);
4529 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4530 &frame->uc.tuc_stack.ss_flags);
4531 __put_user(target_sigaltstack_used.ss_size,
4532 &frame->uc.tuc_stack.ss_size);
4533 setup_sigcontext(&frame->sc, env, set->sig[0]);
4535 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4537 /* trampoline - the desired return ip is the retcode itself */
4538 return_ip = (unsigned long)&frame->retcode;
4539 /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
4540 __put_user(0xa960, (short *)(frame->retcode + 0));
4541 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4542 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4543 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
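/* Note on the encodings above: 0xa960 is the upper halfword of
   l.ori r11,r0,<imm>, 0x20000001 is l.sys 1, and 0x15000000 is l.nop
   (presumably filler after the syscall). */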
4545 if (err) {
4546 goto give_sigsegv;
4549 /* TODO: what is the current->exec_domain stuff and invmap? */
4551 /* Set up registers for signal handler */
4552 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4553 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4554 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4555 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
4556 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
4558 /* actually move the usp to reflect the stacked frame */
4559 cpu_set_gpr(env, 1, (unsigned long)frame);
4561 return;
4563 give_sigsegv:
4564 unlock_user_struct(frame, frame_addr, 1);
4565 force_sigsegv(sig);
4568 long do_sigreturn(CPUOpenRISCState *env)
4570 trace_user_do_sigreturn(env, 0);
4571 fprintf(stderr, "do_sigreturn: not implemented\n");
4572 return -TARGET_ENOSYS;
4575 long do_rt_sigreturn(CPUOpenRISCState *env)
4577 trace_user_do_rt_sigreturn(env, 0);
4578 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4579 return -TARGET_ENOSYS;
4581 /* TARGET_OPENRISC */
4583 #elif defined(TARGET_S390X)
4585 #define __NUM_GPRS 16
4586 #define __NUM_FPRS 16
4587 #define __NUM_ACRS 16
4589 #define S390_SYSCALL_SIZE 2
4590 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4592 #define _SIGCONTEXT_NSIG 64
4593 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4594 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4595 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4596 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4597 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4599 typedef struct {
4600 target_psw_t psw;
4601 target_ulong gprs[__NUM_GPRS];
4602 unsigned int acrs[__NUM_ACRS];
4603 } target_s390_regs_common;
4605 typedef struct {
4606 unsigned int fpc;
4607 double fprs[__NUM_FPRS];
4608 } target_s390_fp_regs;
4610 typedef struct {
4611 target_s390_regs_common regs;
4612 target_s390_fp_regs fpregs;
4613 } target_sigregs;
4615 struct target_sigcontext {
4616 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4617 target_sigregs *sregs;
4620 typedef struct {
4621 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4622 struct target_sigcontext sc;
4623 target_sigregs sregs;
4624 int signo;
4625 uint8_t retcode[S390_SYSCALL_SIZE];
4626 } sigframe;
4628 struct target_ucontext {
4629 target_ulong tuc_flags;
4630 struct target_ucontext *tuc_link;
4631 target_stack_t tuc_stack;
4632 target_sigregs tuc_mcontext;
4633 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4636 typedef struct {
4637 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4638 uint8_t retcode[S390_SYSCALL_SIZE];
4639 struct target_siginfo info;
4640 struct target_ucontext uc;
4641 } rt_sigframe;
4643 static inline abi_ulong
4644 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4646 abi_ulong sp;
4648 /* Default to using normal stack */
4649 sp = env->regs[15];
4651 /* This is the X/Open sanctioned signal stack switching. */
4652 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4653 if (!sas_ss_flags(sp)) {
4654 sp = target_sigaltstack_used.ss_sp +
4655 target_sigaltstack_used.ss_size;
4659 /* This is the legacy signal stack switching. */
4660 else if (/* FIXME !user_mode(regs) */ 0 &&
4661 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4662 ka->sa_restorer) {
4663 sp = (abi_ulong) ka->sa_restorer;
4666 return (sp - frame_size) & -8ul;
4669 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4671 int i;
4672 //save_access_regs(current->thread.acrs); FIXME
4674 /* Copy a 'clean' PSW mask to the user to avoid leaking
4675 information about whether PER is currently on. */
4676 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4677 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4678 for (i = 0; i < 16; i++) {
4679 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4681 for (i = 0; i < 16; i++) {
4682 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4685 * We have to store the fp registers to current->thread.fp_regs
4686 * to merge them with the emulated registers.
4688 //save_fp_regs(&current->thread.fp_regs); FIXME
4689 for (i = 0; i < 16; i++) {
4690 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4694 static void setup_frame(int sig, struct target_sigaction *ka,
4695 target_sigset_t *set, CPUS390XState *env)
4697 sigframe *frame;
4698 abi_ulong frame_addr;
4700 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4701 trace_user_setup_frame(env, frame_addr);
4702 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4703 goto give_sigsegv;
4706 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4708 save_sigregs(env, &frame->sregs);
4710 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4711 (abi_ulong *)&frame->sc.sregs);
4713 /* Set up to return from userspace. If provided, use a stub
4714 already in userspace. */
4715 if (ka->sa_flags & TARGET_SA_RESTORER) {
4716 env->regs[14] = (unsigned long)
4717 ka->sa_restorer | PSW_ADDR_AMODE;
4718 } else {
4719 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4720 | PSW_ADDR_AMODE;
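/* 0x0a00 | n encodes the two-byte 'svc n' instruction, i.e. a supervisor
   call carrying the sigreturn syscall number. */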
4721 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4722 (uint16_t *)(frame->retcode));
4725 /* Set up backchain. */
4726 __put_user(env->regs[15], (abi_ulong *) frame);
4728 /* Set up registers for signal handler */
4729 env->regs[15] = frame_addr;
4730 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4732 env->regs[2] = sig; //map_signal(sig);
4733 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4735 /* We forgot to include these in the sigcontext.
4736 To avoid breaking binary compatibility, they are passed as args. */
4737 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4738 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4740 /* Place signal number on stack to allow backtrace from handler. */
4741 __put_user(env->regs[2], &frame->signo);
4742 unlock_user_struct(frame, frame_addr, 1);
4743 return;
4745 give_sigsegv:
4746 force_sigsegv(sig);
4749 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4750 target_siginfo_t *info,
4751 target_sigset_t *set, CPUS390XState *env)
4753 int i;
4754 rt_sigframe *frame;
4755 abi_ulong frame_addr;
4757 frame_addr = get_sigframe(ka, env, sizeof *frame);
4758 trace_user_setup_rt_frame(env, frame_addr);
4759 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4760 goto give_sigsegv;
4763 tswap_siginfo(&frame->info, info);
4765 /* Create the ucontext. */
4766 __put_user(0, &frame->uc.tuc_flags);
4767 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4768 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4769 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4770 &frame->uc.tuc_stack.ss_flags);
4771 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4772 save_sigregs(env, &frame->uc.tuc_mcontext);
4773 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4774 __put_user((abi_ulong)set->sig[i],
4775 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4778 /* Set up to return from userspace. If provided, use a stub
4779 already in userspace. */
4780 if (ka->sa_flags & TARGET_SA_RESTORER) {
4781 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4782 } else {
4783 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4784 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4785 (uint16_t *)(frame->retcode));
4788 /* Set up backchain. */
4789 __put_user(env->regs[15], (abi_ulong *) frame);
4791 /* Set up registers for signal handler */
4792 env->regs[15] = frame_addr;
4793 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4795 env->regs[2] = sig; //map_signal(sig);
4796 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4797 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4798 return;
4800 give_sigsegv:
4801 force_sigsegv(sig);
4804 static int
4805 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4807 int err = 0;
4808 int i;
4810 for (i = 0; i < 16; i++) {
4811 __get_user(env->regs[i], &sc->regs.gprs[i]);
4814 __get_user(env->psw.mask, &sc->regs.psw.mask);
4815 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4816 (unsigned long long)env->psw.addr);
4817 __get_user(env->psw.addr, &sc->regs.psw.addr);
4818 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4820 for (i = 0; i < 16; i++) {
4821 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4823 for (i = 0; i < 16; i++) {
4824 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4827 return err;
4830 long do_sigreturn(CPUS390XState *env)
4832 sigframe *frame;
4833 abi_ulong frame_addr = env->regs[15];
4834 target_sigset_t target_set;
4835 sigset_t set;
4837 trace_user_do_sigreturn(env, frame_addr);
4838 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4839 goto badframe;
4841 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4843 target_to_host_sigset_internal(&set, &target_set);
4844 set_sigmask(&set); /* ~_BLOCKABLE? */
4846 if (restore_sigregs(env, &frame->sregs)) {
4847 goto badframe;
4850 unlock_user_struct(frame, frame_addr, 0);
4851 return -TARGET_QEMU_ESIGRETURN;
4853 badframe:
4854 force_sig(TARGET_SIGSEGV);
4855 return -TARGET_QEMU_ESIGRETURN;
4858 long do_rt_sigreturn(CPUS390XState *env)
4860 rt_sigframe *frame;
4861 abi_ulong frame_addr = env->regs[15];
4862 sigset_t set;
4864 trace_user_do_rt_sigreturn(env, frame_addr);
4865 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4866 goto badframe;
4868 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4870 set_sigmask(&set); /* ~_BLOCKABLE? */
4872 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4873 goto badframe;
4876 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4877 get_sp_from_cpustate(env)) == -EFAULT) {
4878 goto badframe;
4880 unlock_user_struct(frame, frame_addr, 0);
4881 return -TARGET_QEMU_ESIGRETURN;
4883 badframe:
4884 unlock_user_struct(frame, frame_addr, 0);
4885 force_sig(TARGET_SIGSEGV);
4886 return -TARGET_QEMU_ESIGRETURN;
4889 #elif defined(TARGET_PPC)
4891 /* Size of dummy stack frame allocated when calling signal handler.
4892 See arch/powerpc/include/asm/ptrace.h. */
4893 #if defined(TARGET_PPC64)
4894 #define SIGNAL_FRAMESIZE 128
4895 #else
4896 #define SIGNAL_FRAMESIZE 64
4897 #endif
4899 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4900 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4901 struct target_mcontext {
4902 target_ulong mc_gregs[48];
4903 /* Includes fpscr. */
4904 uint64_t mc_fregs[33];
4905 #if defined(TARGET_PPC64)
4906 /* Pointer to the vector regs */
4907 target_ulong v_regs;
4908 #else
4909 target_ulong mc_pad[2];
4910 #endif
4911 /* We need to handle Altivec and SPE at the same time, which no
4912 kernel needs to do. Fortunately, the kernel defines this bit to
4913 be Altivec-register-large all the time, rather than trying to
4914 twiddle it based on the specific platform. */
4915 union {
4916 /* SPE vector registers. One extra for SPEFSCR. */
4917 uint32_t spe[33];
4918 /* Altivec vector registers. The packing of VSCR and VRSAVE
4919 varies depending on whether we're PPC64 or not: PPC64 splits
4920 them apart; PPC32 stuffs them together.
4921 We also need to account for the VSX registers on PPC64
4923 #if defined(TARGET_PPC64)
4924 #define QEMU_NVRREG (34 + 16)
4925 /* On ppc64, this mcontext structure is naturally *unaligned*,
4926 * or rather it is aligned on an 8-byte boundary but not on
4927 * a 16-byte one. This pad fixes it up. This is also why the
4928 * vector regs are referenced by the v_regs pointer above, so
4929 * any amount of padding can be added here.
4931 target_ulong pad;
4932 #else
4933 /* On ppc32, we are already aligned to 16 bytes */
4934 #define QEMU_NVRREG 33
4935 #endif
4936 /* We cannot use ppc_avr_t here as we do *not* want the implied
4937 * 16-byte alignment that would result from it. This would have
4938 * the effect of making the whole struct target_mcontext aligned,
4939 * which breaks the layout of struct target_ucontext on ppc64.
4941 uint64_t altivec[QEMU_NVRREG][2];
4942 #undef QEMU_NVRREG
4943 } mc_vregs;
4946 /* See arch/powerpc/include/asm/sigcontext.h. */
4947 struct target_sigcontext {
4948 target_ulong _unused[4];
4949 int32_t signal;
4950 #if defined(TARGET_PPC64)
4951 int32_t pad0;
4952 #endif
4953 target_ulong handler;
4954 target_ulong oldmask;
4955 target_ulong regs; /* struct pt_regs __user * */
4956 #if defined(TARGET_PPC64)
4957 struct target_mcontext mcontext;
4958 #endif
4961 /* Indices for target_mcontext.mc_gregs, below.
4962 See arch/powerpc/include/asm/ptrace.h for details. */
4963 enum {
4964 TARGET_PT_R0 = 0,
4965 TARGET_PT_R1 = 1,
4966 TARGET_PT_R2 = 2,
4967 TARGET_PT_R3 = 3,
4968 TARGET_PT_R4 = 4,
4969 TARGET_PT_R5 = 5,
4970 TARGET_PT_R6 = 6,
4971 TARGET_PT_R7 = 7,
4972 TARGET_PT_R8 = 8,
4973 TARGET_PT_R9 = 9,
4974 TARGET_PT_R10 = 10,
4975 TARGET_PT_R11 = 11,
4976 TARGET_PT_R12 = 12,
4977 TARGET_PT_R13 = 13,
4978 TARGET_PT_R14 = 14,
4979 TARGET_PT_R15 = 15,
4980 TARGET_PT_R16 = 16,
4981 TARGET_PT_R17 = 17,
4982 TARGET_PT_R18 = 18,
4983 TARGET_PT_R19 = 19,
4984 TARGET_PT_R20 = 20,
4985 TARGET_PT_R21 = 21,
4986 TARGET_PT_R22 = 22,
4987 TARGET_PT_R23 = 23,
4988 TARGET_PT_R24 = 24,
4989 TARGET_PT_R25 = 25,
4990 TARGET_PT_R26 = 26,
4991 TARGET_PT_R27 = 27,
4992 TARGET_PT_R28 = 28,
4993 TARGET_PT_R29 = 29,
4994 TARGET_PT_R30 = 30,
4995 TARGET_PT_R31 = 31,
4996 TARGET_PT_NIP = 32,
4997 TARGET_PT_MSR = 33,
4998 TARGET_PT_ORIG_R3 = 34,
4999 TARGET_PT_CTR = 35,
5000 TARGET_PT_LNK = 36,
5001 TARGET_PT_XER = 37,
5002 TARGET_PT_CCR = 38,
5003 /* Yes, there are two registers with #39. One is 64-bit only. */
5004 TARGET_PT_MQ = 39,
5005 TARGET_PT_SOFTE = 39,
5006 TARGET_PT_TRAP = 40,
5007 TARGET_PT_DAR = 41,
5008 TARGET_PT_DSISR = 42,
5009 TARGET_PT_RESULT = 43,
5010 TARGET_PT_REGS_COUNT = 44
5014 struct target_ucontext {
5015 target_ulong tuc_flags;
5016 target_ulong tuc_link; /* ucontext_t __user * */
5017 struct target_sigaltstack tuc_stack;
5018 #if !defined(TARGET_PPC64)
5019 int32_t tuc_pad[7];
5020 target_ulong tuc_regs; /* struct mcontext __user *
5021 points to uc_mcontext field */
5022 #endif
5023 target_sigset_t tuc_sigmask;
5024 #if defined(TARGET_PPC64)
5025 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
5026 struct target_sigcontext tuc_sigcontext;
5027 #else
5028 int32_t tuc_maskext[30];
5029 int32_t tuc_pad2[3];
5030 struct target_mcontext tuc_mcontext;
5031 #endif
5034 /* See arch/powerpc/kernel/signal_32.c. */
5035 struct target_sigframe {
5036 struct target_sigcontext sctx;
5037 struct target_mcontext mctx;
5038 int32_t abigap[56];
5041 #if defined(TARGET_PPC64)
5043 #define TARGET_TRAMP_SIZE 6
5045 struct target_rt_sigframe {
5046 /* sys_rt_sigreturn requires the ucontext be the first field */
5047 struct target_ucontext uc;
5048 target_ulong _unused[2];
5049 uint32_t trampoline[TARGET_TRAMP_SIZE];
5050 target_ulong pinfo; /* struct siginfo __user * */
5051 target_ulong puc; /* void __user * */
5052 struct target_siginfo info;
5053 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
5054 char abigap[288];
5055 } __attribute__((aligned(16)));
5057 #else
5059 struct target_rt_sigframe {
5060 struct target_siginfo info;
5061 struct target_ucontext uc;
5062 int32_t abigap[56];
5065 #endif
5067 #if defined(TARGET_PPC64)
5069 struct target_func_ptr {
5070 target_ulong entry;
5071 target_ulong toc;
5074 #endif
5076 /* We use the mc_pad field for the signal return trampoline. */
5077 #define tramp mc_pad
5079 /* See arch/powerpc/kernel/signal.c. */
5080 static target_ulong get_sigframe(struct target_sigaction *ka,
5081 CPUPPCState *env,
5082 int frame_size)
5084 target_ulong oldsp;
5086 oldsp = env->gpr[1];
5088 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5089 (sas_ss_flags(oldsp) == 0)) {
5090 oldsp = (target_sigaltstack_used.ss_sp
5091 + target_sigaltstack_used.ss_size);
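/* Note: both PPC ABIs require 16-byte stack alignment, hence the ~0xF mask
   below. */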
5094 return (oldsp - frame_size) & ~0xFUL;
5097 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
5098 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
5099 #define PPC_VEC_HI 0
5100 #define PPC_VEC_LO 1
5101 #else
5102 #define PPC_VEC_HI 1
5103 #define PPC_VEC_LO 0
5104 #endif
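/* PPC_VEC_HI/PPC_VEC_LO select which 64-bit half of the host ppc_avr_t holds
   the architecturally high half, so vector halves are stored in guest order
   regardless of host endianness. */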
5107 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
5109 target_ulong msr = env->msr;
5110 int i;
5111 target_ulong ccr = 0;
5113 /* In general, the kernel attempts to be intelligent about what it
5114 needs to save for Altivec/FP/SPE registers. We don't care that
5115 much, so we just go ahead and save everything. */
5117 /* Save general registers. */
5118 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5119 __put_user(env->gpr[i], &frame->mc_gregs[i]);
5121 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5122 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5123 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5124 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
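/* Pack the eight 4-bit CR fields into a single 32-bit CCR image,
   with CR0 in the most significant nibble. */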
5126 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5127 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
5129 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5131 /* Save Altivec registers if necessary. */
5132 if (env->insns_flags & PPC_ALTIVEC) {
5133 uint32_t *vrsave;
5134 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5135 ppc_avr_t *avr = &env->avr[i];
5136 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
5138 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5139 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5141 /* Set MSR_VR in the saved MSR value to indicate that
5142 frame->mc_vregs contains valid data. */
5143 msr |= MSR_VR;
5144 #if defined(TARGET_PPC64)
5145 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
5146 /* 64-bit needs to put a pointer to the vectors in the frame */
5147 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
5148 #else
5149 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
5150 #endif
5151 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
5154 /* Save VSX second halves */
5155 if (env->insns_flags2 & PPC2_VSX) {
5156 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5157 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5158 __put_user(env->vsr[i], &vsregs[i]);
5162 /* Save floating point registers. */
5163 if (env->insns_flags & PPC_FLOAT) {
5164 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5165 __put_user(env->fpr[i], &frame->mc_fregs[i]);
5167 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
5170 /* Save SPE registers. The kernel only saves the high half. */
5171 if (env->insns_flags & PPC_SPE) {
5172 #if defined(TARGET_PPC64)
5173 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5174 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
5176 #else
5177 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5178 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5180 #endif
5181 /* Set MSR_SPE in the saved MSR value to indicate that
5182 frame->mc_vregs contains valid data. */
5183 msr |= MSR_SPE;
5184 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5187 /* Store MSR. */
5188 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5191 static void encode_trampoline(int sigret, uint32_t *tramp)
5193 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
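/* 0x38000000 | n is 'addi r0,0,n' (i.e. li r0,n); 0x44000002 is 'sc'. */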
5194 if (sigret) {
5195 __put_user(0x38000000 | sigret, &tramp[0]);
5196 __put_user(0x44000002, &tramp[1]);
5200 static void restore_user_regs(CPUPPCState *env,
5201 struct target_mcontext *frame, int sig)
5203 target_ulong save_r2 = 0;
5204 target_ulong msr;
5205 target_ulong ccr;
5207 int i;
5209 if (!sig) {
5210 save_r2 = env->gpr[2];
5213 /* Restore general registers. */
5214 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5215 __get_user(env->gpr[i], &frame->mc_gregs[i]);
5217 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5218 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5219 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5220 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5221 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
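/* Unpack the CR fields from the saved 32-bit CCR image (CR0 in the top
   nibble). */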
5223 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5224 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
5227 if (!sig) {
5228 env->gpr[2] = save_r2;
5230 /* Restore MSR. */
5231 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5233 /* If doing signal return, restore the previous little-endian mode. */
5234 if (sig)
5235 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
5237 /* Restore Altivec registers if necessary. */
5238 if (env->insns_flags & PPC_ALTIVEC) {
5239 ppc_avr_t *v_regs;
5240 uint32_t *vrsave;
5241 #if defined(TARGET_PPC64)
5242 uint64_t v_addr;
5243 /* 64-bit needs to recover the pointer to the vectors from the frame */
5244 __get_user(v_addr, &frame->v_regs);
5245 v_regs = g2h(v_addr);
5246 #else
5247 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
5248 #endif
5249 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5250 ppc_avr_t *avr = &env->avr[i];
5251 ppc_avr_t *vreg = &v_regs[i];
5253 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5254 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5256 /* The saved VRSAVE follows the vector registers; its index depends on
5257 whether this is the ppc64 or ppc32 layout (see target_mcontext above). */
5258 #if defined(TARGET_PPC64)
5259 vrsave = (uint32_t *)&v_regs[33];
5260 #else
5261 vrsave = (uint32_t *)&v_regs[32];
5262 #endif
5263 __get_user(env->spr[SPR_VRSAVE], vrsave);
5266 /* Restore VSX second halves */
5267 if (env->insns_flags2 & PPC2_VSX) {
5268 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5269 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5270 __get_user(env->vsr[i], &vsregs[i]);
5274 /* Restore floating point registers. */
5275 if (env->insns_flags & PPC_FLOAT) {
5276 uint64_t fpscr;
5277 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5278 __get_user(env->fpr[i], &frame->mc_fregs[i]);
5280 __get_user(fpscr, &frame->mc_fregs[32]);
5281 env->fpscr = (uint32_t) fpscr;
5284 /* Restore SPE registers. The kernel only saves the high half. */
5285 if (env->insns_flags & PPC_SPE) {
5286 #if defined(TARGET_PPC64)
5287 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5288 uint32_t hi;
5290 __get_user(hi, &frame->mc_vregs.spe[i]);
5291 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
5293 #else
5294 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5295 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5297 #endif
5298 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5302 #if !defined(TARGET_PPC64)
5303 static void setup_frame(int sig, struct target_sigaction *ka,
5304 target_sigset_t *set, CPUPPCState *env)
5306 struct target_sigframe *frame;
5307 struct target_sigcontext *sc;
5308 target_ulong frame_addr, newsp;
5309 int err = 0;
5311 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5312 trace_user_setup_frame(env, frame_addr);
5313 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
5314 goto sigsegv;
5315 sc = &frame->sctx;
5317 __put_user(ka->_sa_handler, &sc->handler);
5318 __put_user(set->sig[0], &sc->oldmask);
5319 __put_user(set->sig[1], &sc->_unused[3]);
5320 __put_user(h2g(&frame->mctx), &sc->regs);
5321 __put_user(sig, &sc->signal);
5323 /* Save user regs. */
5324 save_user_regs(env, &frame->mctx);
5326 /* Construct the trampoline code on the stack. */
5327 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
5329 /* The kernel checks for the presence of a VDSO here. We don't
5330 emulate a vdso, so use a sigreturn system call. */
5331 env->lr = (target_ulong) h2g(frame->mctx.tramp);
5333 /* Turn off all fp exceptions. */
5334 env->fpscr = 0;
5336 /* Create a stack frame for the caller of the handler. */
5337 newsp = frame_addr - SIGNAL_FRAMESIZE;
5338 err |= put_user(env->gpr[1], newsp, target_ulong);
5340 if (err)
5341 goto sigsegv;
5343 /* Set up registers for signal handler. */
5344 env->gpr[1] = newsp;
5345 env->gpr[3] = sig;
5346 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
5348 env->nip = (target_ulong) ka->_sa_handler;
5350 /* Signal handlers are entered in big-endian mode. */
5351 env->msr &= ~(1ull << MSR_LE);
5353 unlock_user_struct(frame, frame_addr, 1);
5354 return;
5356 sigsegv:
5357 unlock_user_struct(frame, frame_addr, 1);
5358 force_sigsegv(sig);
5360 #endif /* !defined(TARGET_PPC64) */
5362 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5363 target_siginfo_t *info,
5364 target_sigset_t *set, CPUPPCState *env)
5366 struct target_rt_sigframe *rt_sf;
5367 uint32_t *trampptr = 0;
5368 struct target_mcontext *mctx = 0;
5369 target_ulong rt_sf_addr, newsp = 0;
5370 int i, err = 0;
5371 #if defined(TARGET_PPC64)
5372 struct target_sigcontext *sc = 0;
5373 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
5374 #endif
5376 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
5377 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
5378 goto sigsegv;
5380 tswap_siginfo(&rt_sf->info, info);
5382 __put_user(0, &rt_sf->uc.tuc_flags);
5383 __put_user(0, &rt_sf->uc.tuc_link);
5384 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
5385 &rt_sf->uc.tuc_stack.ss_sp);
5386 __put_user(sas_ss_flags(env->gpr[1]),
5387 &rt_sf->uc.tuc_stack.ss_flags);
5388 __put_user(target_sigaltstack_used.ss_size,
5389 &rt_sf->uc.tuc_stack.ss_size);
5390 #if !defined(TARGET_PPC64)
5391 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
5392 &rt_sf->uc.tuc_regs);
5393 #endif
5394 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5395 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
5398 #if defined(TARGET_PPC64)
5399 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
5400 trampptr = &rt_sf->trampoline[0];
5402 sc = &rt_sf->uc.tuc_sigcontext;
5403 __put_user(h2g(mctx), &sc->regs);
5404 __put_user(sig, &sc->signal);
5405 #else
5406 mctx = &rt_sf->uc.tuc_mcontext;
5407 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
5408 #endif
5410 save_user_regs(env, mctx);
5411 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
5413 /* The kernel checks for the presence of a VDSO here. We don't
5414 emulate a vdso, so use a sigreturn system call. */
5415 env->lr = (target_ulong) h2g(trampptr);
5417 /* Turn off all fp exceptions. */
5418 env->fpscr = 0;
5420 /* Create a stack frame for the caller of the handler. */
5421 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
5422 err |= put_user(env->gpr[1], newsp, target_ulong);
5424 if (err)
5425 goto sigsegv;
5427 /* Set up registers for signal handler. */
5428 env->gpr[1] = newsp;
5429 env->gpr[3] = (target_ulong) sig;
5430 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
5431 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
5432 env->gpr[6] = (target_ulong) h2g(rt_sf);
5434 #if defined(TARGET_PPC64)
5435 if (get_ppc64_abi(image) < 2) {
5436 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
5437 struct target_func_ptr *handler =
5438 (struct target_func_ptr *)g2h(ka->_sa_handler);
5439 env->nip = tswapl(handler->entry);
5440 env->gpr[2] = tswapl(handler->toc);
5441 } else {
5442 /* ELFv2 PPC64 function pointers are entry points, but R12
5443 * must also be set */
5444 env->nip = tswapl((target_ulong) ka->_sa_handler);
5445 env->gpr[12] = env->nip;
5447 #else
5448 env->nip = (target_ulong) ka->_sa_handler;
5449 #endif
5451 /* Signal handlers are entered in big-endian mode. */
5452 env->msr &= ~(1ull << MSR_LE);
5454 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5455 return;
5457 sigsegv:
5458 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5459 force_sigsegv(sig);
5463 #if !defined(TARGET_PPC64)
5464 long do_sigreturn(CPUPPCState *env)
5466 struct target_sigcontext *sc = NULL;
5467 struct target_mcontext *sr = NULL;
5468 target_ulong sr_addr = 0, sc_addr;
5469 sigset_t blocked;
5470 target_sigset_t set;
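/* setup_frame pushed a dummy stack frame of SIGNAL_FRAMESIZE bytes below the
   signal frame, so skip over it to reach the sigcontext. */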
5472 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5473 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5474 goto sigsegv;
5476 #if defined(TARGET_PPC64)
5477 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5478 #else
5479 __get_user(set.sig[0], &sc->oldmask);
5480 __get_user(set.sig[1], &sc->_unused[3]);
5481 #endif
5482 target_to_host_sigset_internal(&blocked, &set);
5483 set_sigmask(&blocked);
5485 __get_user(sr_addr, &sc->regs);
5486 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5487 goto sigsegv;
5488 restore_user_regs(env, sr, 1);
5490 unlock_user_struct(sr, sr_addr, 1);
5491 unlock_user_struct(sc, sc_addr, 1);
5492 return -TARGET_QEMU_ESIGRETURN;
5494 sigsegv:
5495 unlock_user_struct(sr, sr_addr, 1);
5496 unlock_user_struct(sc, sc_addr, 1);
5497 force_sig(TARGET_SIGSEGV);
5498 return -TARGET_QEMU_ESIGRETURN;
5500 #endif /* !defined(TARGET_PPC64) */
5502 /* See arch/powerpc/kernel/signal_32.c. */
5503 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
5505 struct target_mcontext *mcp;
5506 target_ulong mcp_addr;
5507 sigset_t blocked;
5508 target_sigset_t set;
5510 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
5511 sizeof (set)))
5512 return 1;
5514 #if defined(TARGET_PPC64)
5515 mcp_addr = h2g(ucp) +
5516 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
5517 #else
5518 __get_user(mcp_addr, &ucp->tuc_regs);
5519 #endif
5521 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
5522 return 1;
5524 target_to_host_sigset_internal(&blocked, &set);
5525 set_sigmask(&blocked);
5526 restore_user_regs(env, mcp, sig);
5528 unlock_user_struct(mcp, mcp_addr, 1);
5529 return 0;
5532 long do_rt_sigreturn(CPUPPCState *env)
5534 struct target_rt_sigframe *rt_sf = NULL;
5535 target_ulong rt_sf_addr;
5537 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
5538 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
5539 goto sigsegv;
5541 if (do_setcontext(&rt_sf->uc, env, 1))
5542 goto sigsegv;
5544 do_sigaltstack(rt_sf_addr
5545 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
5546 0, env->gpr[1]);
5548 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5549 return -TARGET_QEMU_ESIGRETURN;
5551 sigsegv:
5552 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5553 force_sig(TARGET_SIGSEGV);
5554 return -TARGET_QEMU_ESIGRETURN;
5557 #elif defined(TARGET_M68K)
5559 struct target_sigcontext {
5560 abi_ulong sc_mask;
5561 abi_ulong sc_usp;
5562 abi_ulong sc_d0;
5563 abi_ulong sc_d1;
5564 abi_ulong sc_a0;
5565 abi_ulong sc_a1;
5566 unsigned short sc_sr;
5567 abi_ulong sc_pc;
5570 struct target_sigframe
5572 abi_ulong pretcode;
5573 int sig;
5574 int code;
5575 abi_ulong psc;
5576 char retcode[8];
5577 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5578 struct target_sigcontext sc;
5581 typedef int target_greg_t;
5582 #define TARGET_NGREG 18
5583 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5585 typedef struct target_fpregset {
5586 int f_fpcntl[3];
5587 int f_fpregs[8*3];
5588 } target_fpregset_t;
5590 struct target_mcontext {
5591 int version;
5592 target_gregset_t gregs;
5593 target_fpregset_t fpregs;
5596 #define TARGET_MCONTEXT_VERSION 2
5598 struct target_ucontext {
5599 abi_ulong tuc_flags;
5600 abi_ulong tuc_link;
5601 target_stack_t tuc_stack;
5602 struct target_mcontext tuc_mcontext;
5603 abi_long tuc_filler[80];
5604 target_sigset_t tuc_sigmask;
5607 struct target_rt_sigframe
5609 abi_ulong pretcode;
5610 int sig;
5611 abi_ulong pinfo;
5612 abi_ulong puc;
5613 char retcode[8];
5614 struct target_siginfo info;
5615 struct target_ucontext uc;
5618 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5619 abi_ulong mask)
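/* Combine the system byte of SR with the live condition codes from CCR. */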
5621 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5622 __put_user(mask, &sc->sc_mask);
5623 __put_user(env->aregs[7], &sc->sc_usp);
5624 __put_user(env->dregs[0], &sc->sc_d0);
5625 __put_user(env->dregs[1], &sc->sc_d1);
5626 __put_user(env->aregs[0], &sc->sc_a0);
5627 __put_user(env->aregs[1], &sc->sc_a1);
5628 __put_user(sr, &sc->sc_sr);
5629 __put_user(env->pc, &sc->sc_pc);
5632 static void
5633 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5635 int temp;
5637 __get_user(env->aregs[7], &sc->sc_usp);
5638 __get_user(env->dregs[0], &sc->sc_d0);
5639 __get_user(env->dregs[1], &sc->sc_d1);
5640 __get_user(env->aregs[0], &sc->sc_a0);
5641 __get_user(env->aregs[1], &sc->sc_a1);
5642 __get_user(env->pc, &sc->sc_pc);
5643 __get_user(temp, &sc->sc_sr);
5644 cpu_m68k_set_ccr(env, temp);
5648 * Determine which stack to use.
5650 static inline abi_ulong
5651 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5652 size_t frame_size)
5654 unsigned long sp;
5656 sp = regs->aregs[7];
5658 /* This is the X/Open sanctioned signal stack switching. */
5659 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5660 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5663 return ((sp - frame_size) & -8UL);
5666 static void setup_frame(int sig, struct target_sigaction *ka,
5667 target_sigset_t *set, CPUM68KState *env)
5669 struct target_sigframe *frame;
5670 abi_ulong frame_addr;
5671 abi_ulong retcode_addr;
5672 abi_ulong sc_addr;
5673 int i;
5675 frame_addr = get_sigframe(ka, env, sizeof *frame);
5676 trace_user_setup_frame(env, frame_addr);
5677 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5678 goto give_sigsegv;
5681 __put_user(sig, &frame->sig);
5683 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5684 __put_user(sc_addr, &frame->psc);
5686 setup_sigcontext(&frame->sc, env, set->sig[0]);
5688 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5689 __put_user(set->sig[i], &frame->extramask[i - 1]);
5692 /* Set up to return from userspace. */
5694 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5695 __put_user(retcode_addr, &frame->pretcode);
5697 /* moveq #,d0; trap #0 */
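/* 0x70xx is moveq #xx,d0 and 0x4e40 is trap #0; the '<< 16' below folds the
   syscall number into the moveq immediate. */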
5699 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5700 (uint32_t *)(frame->retcode));
5702 /* Set up to return from userspace */
5704 env->aregs[7] = frame_addr;
5705 env->pc = ka->_sa_handler;
5707 unlock_user_struct(frame, frame_addr, 1);
5708 return;
5710 give_sigsegv:
5711 force_sigsegv(sig);
5714 static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
5715 CPUM68KState *env)
5717 int i;
5718 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5720 __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
5721 __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
5722 /* fpiar is not emulated */
5724 for (i = 0; i < 8; i++) {
5725 uint32_t high = env->fregs[i].d.high << 16;
5726 __put_user(high, &fpregs->f_fpregs[i * 3]);
5727 __put_user(env->fregs[i].d.low,
5728 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5732 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5733 CPUM68KState *env)
5735 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5736 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5738 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5739 __put_user(env->dregs[0], &gregs[0]);
5740 __put_user(env->dregs[1], &gregs[1]);
5741 __put_user(env->dregs[2], &gregs[2]);
5742 __put_user(env->dregs[3], &gregs[3]);
5743 __put_user(env->dregs[4], &gregs[4]);
5744 __put_user(env->dregs[5], &gregs[5]);
5745 __put_user(env->dregs[6], &gregs[6]);
5746 __put_user(env->dregs[7], &gregs[7]);
5747 __put_user(env->aregs[0], &gregs[8]);
5748 __put_user(env->aregs[1], &gregs[9]);
5749 __put_user(env->aregs[2], &gregs[10]);
5750 __put_user(env->aregs[3], &gregs[11]);
5751 __put_user(env->aregs[4], &gregs[12]);
5752 __put_user(env->aregs[5], &gregs[13]);
5753 __put_user(env->aregs[6], &gregs[14]);
5754 __put_user(env->aregs[7], &gregs[15]);
5755 __put_user(env->pc, &gregs[16]);
5756 __put_user(sr, &gregs[17]);
5758 target_rt_save_fpu_state(uc, env);
5760 return 0;
5763 static inline void target_rt_restore_fpu_state(CPUM68KState *env,
5764 struct target_ucontext *uc)
5766 int i;
5767 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5768 uint32_t fpcr;
5770 __get_user(fpcr, &fpregs->f_fpcntl[0]);
5771 cpu_m68k_set_fpcr(env, fpcr);
5772 __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
5773 /* fpiar is not emulated */
5775 for (i = 0; i < 8; i++) {
5776 uint32_t high;
5777 __get_user(high, &fpregs->f_fpregs[i * 3]);
5778 env->fregs[i].d.high = high >> 16;
5779 __get_user(env->fregs[i].d.low,
5780 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5784 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5785 struct target_ucontext *uc)
5787 int temp;
5788 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5790 __get_user(temp, &uc->tuc_mcontext.version);
5791 if (temp != TARGET_MCONTEXT_VERSION)
5792 goto badframe;
5794 /* restore passed registers */
5795 __get_user(env->dregs[0], &gregs[0]);
5796 __get_user(env->dregs[1], &gregs[1]);
5797 __get_user(env->dregs[2], &gregs[2]);
5798 __get_user(env->dregs[3], &gregs[3]);
5799 __get_user(env->dregs[4], &gregs[4]);
5800 __get_user(env->dregs[5], &gregs[5]);
5801 __get_user(env->dregs[6], &gregs[6]);
5802 __get_user(env->dregs[7], &gregs[7]);
5803 __get_user(env->aregs[0], &gregs[8]);
5804 __get_user(env->aregs[1], &gregs[9]);
5805 __get_user(env->aregs[2], &gregs[10]);
5806 __get_user(env->aregs[3], &gregs[11]);
5807 __get_user(env->aregs[4], &gregs[12]);
5808 __get_user(env->aregs[5], &gregs[13]);
5809 __get_user(env->aregs[6], &gregs[14]);
5810 __get_user(env->aregs[7], &gregs[15]);
5811 __get_user(env->pc, &gregs[16]);
5812 __get_user(temp, &gregs[17]);
5813 cpu_m68k_set_ccr(env, temp);
5815 target_rt_restore_fpu_state(env, uc);
5817 return 0;
5819 badframe:
5820 return 1;
5823 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5824 target_siginfo_t *info,
5825 target_sigset_t *set, CPUM68KState *env)
5827 struct target_rt_sigframe *frame;
5828 abi_ulong frame_addr;
5829 abi_ulong retcode_addr;
5830 abi_ulong info_addr;
5831 abi_ulong uc_addr;
5832 int err = 0;
5833 int i;
5835 frame_addr = get_sigframe(ka, env, sizeof *frame);
5836 trace_user_setup_rt_frame(env, frame_addr);
5837 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5838 goto give_sigsegv;
5841 __put_user(sig, &frame->sig);
5843 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5844 __put_user(info_addr, &frame->pinfo);
5846 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5847 __put_user(uc_addr, &frame->puc);
5849 tswap_siginfo(&frame->info, info);
5851 /* Create the ucontext */
5853 __put_user(0, &frame->uc.tuc_flags);
5854 __put_user(0, &frame->uc.tuc_link);
5855 __put_user(target_sigaltstack_used.ss_sp,
5856 &frame->uc.tuc_stack.ss_sp);
5857 __put_user(sas_ss_flags(env->aregs[7]),
5858 &frame->uc.tuc_stack.ss_flags);
5859 __put_user(target_sigaltstack_used.ss_size,
5860 &frame->uc.tuc_stack.ss_size);
5861 err |= target_rt_setup_ucontext(&frame->uc, env);
5863 if (err)
5864 goto give_sigsegv;
5866 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5867 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5870 /* Set up to return from userspace. */
5872 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5873 __put_user(retcode_addr, &frame->pretcode);
5875 /* moveq #,d0; notb d0; trap #0 */
5877 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5878 (uint32_t *)(frame->retcode + 0));
5879 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
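/*
 * Trampoline encoding: the 32-bit word written above packs the two
 * 16-bit opcodes 0x70xx (moveq #xx,%d0 with xx = TARGET_NR_rt_sigreturn
 * ^ 0xff) and 0x4600 (not.b %d0), and 0x4e40 is trap #0.  moveq
 * sign-extends its 8-bit immediate, so the complemented syscall number
 * is loaded as a small positive value and not.b then recovers the real
 * number in d0; this is the same trick the m68k kernel uses.
 */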
5881 if (err)
5882 goto give_sigsegv;
5884 /* Set up to return from userspace */
5886 env->aregs[7] = frame_addr;
5887 env->pc = ka->_sa_handler;
5889 unlock_user_struct(frame, frame_addr, 1);
5890 return;
5892 give_sigsegv:
5893 unlock_user_struct(frame, frame_addr, 1);
5894 force_sigsegv(sig);
5897 long do_sigreturn(CPUM68KState *env)
5899 struct target_sigframe *frame;
5900 abi_ulong frame_addr = env->aregs[7] - 4;
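/*
 * The handler returned through an rts that popped the pretcode pointer,
 * so the frame is assumed to start 4 bytes below the current stack
 * pointer, mirroring the m68k kernel's "usp - 4".
 */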
5901 target_sigset_t target_set;
5902 sigset_t set;
5903 int i;
5905 trace_user_do_sigreturn(env, frame_addr);
5906 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5907 goto badframe;
5909 /* set blocked signals */
5911 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5913 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5914 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5917 target_to_host_sigset_internal(&set, &target_set);
5918 set_sigmask(&set);
5920 /* restore registers */
5922 restore_sigcontext(env, &frame->sc);
5924 unlock_user_struct(frame, frame_addr, 0);
5925 return -TARGET_QEMU_ESIGRETURN;
5927 badframe:
5928 force_sig(TARGET_SIGSEGV);
5929 return -TARGET_QEMU_ESIGRETURN;
5932 long do_rt_sigreturn(CPUM68KState *env)
5934 struct target_rt_sigframe *frame;
5935 abi_ulong frame_addr = env->aregs[7] - 4;
5936 sigset_t set;
5938 trace_user_do_rt_sigreturn(env, frame_addr);
5939 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5940 goto badframe;
5942 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5943 set_sigmask(&set);
5945 /* restore registers */
5947 if (target_rt_restore_ucontext(env, &frame->uc))
5948 goto badframe;
5950 if (do_sigaltstack(frame_addr +
5951 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5952 0, get_sp_from_cpustate(env)) == -EFAULT)
5953 goto badframe;
5955 unlock_user_struct(frame, frame_addr, 0);
5956 return -TARGET_QEMU_ESIGRETURN;
5958 badframe:
5959 unlock_user_struct(frame, frame_addr, 0);
5960 force_sig(TARGET_SIGSEGV);
5961 return -TARGET_QEMU_ESIGRETURN;
5964 #elif defined(TARGET_ALPHA)
5966 struct target_sigcontext {
5967 abi_long sc_onstack;
5968 abi_long sc_mask;
5969 abi_long sc_pc;
5970 abi_long sc_ps;
5971 abi_long sc_regs[32];
5972 abi_long sc_ownedfp;
5973 abi_long sc_fpregs[32];
5974 abi_ulong sc_fpcr;
5975 abi_ulong sc_fp_control;
5976 abi_ulong sc_reserved1;
5977 abi_ulong sc_reserved2;
5978 abi_ulong sc_ssize;
5979 abi_ulong sc_sbase;
5980 abi_ulong sc_traparg_a0;
5981 abi_ulong sc_traparg_a1;
5982 abi_ulong sc_traparg_a2;
5983 abi_ulong sc_fp_trap_pc;
5984 abi_ulong sc_fp_trigger_sum;
5985 abi_ulong sc_fp_trigger_inst;
5988 struct target_ucontext {
5989 abi_ulong tuc_flags;
5990 abi_ulong tuc_link;
5991 abi_ulong tuc_osf_sigmask;
5992 target_stack_t tuc_stack;
5993 struct target_sigcontext tuc_mcontext;
5994 target_sigset_t tuc_sigmask;
5997 struct target_sigframe {
5998 struct target_sigcontext sc;
5999 unsigned int retcode[3];
6002 struct target_rt_sigframe {
6003 target_siginfo_t info;
6004 struct target_ucontext uc;
6005 unsigned int retcode[3];
6008 #define INSN_MOV_R30_R16 0x47fe0410
6009 #define INSN_LDI_R0 0x201f0000
6010 #define INSN_CALLSYS 0x00000083
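/*
 * Building blocks for the sigreturn trampolines: INSN_MOV_R30_R16 copies
 * the stack pointer ($30) into the first argument register ($16),
 * INSN_LDI_R0 is an "lda $0, imm($31)" skeleton to which the syscall
 * number is added below, and INSN_CALLSYS is the call_pal callsys that
 * enters the kernel; the Alpha kernel emits the same three-instruction
 * sequence.
 */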
6012 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
6013 abi_ulong frame_addr, target_sigset_t *set)
6015 int i;
6017 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
6018 __put_user(set->sig[0], &sc->sc_mask);
6019 __put_user(env->pc, &sc->sc_pc);
6020 __put_user(8, &sc->sc_ps);
6022 for (i = 0; i < 31; ++i) {
6023 __put_user(env->ir[i], &sc->sc_regs[i]);
6025 __put_user(0, &sc->sc_regs[31]);
6027 for (i = 0; i < 31; ++i) {
6028 __put_user(env->fir[i], &sc->sc_fpregs[i]);
6030 __put_user(0, &sc->sc_fpregs[31]);
6031 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
6033 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
6034 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
6035 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
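/*
 * Slot 31 is the architectural zero register in both the integer and FP
 * register files, so it is stored as 0; the trap-argument fields are not
 * tracked by the emulation and are left as 0 (see the FIXMEs above).
 */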
6038 static void restore_sigcontext(CPUAlphaState *env,
6039 struct target_sigcontext *sc)
6041 uint64_t fpcr;
6042 int i;
6044 __get_user(env->pc, &sc->sc_pc);
6046 for (i = 0; i < 31; ++i) {
6047 __get_user(env->ir[i], &sc->sc_regs[i]);
6049 for (i = 0; i < 31; ++i) {
6050 __get_user(env->fir[i], &sc->sc_fpregs[i]);
6053 __get_user(fpcr, &sc->sc_fpcr);
6054 cpu_alpha_store_fpcr(env, fpcr);
6057 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
6058 CPUAlphaState *env,
6059 unsigned long framesize)
6061 abi_ulong sp = env->ir[IR_SP];
6063 /* This is the X/Open sanctioned signal stack switching. */
6064 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6065 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6067 return (sp - framesize) & -32;
6070 static void setup_frame(int sig, struct target_sigaction *ka,
6071 target_sigset_t *set, CPUAlphaState *env)
6073 abi_ulong frame_addr, r26;
6074 struct target_sigframe *frame;
6075 int err = 0;
6077 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6078 trace_user_setup_frame(env, frame_addr);
6079 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6080 goto give_sigsegv;
6083 setup_sigcontext(&frame->sc, env, frame_addr, set);
6085 if (ka->sa_restorer) {
6086 r26 = ka->sa_restorer;
6087 } else {
6088 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6089 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6090 &frame->retcode[1]);
6091 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6092 /* imb() */
6093 r26 = frame_addr;
6096 unlock_user_struct(frame, frame_addr, 1);
6098 if (err) {
6099 give_sigsegv:
6100 force_sigsegv(sig);
6101 return;
6104 env->ir[IR_RA] = r26;
6105 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6106 env->ir[IR_A0] = sig;
6107 env->ir[IR_A1] = 0;
6108 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6109 env->ir[IR_SP] = frame_addr;
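/*
 * In both frame setups the handler is entered with a0 = signal number,
 * a1 = siginfo pointer (or 0 in the non-rt case), a2 = pointer to the
 * sigcontext or ucontext, ra pointing at the restorer or the on-stack
 * trampoline, and pv equal to pc as the Alpha calling standard requires
 * so the handler can derive its GP.
 */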
6112 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6113 target_siginfo_t *info,
6114 target_sigset_t *set, CPUAlphaState *env)
6116 abi_ulong frame_addr, r26;
6117 struct target_rt_sigframe *frame;
6118 int i, err = 0;
6120 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6121 trace_user_setup_rt_frame(env, frame_addr);
6122 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6123 goto give_sigsegv;
6126 tswap_siginfo(&frame->info, info);
6128 __put_user(0, &frame->uc.tuc_flags);
6129 __put_user(0, &frame->uc.tuc_link);
6130 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6131 __put_user(target_sigaltstack_used.ss_sp,
6132 &frame->uc.tuc_stack.ss_sp);
6133 __put_user(sas_ss_flags(env->ir[IR_SP]),
6134 &frame->uc.tuc_stack.ss_flags);
6135 __put_user(target_sigaltstack_used.ss_size,
6136 &frame->uc.tuc_stack.ss_size);
6137 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6138 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6139 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6142 if (ka->sa_restorer) {
6143 r26 = ka->sa_restorer;
6144 } else {
6145 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6146 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6147 &frame->retcode[1]);
6148 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6149 /* imb(); */
6150 r26 = frame_addr;
6153 if (err) {
6154 give_sigsegv:
6155 force_sigsegv(sig);
6156 return;
6159 env->ir[IR_RA] = r26;
6160 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6161 env->ir[IR_A0] = sig;
6162 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6163 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6164 env->ir[IR_SP] = frame_addr;
6167 long do_sigreturn(CPUAlphaState *env)
6169 struct target_sigcontext *sc;
6170 abi_ulong sc_addr = env->ir[IR_A0];
6171 target_sigset_t target_set;
6172 sigset_t set;
6174 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6175 goto badframe;
6178 target_sigemptyset(&target_set);
6179 __get_user(target_set.sig[0], &sc->sc_mask);
6181 target_to_host_sigset_internal(&set, &target_set);
6182 set_sigmask(&set);
6184 restore_sigcontext(env, sc);
6185 unlock_user_struct(sc, sc_addr, 0);
6186 return -TARGET_QEMU_ESIGRETURN;
6188 badframe:
6189 force_sig(TARGET_SIGSEGV);
6190 return -TARGET_QEMU_ESIGRETURN;
6193 long do_rt_sigreturn(CPUAlphaState *env)
6195 abi_ulong frame_addr = env->ir[IR_A0];
6196 struct target_rt_sigframe *frame;
6197 sigset_t set;
6199 trace_user_do_rt_sigreturn(env, frame_addr);
6200 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6201 goto badframe;
6203 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6204 set_sigmask(&set);
6206 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6207 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6208 uc.tuc_stack),
6209 0, env->ir[IR_SP]) == -EFAULT) {
6210 goto badframe;
6213 unlock_user_struct(frame, frame_addr, 0);
6214 return -TARGET_QEMU_ESIGRETURN;
6217 badframe:
6218 unlock_user_struct(frame, frame_addr, 0);
6219 force_sig(TARGET_SIGSEGV);
6220 return -TARGET_QEMU_ESIGRETURN;
6223 #elif defined(TARGET_TILEGX)
6225 struct target_sigcontext {
6226 union {
6227 /* General-purpose registers. */
6228 abi_ulong gregs[56];
6229 struct {
6230 abi_ulong __gregs[53];
6231 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
6232 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
6233 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
6236 abi_ulong pc; /* Program counter. */
6237 abi_ulong ics; /* In Interrupt Critical Section? */
6238 abi_ulong faultnum; /* Fault number. */
6239 abi_ulong pad[5];
6242 struct target_ucontext {
6243 abi_ulong tuc_flags;
6244 abi_ulong tuc_link;
6245 target_stack_t tuc_stack;
6246 struct target_sigcontext tuc_mcontext;
6247 target_sigset_t tuc_sigmask; /* mask last for extensibility */
6250 struct target_rt_sigframe {
6251 unsigned char save_area[16]; /* caller save area */
6252 struct target_siginfo info;
6253 struct target_ucontext uc;
6254 abi_ulong retcode[2];
6257 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
6258 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
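/*
 * The two-bundle trampoline loads the syscall number into r10, the
 * TILE-Gx syscall-number register, and issues swint1, the syscall trap;
 * 139 is __NR_rt_sigreturn in the asm-generic unistd numbering that
 * TILE-Gx uses.
 */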
6261 static void setup_sigcontext(struct target_sigcontext *sc,
6262 CPUArchState *env, int signo)
6264 int i;
6266 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6267 __put_user(env->regs[i], &sc->gregs[i]);
6270 __put_user(env->pc, &sc->pc);
6271 __put_user(0, &sc->ics);
6272 __put_user(signo, &sc->faultnum);
6275 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6277 int i;
6279 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6280 __get_user(env->regs[i], &sc->gregs[i]);
6283 __get_user(env->pc, &sc->pc);
6286 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6287 size_t frame_size)
6289 unsigned long sp = env->regs[TILEGX_R_SP];
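/*
 * If we are already on the alternate stack and the new frame would run
 * off its end, return an unmapped address so that lock_user_struct()
 * fails in the caller and the signal is turned into a SIGSEGV; this
 * mirrors the kernel's overflow check.
 */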
6291 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6292 return -1UL;
6295 if ((ka->sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) {
6296 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6299 sp -= frame_size;
6300 sp &= -16UL;
6301 return sp;
6304 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6305 target_siginfo_t *info,
6306 target_sigset_t *set, CPUArchState *env)
6308 abi_ulong frame_addr;
6309 struct target_rt_sigframe *frame;
6310 unsigned long restorer;
6312 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6313 trace_user_setup_rt_frame(env, frame_addr);
6314 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6315 goto give_sigsegv;
6318 /* Always write at least the signal number for the stack backtracer. */
6319 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6320 /* At sigreturn time, restore the callee-save registers too. */
6321 tswap_siginfo(&frame->info, info);
6322 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: can this be skipped? */
6323 } else {
6324 __put_user(info->si_signo, &frame->info.si_signo);
6327 /* Create the ucontext. */
6328 __put_user(0, &frame->uc.tuc_flags);
6329 __put_user(0, &frame->uc.tuc_link);
6330 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6331 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6332 &frame->uc.tuc_stack.ss_flags);
6333 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6334 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6336 if (ka->sa_flags & TARGET_SA_RESTORER) {
6337 restorer = (unsigned long) ka->sa_restorer;
6338 } else {
6339 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6340 __put_user(INSN_SWINT1, &frame->retcode[1]);
6341 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6343 env->pc = (unsigned long) ka->_sa_handler;
/* Guest registers must hold guest addresses, so pass frame_addr and
 * frame-relative offsets rather than host pointers from lock_user_struct. */
6344 env->regs[TILEGX_R_SP] = frame_addr;
6345 env->regs[TILEGX_R_LR] = restorer;
6346 env->regs[0] = (unsigned long) sig;
6347 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6348 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6349 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: can this be skipped? */
6351 unlock_user_struct(frame, frame_addr, 1);
6352 return;
6354 give_sigsegv:
6355 force_sigsegv(sig);
6358 long do_rt_sigreturn(CPUTLGState *env)
6360 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6361 struct target_rt_sigframe *frame;
6362 sigset_t set;
6364 trace_user_do_rt_sigreturn(env, frame_addr);
6365 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6366 goto badframe;
6368 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6369 set_sigmask(&set);
6371 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6372 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6373 uc.tuc_stack),
6374 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6375 goto badframe;
6378 unlock_user_struct(frame, frame_addr, 0);
6379 return -TARGET_QEMU_ESIGRETURN;
6382 badframe:
6383 unlock_user_struct(frame, frame_addr, 0);
6384 force_sig(TARGET_SIGSEGV);
6385 return -TARGET_QEMU_ESIGRETURN;
6388 #elif defined(TARGET_HPPA)
6390 struct target_sigcontext {
6391 abi_ulong sc_flags;
6392 abi_ulong sc_gr[32];
6393 uint64_t sc_fr[32];
6394 abi_ulong sc_iasq[2];
6395 abi_ulong sc_iaoq[2];
6396 abi_ulong sc_sar;
6399 struct target_ucontext {
6400 abi_uint tuc_flags;
6401 abi_ulong tuc_link;
6402 target_stack_t tuc_stack;
6403 abi_uint pad[1];
6404 struct target_sigcontext tuc_mcontext;
6405 target_sigset_t tuc_sigmask;
6408 struct target_rt_sigframe {
6409 abi_uint tramp[9];
6410 target_siginfo_t info;
6411 struct target_ucontext uc;
6412 /* hidden location of upper halves of pa2.0 64-bit gregs */
6415 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
6417 int flags = 0;
6418 int i;
6420 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
6422 if (env->iaoq_f < TARGET_PAGE_SIZE) {
6423 /* In the gateway page, executing a syscall. */
6424 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
6425 __put_user(env->gr[31], &sc->sc_iaoq[0]);
6426 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
6427 } else {
6428 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
6429 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
6431 __put_user(0, &sc->sc_iasq[0]);
6432 __put_user(0, &sc->sc_iasq[1]);
6433 __put_user(flags, &sc->sc_flags);
6435 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
6436 for (i = 1; i < 32; ++i) {
6437 __put_user(env->gr[i], &sc->sc_gr[i]);
6440 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
6441 for (i = 1; i < 32; ++i) {
6442 __put_user(env->fr[i], &sc->sc_fr[i]);
6445 __put_user(env->cr[CR_SAR], &sc->sc_sar);
6448 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6450 target_ulong psw;
6451 int i;
6453 __get_user(psw, &sc->sc_gr[0]);
6454 cpu_hppa_put_psw(env, psw);
6456 for (i = 1; i < 32; ++i) {
6457 __get_user(env->gr[i], &sc->sc_gr[i]);
6459 for (i = 0; i < 32; ++i) {
6460 __get_user(env->fr[i], &sc->sc_fr[i]);
6462 cpu_hppa_loaded_fr0(env);
6464 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6465 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6466 __get_user(env->cr[CR_SAR], &sc->sc_sar);
6469 /* No, this doesn't look right, but it's copied straight from the kernel. */
6470 #define PARISC_RT_SIGFRAME_SIZE32 \
6471 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
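/*
 * Same formula as the kernel's PARISC_RT_SIGFRAME_SIZE32: the frame size
 * plus 48 bytes of slack (presumably the 32-bit ABI's minimum call frame
 * marker and argument area), rounded out to a multiple of 64 so the
 * stack pointer stays 64-byte aligned.
 */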
6473 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6474 target_siginfo_t *info,
6475 target_sigset_t *set, CPUArchState *env)
6477 abi_ulong frame_addr, sp, haddr;
6478 struct target_rt_sigframe *frame;
6479 int i;
6481 sp = env->gr[30];
6482 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6483 if (sas_ss_flags(sp) == 0) {
6484 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6487 frame_addr = QEMU_ALIGN_UP(sp, 64);
6488 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6490 trace_user_setup_rt_frame(env, frame_addr);
6492 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6493 goto give_sigsegv;
6496 tswap_siginfo(&frame->info, info);
6497 frame->uc.tuc_flags = 0;
6498 frame->uc.tuc_link = 0;
6500 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6501 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6502 &frame->uc.tuc_stack.ss_flags);
6503 __put_user(target_sigaltstack_used.ss_size,
6504 &frame->uc.tuc_stack.ss_size);
6506 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6507 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6510 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6512 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6513 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6514 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6515 __put_user(0x08000240, frame->tramp + 3); /* nop */
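/*
 * Per the inline decodings above, the trampoline zeroes %r25, loads the
 * rt_sigreturn syscall number into %r20 and branches into the syscall
 * gateway page via be,l 0x100(%sr2,%r0) with a nop in the delay slot;
 * %r2 (the return pointer) is aimed at it below so the handler's return
 * lands here.
 */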
6517 unlock_user_struct(frame, frame_addr, 1);
6519 env->gr[2] = h2g(frame->tramp);
6520 env->gr[30] = sp;
6521 env->gr[26] = sig;
6522 env->gr[25] = h2g(&frame->info);
6523 env->gr[24] = h2g(&frame->uc);
6525 haddr = ka->_sa_handler;
6526 if (haddr & 2) {
6527 /* Function descriptor. */
6528 target_ulong *fdesc, dest;
6530 haddr &= -4;
6531 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
6532 goto give_sigsegv;
6534 __get_user(dest, fdesc);
6535 __get_user(env->gr[19], fdesc + 1);
6536 unlock_user_struct(fdesc, haddr, 1);
6537 haddr = dest;
6539 env->iaoq_f = haddr;
6540 env->iaoq_b = haddr + 4;
6541 return;
6543 give_sigsegv:
6544 force_sigsegv(sig);
6547 long do_rt_sigreturn(CPUArchState *env)
6549 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
6550 struct target_rt_sigframe *frame;
6551 sigset_t set;
6553 trace_user_do_rt_sigreturn(env, frame_addr);
6554 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6555 goto badframe;
6557 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6558 set_sigmask(&set);
6560 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6561 unlock_user_struct(frame, frame_addr, 0);
6563 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6564 uc.tuc_stack),
6565 0, env->gr[30]) == -EFAULT) {
6566 goto badframe;
6569 unlock_user_struct(frame, frame_addr, 0);
6570 return -TARGET_QEMU_ESIGRETURN;
6572 badframe:
6573 force_sig(TARGET_SIGSEGV);
6574 return -TARGET_QEMU_ESIGRETURN;
6577 #else
6579 static void setup_frame(int sig, struct target_sigaction *ka,
6580 target_sigset_t *set, CPUArchState *env)
6582 fprintf(stderr, "setup_frame: not implemented\n");
6585 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6586 target_siginfo_t *info,
6587 target_sigset_t *set, CPUArchState *env)
6589 fprintf(stderr, "setup_rt_frame: not implemented\n");
6592 long do_sigreturn(CPUArchState *env)
6594 fprintf(stderr, "do_sigreturn: not implemented\n");
6595 return -TARGET_ENOSYS;
6598 long do_rt_sigreturn(CPUArchState *env)
6600 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
6601 return -TARGET_ENOSYS;
6604 #endif
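/*
 * Deliver one guest signal: dequeue it, give gdb a chance to intercept
 * it, then either apply the default action (stop, ignore, or dump core),
 * ignore it, or build the target signal frame on the guest stack with
 * the handler-time signal mask installed first.
 */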
6606 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
6607 struct emulated_sigtable *k)
6609 CPUState *cpu = ENV_GET_CPU(cpu_env);
6610 abi_ulong handler;
6611 sigset_t set;
6612 target_sigset_t target_old_set;
6613 struct target_sigaction *sa;
6614 TaskState *ts = cpu->opaque;
6616 trace_user_handle_signal(cpu_env, sig);
6617 /* dequeue signal */
6618 k->pending = 0;
6620 sig = gdb_handlesig(cpu, sig);
6621 if (!sig) {
6622 sa = NULL;
6623 handler = TARGET_SIG_IGN;
6624 } else {
6625 sa = &sigact_table[sig - 1];
6626 handler = sa->_sa_handler;
6629 if (do_strace) {
6630 print_taken_signal(sig, &k->info);
6633 if (handler == TARGET_SIG_DFL) {
6634 /* Default handler: ignore some signals; the others are job control or fatal. */
6635 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
6636 kill(getpid(), SIGSTOP);
6637 } else if (sig != TARGET_SIGCHLD &&
6638 sig != TARGET_SIGURG &&
6639 sig != TARGET_SIGWINCH &&
6640 sig != TARGET_SIGCONT) {
6641 dump_core_and_abort(sig);
6643 } else if (handler == TARGET_SIG_IGN) {
6644 /* ignore sig */
6645 } else if (handler == TARGET_SIG_ERR) {
6646 dump_core_and_abort(sig);
6647 } else {
6648 /* compute the blocked signals during the handler execution */
6649 sigset_t *blocked_set;
6651 target_to_host_sigset(&set, &sa->sa_mask);
6652 /* SA_NODEFER indicates that the current signal should not be
6653 blocked during the handler */
6654 if (!(sa->sa_flags & TARGET_SA_NODEFER))
6655 sigaddset(&set, target_to_host_signal(sig));
6657 /* save the previous blocked signal state to restore it at the
6658 end of the signal execution (see do_sigreturn) */
6659 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
6661 /* block signals in the handler */
6662 blocked_set = ts->in_sigsuspend ?
6663 &ts->sigsuspend_mask : &ts->signal_mask;
6664 sigorset(&ts->signal_mask, blocked_set, &set);
6665 ts->in_sigsuspend = 0;
6667 /* if the CPU is in VM86 mode, we restore the 32 bit values */
6668 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
6670 CPUX86State *env = cpu_env;
6671 if (env->eflags & VM_MASK)
6672 save_v86_state(env);
6674 #endif
6675 /* prepare the stack frame of the virtual CPU */
6676 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
6677 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
6678 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
6679 || defined(TARGET_NIOS2) || defined(TARGET_X86_64)
6680 /* These targets do not use the traditional (non-RT) signal frames. */
6681 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6682 #else
6683 if (sa->sa_flags & TARGET_SA_SIGINFO)
6684 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6685 else
6686 setup_frame(sig, sa, &target_old_set, cpu_env);
6687 #endif
6688 if (sa->sa_flags & TARGET_SA_RESETHAND) {
6689 sa->_sa_handler = TARGET_SIG_DFL;
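/*
 * Drain all pending guest signals with every host signal blocked: the
 * forced synchronous signal, if any, is delivered first and is never
 * left blocked or ignored; the asynchronous ones are then scanned in
 * signal order, rescanning from the top after each delivery in case it
 * raised a new synchronous signal.  Finally the host mask is restored
 * with SIGSEGV and SIGBUS always unblocked so guest faults can still be
 * taken.
 */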
6694 void process_pending_signals(CPUArchState *cpu_env)
6696 CPUState *cpu = ENV_GET_CPU(cpu_env);
6697 int sig;
6698 TaskState *ts = cpu->opaque;
6699 sigset_t set;
6700 sigset_t *blocked_set;
6702 while (atomic_read(&ts->signal_pending)) {
6703 /* FIXME: This is not threadsafe. */
6704 sigfillset(&set);
6705 sigprocmask(SIG_SETMASK, &set, 0);
6707 restart_scan:
6708 sig = ts->sync_signal.pending;
6709 if (sig) {
6710 /* Synchronous signals are forced;
6711 * see force_sig_info() and its callers in Linux.
6712 * Note that not all of our queue_signal() calls in QEMU correspond
6713 * to force_sig_info() calls in Linux (some are send_sig_info()).
6714 * However, it seems like a kernel bug to me to allow the process
6715 * to block a synchronous signal, since it could then just end up
6716 * looping round and round indefinitely.
6717 */
6718 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
6719 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
6720 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
6721 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
6724 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
6727 for (sig = 1; sig <= TARGET_NSIG; sig++) {
6728 blocked_set = ts->in_sigsuspend ?
6729 &ts->sigsuspend_mask : &ts->signal_mask;
6731 if (ts->sigtab[sig - 1].pending &&
6732 (!sigismember(blocked_set,
6733 target_to_host_signal_table[sig]))) {
6734 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
6735 /* Restart the scan from the beginning, as handle_pending_signal
6736 * might have resulted in a new synchronous signal (e.g. SIGSEGV).
6737 */
6738 goto restart_scan;
6742 /* If no signal is pending, unblock signals and recheck (the act
6743 * of unblocking might cause us to take another host signal, which
6744 * will set signal_pending again).
6745 */
6746 atomic_set(&ts->signal_pending, 0);
6747 ts->in_sigsuspend = 0;
6748 set = ts->signal_mask;
6749 sigdelset(&set, SIGSEGV);
6750 sigdelset(&set, SIGBUS);
6751 sigprocmask(SIG_SETMASK, &set, 0);
6753 ts->in_sigsuspend = 0;