qemu/ar7.git: linux-user/signal.c
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "qemu-common.h"
#include "target_signal.h"
#include "trace.h"

static struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
    /* [SIGIOT] = TARGET_SIGIOT, */
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};

static uint8_t target_to_host_signal_table[_NSIG];

static inline int on_sig_stack(unsigned long sp)
{
    return (sp - target_sigaltstack_used.ss_sp
            < target_sigaltstack_used.ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
    return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

int host_to_target_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return host_to_target_signal_table[sig];
}

int target_to_host_signal(int sig)
{
    if (sig < 0 || sig >= _NSIG)
        return sig;
    return target_to_host_signal_table[sig];
}

static inline void target_sigemptyset(target_sigset_t *set)
{
    memset(set, 0, sizeof(*set));
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}

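/* For example, with TARGET_NSIG_BPW == 32, target signal 35 lives in
 * set->sig[1] bit 2, since (35 - 1) / 32 == 1 and (35 - 1) % 32 == 2;
 * this mirrors the word/bit layout the guest kernel uses for its sigset_t. */
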
static void host_to_target_sigset_internal(target_sigset_t *d,
                                           const sigset_t *s)
{
    int i;
    target_sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (sigismember(s, i)) {
            target_sigaddset(d, host_to_target_signal(i));
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        d->sig[i] = tswapal(d1.sig[i]);
}

static void target_to_host_sigset_internal(sigset_t *d,
                                           const target_sigset_t *s)
{
    int i;
    sigemptyset(d);
    for (i = 1; i <= TARGET_NSIG; i++) {
        if (target_sigismember(s, i)) {
            sigaddset(d, target_to_host_signal(i));
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++)
        s1.sig[i] = tswapal(s->sig[i]);
    target_to_host_sigset_internal(d, &s1);
}

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++)
        d.sig[i] = 0;
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

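/* Other callers (e.g. do_sigaction() below) follow the same pattern as above:
 * call block_signals(), and if it reports that a signal was already pending,
 * return -TARGET_ERESTARTSYS so the syscall is restarted once the pending
 * signal has been delivered to the guest. */
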
#if !defined(TARGET_OPENRISC) && !defined(TARGET_UNICORE32) && \
    !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
static void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}

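/* After the deposit32() above, bits 0..15 of si_code hold the value the guest
 * will eventually see and bits 16..31 hold the QEMU_SI_* marker that says
 * which union member is valid; extract32() and sextract32() in tswap_siginfo()
 * undo this packing. */
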
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we assume that only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

static int fatal_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGCHLD:
    case TARGET_SIGURG:
    case TARGET_SIGWINCH:
        /* Ignored by default.  */
        return 0;
    case TARGET_SIGCONT:
    case TARGET_SIGSTOP:
    case TARGET_SIGTSTP:
    case TARGET_SIGTTIN:
    case TARGET_SIGTTOU:
        /* Job control signals.  */
        return 0;
    default:
        return 1;
    }
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for (i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for (i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for (i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use.  We need at least
           SIGSEGV and SIGBUS, to detect exceptions.  We can not just
           trap all signals because it affects syscall interrupt
           behavior.  But do trap all default-fatal signals.  */
        if (fatal_signal(i))
            sigaction(host_sig, &act, NULL);
    }
}

#ifndef TARGET_UNICORE32
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
static void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
static void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

/* abort execution with signal */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of the target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourselves a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}

/* queue a signal so that it will be sent to the virtual CPU as soon
   as possible */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}

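/* queue_signal() is used for synchronous, CPU-generated signals: it fills in
 * ts->sync_signal rather than ts->sigtab[], while asynchronous host signals
 * are recorded by host_signal_handler() below.  Both are consumed later by
 * process_pending_signals() in the main loop. */
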
#ifndef HAVE_SAFE_SYSCALL
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       so we forward those signals to it */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until the target signal handler is entered.  We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

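/* When this handler returns, the host kernel applies uc->uc_sigmask, so all
 * host signals except SIGSEGV and SIGBUS stay blocked until
 * process_pending_signals() unblocks them again. */
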
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    if (uoss_addr) {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if (uss_addr) {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}

/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal(sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

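/* SIGSEGV and SIGBUS are deliberately skipped above: the handlers installed
 * for them in signal_init() must stay in place so that QEMU keeps seeing the
 * faults generated by guest memory accesses, whatever the guest requested
 * via sigaction(). */
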
822 #if defined(TARGET_I386)
823 /* from the Linux kernel - /arch/x86/include/uapi/asm/sigcontext.h */
825 struct target_fpreg {
826 uint16_t significand[4];
827 uint16_t exponent;
830 struct target_fpxreg {
831 uint16_t significand[4];
832 uint16_t exponent;
833 uint16_t padding[3];
836 struct target_xmmreg {
837 uint32_t element[4];
840 struct target_fpstate_32 {
841 /* Regular FPU environment */
842 uint32_t cw;
843 uint32_t sw;
844 uint32_t tag;
845 uint32_t ipoff;
846 uint32_t cssel;
847 uint32_t dataoff;
848 uint32_t datasel;
849 struct target_fpreg st[8];
850 uint16_t status;
851 uint16_t magic; /* 0xffff = regular FPU data only */
853 /* FXSR FPU environment */
854 uint32_t _fxsr_env[6]; /* FXSR FPU env is ignored */
855 uint32_t mxcsr;
856 uint32_t reserved;
857 struct target_fpxreg fxsr_st[8]; /* FXSR FPU reg data is ignored */
858 struct target_xmmreg xmm[8];
859 uint32_t padding[56];
862 struct target_fpstate_64 {
863 /* FXSAVE format */
864 uint16_t cw;
865 uint16_t sw;
866 uint16_t twd;
867 uint16_t fop;
868 uint64_t rip;
869 uint64_t rdp;
870 uint32_t mxcsr;
871 uint32_t mxcsr_mask;
872 uint32_t st_space[32];
873 uint32_t xmm_space[64];
874 uint32_t reserved[24];
877 #ifndef TARGET_X86_64
878 # define target_fpstate target_fpstate_32
879 #else
880 # define target_fpstate target_fpstate_64
881 #endif
883 struct target_sigcontext_32 {
884 uint16_t gs, __gsh;
885 uint16_t fs, __fsh;
886 uint16_t es, __esh;
887 uint16_t ds, __dsh;
888 uint32_t edi;
889 uint32_t esi;
890 uint32_t ebp;
891 uint32_t esp;
892 uint32_t ebx;
893 uint32_t edx;
894 uint32_t ecx;
895 uint32_t eax;
896 uint32_t trapno;
897 uint32_t err;
898 uint32_t eip;
899 uint16_t cs, __csh;
900 uint32_t eflags;
901 uint32_t esp_at_signal;
902 uint16_t ss, __ssh;
903 uint32_t fpstate; /* pointer */
904 uint32_t oldmask;
905 uint32_t cr2;
908 struct target_sigcontext_64 {
909 uint64_t r8;
910 uint64_t r9;
911 uint64_t r10;
912 uint64_t r11;
913 uint64_t r12;
914 uint64_t r13;
915 uint64_t r14;
916 uint64_t r15;
918 uint64_t rdi;
919 uint64_t rsi;
920 uint64_t rbp;
921 uint64_t rbx;
922 uint64_t rdx;
923 uint64_t rax;
924 uint64_t rcx;
925 uint64_t rsp;
926 uint64_t rip;
928 uint64_t eflags;
930 uint16_t cs;
931 uint16_t gs;
932 uint16_t fs;
933 uint16_t ss;
935 uint64_t err;
936 uint64_t trapno;
937 uint64_t oldmask;
938 uint64_t cr2;
940 uint64_t fpstate; /* pointer */
941 uint64_t padding[8];
944 #ifndef TARGET_X86_64
945 # define target_sigcontext target_sigcontext_32
946 #else
947 # define target_sigcontext target_sigcontext_64
948 #endif
950 /* see Linux/include/uapi/asm-generic/ucontext.h */
951 struct target_ucontext {
952 abi_ulong tuc_flags;
953 abi_ulong tuc_link;
954 target_stack_t tuc_stack;
955 struct target_sigcontext tuc_mcontext;
956 target_sigset_t tuc_sigmask; /* mask last for extensibility */
959 #ifndef TARGET_X86_64
960 struct sigframe {
961 abi_ulong pretcode;
962 int sig;
963 struct target_sigcontext sc;
964 struct target_fpstate fpstate;
965 abi_ulong extramask[TARGET_NSIG_WORDS-1];
966 char retcode[8];
969 struct rt_sigframe {
970 abi_ulong pretcode;
971 int sig;
972 abi_ulong pinfo;
973 abi_ulong puc;
974 struct target_siginfo info;
975 struct target_ucontext uc;
976 struct target_fpstate fpstate;
977 char retcode[8];
980 #else
982 struct rt_sigframe {
983 abi_ulong pretcode;
984 struct target_ucontext uc;
985 struct target_siginfo info;
986 struct target_fpstate fpstate;
989 #endif
992 * Set up a signal frame.
995 /* XXX: save x87 state */
996 static void setup_sigcontext(struct target_sigcontext *sc,
997 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
998 abi_ulong fpstate_addr)
1000 CPUState *cs = CPU(x86_env_get_cpu(env));
1001 #ifndef TARGET_X86_64
1002 uint16_t magic;
1004 /* already locked in setup_frame() */
1005 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
1006 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
1007 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
1008 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
1009 __put_user(env->regs[R_EDI], &sc->edi);
1010 __put_user(env->regs[R_ESI], &sc->esi);
1011 __put_user(env->regs[R_EBP], &sc->ebp);
1012 __put_user(env->regs[R_ESP], &sc->esp);
1013 __put_user(env->regs[R_EBX], &sc->ebx);
1014 __put_user(env->regs[R_EDX], &sc->edx);
1015 __put_user(env->regs[R_ECX], &sc->ecx);
1016 __put_user(env->regs[R_EAX], &sc->eax);
1017 __put_user(cs->exception_index, &sc->trapno);
1018 __put_user(env->error_code, &sc->err);
1019 __put_user(env->eip, &sc->eip);
1020 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
1021 __put_user(env->eflags, &sc->eflags);
1022 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
1023 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
1025 cpu_x86_fsave(env, fpstate_addr, 1);
1026 fpstate->status = fpstate->sw;
1027 magic = 0xffff;
1028 __put_user(magic, &fpstate->magic);
1029 __put_user(fpstate_addr, &sc->fpstate);
1031 /* non-iBCS2 extensions.. */
1032 __put_user(mask, &sc->oldmask);
1033 __put_user(env->cr[2], &sc->cr2);
1034 #else
1035 __put_user(env->regs[R_EDI], &sc->rdi);
1036 __put_user(env->regs[R_ESI], &sc->rsi);
1037 __put_user(env->regs[R_EBP], &sc->rbp);
1038 __put_user(env->regs[R_ESP], &sc->rsp);
1039 __put_user(env->regs[R_EBX], &sc->rbx);
1040 __put_user(env->regs[R_EDX], &sc->rdx);
1041 __put_user(env->regs[R_ECX], &sc->rcx);
1042 __put_user(env->regs[R_EAX], &sc->rax);
1044 __put_user(env->regs[8], &sc->r8);
1045 __put_user(env->regs[9], &sc->r9);
1046 __put_user(env->regs[10], &sc->r10);
1047 __put_user(env->regs[11], &sc->r11);
1048 __put_user(env->regs[12], &sc->r12);
1049 __put_user(env->regs[13], &sc->r13);
1050 __put_user(env->regs[14], &sc->r14);
1051 __put_user(env->regs[15], &sc->r15);
1053 __put_user(cs->exception_index, &sc->trapno);
1054 __put_user(env->error_code, &sc->err);
1055 __put_user(env->eip, &sc->rip);
1057 __put_user(env->eflags, &sc->eflags);
1058 __put_user(env->segs[R_CS].selector, &sc->cs);
1059 __put_user((uint16_t)0, &sc->gs);
1060 __put_user((uint16_t)0, &sc->fs);
1061 __put_user(env->segs[R_SS].selector, &sc->ss);
1063 __put_user(mask, &sc->oldmask);
1064 __put_user(env->cr[2], &sc->cr2);
1066 /* fpstate_addr must be 16 byte aligned for fxsave */
1067 assert(!(fpstate_addr & 0xf));
1069 cpu_x86_fxsave(env, fpstate_addr);
1070 __put_user(fpstate_addr, &sc->fpstate);
1071 #endif
1075 * Determine which stack to use..
1078 static inline abi_ulong
1079 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
1081 unsigned long esp;
1083 /* Default to using normal stack */
1084 esp = env->regs[R_ESP];
1085 #ifdef TARGET_X86_64
1086 esp -= 128; /* this is the redzone */
1087 #endif
1089 /* This is the X/Open sanctioned signal stack switching. */
1090 if (ka->sa_flags & TARGET_SA_ONSTACK) {
1091 if (sas_ss_flags(esp) == 0) {
1092 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1094 } else {
1095 #ifndef TARGET_X86_64
1096 /* This is the legacy signal stack switching. */
1097 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
1098 !(ka->sa_flags & TARGET_SA_RESTORER) &&
1099 ka->sa_restorer) {
1100 esp = (unsigned long) ka->sa_restorer;
1102 #endif
1105 #ifndef TARGET_X86_64
1106 return (esp - frame_size) & -8ul;
1107 #else
1108 return ((esp - frame_size) & (~15ul)) - 8;
1109 #endif
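    /* For example: on 32-bit the frame is pushed below esp and rounded down
     * to an 8-byte boundary, while on 64-bit the "& ~15ul" gives 16-byte
     * alignment and the extra "- 8" leaves rsp congruent to 8 mod 16 at
     * handler entry, which is what the x86-64 ABI expects right after a
     * call instruction. */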
1112 #ifndef TARGET_X86_64
1113 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
1114 static void setup_frame(int sig, struct target_sigaction *ka,
1115 target_sigset_t *set, CPUX86State *env)
1117 abi_ulong frame_addr;
1118 struct sigframe *frame;
1119 int i;
1121 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1122 trace_user_setup_frame(env, frame_addr);
1124 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1125 goto give_sigsegv;
1127 __put_user(sig, &frame->sig);
1129 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
1130 frame_addr + offsetof(struct sigframe, fpstate));
1132 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1133 __put_user(set->sig[i], &frame->extramask[i - 1]);
1136 /* Set up to return from userspace. If provided, use a stub
1137 already in userspace. */
1138 if (ka->sa_flags & TARGET_SA_RESTORER) {
1139 __put_user(ka->sa_restorer, &frame->pretcode);
1140 } else {
1141 uint16_t val16;
1142 abi_ulong retcode_addr;
1143 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
1144 __put_user(retcode_addr, &frame->pretcode);
1145 /* This is popl %eax ; movl $,%eax ; int $0x80 */
1146 val16 = 0xb858;
1147 __put_user(val16, (uint16_t *)(frame->retcode+0));
1148 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
1149 val16 = 0x80cd;
1150 __put_user(val16, (uint16_t *)(frame->retcode+6));
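        /* Written out byte by byte (little-endian halves), the retcode buffer
         * is 0x58 (popl %eax), 0xb8 imm32 (movl $TARGET_NR_sigreturn,%eax)
         * and 0xcd 0x80 (int $0x80): the classic i386 sigreturn trampoline. */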
1153 /* Set up registers for signal handler */
1154 env->regs[R_ESP] = frame_addr;
1155 env->eip = ka->_sa_handler;
1157 cpu_x86_load_seg(env, R_DS, __USER_DS);
1158 cpu_x86_load_seg(env, R_ES, __USER_DS);
1159 cpu_x86_load_seg(env, R_SS, __USER_DS);
1160 cpu_x86_load_seg(env, R_CS, __USER_CS);
1161 env->eflags &= ~TF_MASK;
1163 unlock_user_struct(frame, frame_addr, 1);
1165 return;
1167 give_sigsegv:
1168 force_sigsegv(sig);
1170 #endif
1172 /* compare linux/arch/x86/kernel/signal.c:setup_rt_frame() */
1173 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1174 target_siginfo_t *info,
1175 target_sigset_t *set, CPUX86State *env)
1177 abi_ulong frame_addr;
1178 #ifndef TARGET_X86_64
1179 abi_ulong addr;
1180 #endif
1181 struct rt_sigframe *frame;
1182 int i;
1184 frame_addr = get_sigframe(ka, env, sizeof(*frame));
1185 trace_user_setup_rt_frame(env, frame_addr);
1187 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1188 goto give_sigsegv;
1190 /* These fields are only in rt_sigframe on 32 bit */
1191 #ifndef TARGET_X86_64
1192 __put_user(sig, &frame->sig);
1193 addr = frame_addr + offsetof(struct rt_sigframe, info);
1194 __put_user(addr, &frame->pinfo);
1195 addr = frame_addr + offsetof(struct rt_sigframe, uc);
1196 __put_user(addr, &frame->puc);
1197 #endif
1198 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1199 tswap_siginfo(&frame->info, info);
1202 /* Create the ucontext. */
1203 __put_user(0, &frame->uc.tuc_flags);
1204 __put_user(0, &frame->uc.tuc_link);
1205 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
1206 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
1207 &frame->uc.tuc_stack.ss_flags);
1208 __put_user(target_sigaltstack_used.ss_size,
1209 &frame->uc.tuc_stack.ss_size);
1210 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
1211 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
1213 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1214 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1217 /* Set up to return from userspace. If provided, use a stub
1218 already in userspace. */
1219 #ifndef TARGET_X86_64
1220 if (ka->sa_flags & TARGET_SA_RESTORER) {
1221 __put_user(ka->sa_restorer, &frame->pretcode);
1222 } else {
1223 uint16_t val16;
1224 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1225 __put_user(addr, &frame->pretcode);
1226 /* This is movl $,%eax ; int $0x80 */
1227 __put_user(0xb8, (char *)(frame->retcode+0));
1228 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1229 val16 = 0x80cd;
1230 __put_user(val16, (uint16_t *)(frame->retcode+5));
1232 #else
1233 /* XXX: Would be slightly better to return -EFAULT here if test fails
1234 assert(ka->sa_flags & TARGET_SA_RESTORER); */
1235 __put_user(ka->sa_restorer, &frame->pretcode);
1236 #endif
1238 /* Set up registers for signal handler */
1239 env->regs[R_ESP] = frame_addr;
1240 env->eip = ka->_sa_handler;
1242 #ifndef TARGET_X86_64
1243 env->regs[R_EAX] = sig;
1244 env->regs[R_EDX] = (unsigned long)&frame->info;
1245 env->regs[R_ECX] = (unsigned long)&frame->uc;
1246 #else
1247 env->regs[R_EAX] = 0;
1248 env->regs[R_EDI] = sig;
1249 env->regs[R_ESI] = (unsigned long)&frame->info;
1250 env->regs[R_EDX] = (unsigned long)&frame->uc;
1251 #endif
1253 cpu_x86_load_seg(env, R_DS, __USER_DS);
1254 cpu_x86_load_seg(env, R_ES, __USER_DS);
1255 cpu_x86_load_seg(env, R_CS, __USER_CS);
1256 cpu_x86_load_seg(env, R_SS, __USER_DS);
1257 env->eflags &= ~TF_MASK;
1259 unlock_user_struct(frame, frame_addr, 1);
1261 return;
1263 give_sigsegv:
1264 force_sigsegv(sig);
1267 static int
1268 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc)
1270 unsigned int err = 0;
1271 abi_ulong fpstate_addr;
1272 unsigned int tmpflags;
1274 #ifndef TARGET_X86_64
1275 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1276 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1277 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1278 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1280 env->regs[R_EDI] = tswapl(sc->edi);
1281 env->regs[R_ESI] = tswapl(sc->esi);
1282 env->regs[R_EBP] = tswapl(sc->ebp);
1283 env->regs[R_ESP] = tswapl(sc->esp);
1284 env->regs[R_EBX] = tswapl(sc->ebx);
1285 env->regs[R_EDX] = tswapl(sc->edx);
1286 env->regs[R_ECX] = tswapl(sc->ecx);
1287 env->regs[R_EAX] = tswapl(sc->eax);
1289 env->eip = tswapl(sc->eip);
1290 #else
1291 env->regs[8] = tswapl(sc->r8);
1292 env->regs[9] = tswapl(sc->r9);
1293 env->regs[10] = tswapl(sc->r10);
1294 env->regs[11] = tswapl(sc->r11);
1295 env->regs[12] = tswapl(sc->r12);
1296 env->regs[13] = tswapl(sc->r13);
1297 env->regs[14] = tswapl(sc->r14);
1298 env->regs[15] = tswapl(sc->r15);
1300 env->regs[R_EDI] = tswapl(sc->rdi);
1301 env->regs[R_ESI] = tswapl(sc->rsi);
1302 env->regs[R_EBP] = tswapl(sc->rbp);
1303 env->regs[R_EBX] = tswapl(sc->rbx);
1304 env->regs[R_EDX] = tswapl(sc->rdx);
1305 env->regs[R_EAX] = tswapl(sc->rax);
1306 env->regs[R_ECX] = tswapl(sc->rcx);
1307 env->regs[R_ESP] = tswapl(sc->rsp);
1309 env->eip = tswapl(sc->rip);
1310 #endif
1312 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1313 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1315 tmpflags = tswapl(sc->eflags);
1316 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1317 // regs->orig_eax = -1; /* disable syscall checks */
1319 fpstate_addr = tswapl(sc->fpstate);
1320 if (fpstate_addr != 0) {
1321 if (!access_ok(VERIFY_READ, fpstate_addr,
1322 sizeof(struct target_fpstate)))
1323 goto badframe;
1324 #ifndef TARGET_X86_64
1325 cpu_x86_frstor(env, fpstate_addr, 1);
1326 #else
1327 cpu_x86_fxrstor(env, fpstate_addr);
1328 #endif
1331 return err;
1332 badframe:
1333 return 1;
1336 /* Note: there is no sigreturn on x86_64, there is only rt_sigreturn */
1337 #ifndef TARGET_X86_64
1338 long do_sigreturn(CPUX86State *env)
1340 struct sigframe *frame;
1341 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1342 target_sigset_t target_set;
1343 sigset_t set;
1344 int i;
1346 trace_user_do_sigreturn(env, frame_addr);
1347 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1348 goto badframe;
1349 /* set blocked signals */
1350 __get_user(target_set.sig[0], &frame->sc.oldmask);
1351 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1352 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1355 target_to_host_sigset_internal(&set, &target_set);
1356 set_sigmask(&set);
1358 /* restore registers */
1359 if (restore_sigcontext(env, &frame->sc))
1360 goto badframe;
1361 unlock_user_struct(frame, frame_addr, 0);
1362 return -TARGET_QEMU_ESIGRETURN;
1364 badframe:
1365 unlock_user_struct(frame, frame_addr, 0);
1366 force_sig(TARGET_SIGSEGV);
1367 return -TARGET_QEMU_ESIGRETURN;
1369 #endif
1371 long do_rt_sigreturn(CPUX86State *env)
1373 abi_ulong frame_addr;
1374 struct rt_sigframe *frame;
1375 sigset_t set;
1377 frame_addr = env->regs[R_ESP] - sizeof(abi_ulong);
1378 trace_user_do_rt_sigreturn(env, frame_addr);
1379 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1380 goto badframe;
1381 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1382 set_sigmask(&set);
1384 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
1385 goto badframe;
1388 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1389 get_sp_from_cpustate(env)) == -EFAULT) {
1390 goto badframe;
1393 unlock_user_struct(frame, frame_addr, 0);
1394 return -TARGET_QEMU_ESIGRETURN;
1396 badframe:
1397 unlock_user_struct(frame, frame_addr, 0);
1398 force_sig(TARGET_SIGSEGV);
1399 return -TARGET_QEMU_ESIGRETURN;
1402 #elif defined(TARGET_AARCH64)
1404 struct target_sigcontext {
1405 uint64_t fault_address;
1406 /* AArch64 registers */
1407 uint64_t regs[31];
1408 uint64_t sp;
1409 uint64_t pc;
1410 uint64_t pstate;
1411 /* 4K reserved for FP/SIMD state and future expansion */
1412 char __reserved[4096] __attribute__((__aligned__(16)));
1415 struct target_ucontext {
1416 abi_ulong tuc_flags;
1417 abi_ulong tuc_link;
1418 target_stack_t tuc_stack;
1419 target_sigset_t tuc_sigmask;
1420 /* glibc uses a 1024-bit sigset_t */
1421 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1422 /* last for future expansion */
1423 struct target_sigcontext tuc_mcontext;
1427 * Header to be used at the beginning of structures extending the user
1428 * context. Such structures must be placed after the rt_sigframe on the stack
1429 * and be 16-byte aligned. The last structure must be a dummy one with the
1430 * magic and size set to 0.
1432 struct target_aarch64_ctx {
1433 uint32_t magic;
1434 uint32_t size;
1437 #define TARGET_FPSIMD_MAGIC 0x46508001
1439 struct target_fpsimd_context {
1440 struct target_aarch64_ctx head;
1441 uint32_t fpsr;
1442 uint32_t fpcr;
1443 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1447 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1448 * user space as it will change with the addition of new context. User space
1449 * should check the magic/size information.
1451 struct target_aux_context {
1452 struct target_fpsimd_context fpsimd;
1453 /* additional context to be added before "end" */
1454 struct target_aarch64_ctx end;
1457 struct target_rt_sigframe {
1458 struct target_siginfo info;
1459 struct target_ucontext uc;
1460 uint64_t fp;
1461 uint64_t lr;
1462 uint32_t tramp[2];
1465 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1466 CPUARMState *env, target_sigset_t *set)
1468 int i;
1469 struct target_aux_context *aux =
1470 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1472 /* set up the stack frame for unwinding */
1473 __put_user(env->xregs[29], &sf->fp);
1474 __put_user(env->xregs[30], &sf->lr);
1476 for (i = 0; i < 31; i++) {
1477 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1479 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1480 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1481 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1483 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1485 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1486 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1489 for (i = 0; i < 32; i++) {
1490 #ifdef TARGET_WORDS_BIGENDIAN
1491 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1492 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1493 #else
1494 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1495 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1496 #endif
1498 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1499 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1500 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1501 __put_user(sizeof(struct target_fpsimd_context),
1502 &aux->fpsimd.head.size);
1504 /* set the "end" magic */
1505 __put_user(0, &aux->end.magic);
1506 __put_user(0, &aux->end.size);
1508 return 0;
1511 static int target_restore_sigframe(CPUARMState *env,
1512 struct target_rt_sigframe *sf)
1514 sigset_t set;
1515 int i;
1516 struct target_aux_context *aux =
1517 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1518 uint32_t magic, size, fpsr, fpcr;
1519 uint64_t pstate;
1521 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1522 set_sigmask(&set);
1524 for (i = 0; i < 31; i++) {
1525 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1528 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1529 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1530 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1531 pstate_write(env, pstate);
1533 __get_user(magic, &aux->fpsimd.head.magic);
1534 __get_user(size, &aux->fpsimd.head.size);
1536 if (magic != TARGET_FPSIMD_MAGIC
1537 || size != sizeof(struct target_fpsimd_context)) {
1538 return 1;
1541 for (i = 0; i < 32; i++) {
1542 #ifdef TARGET_WORDS_BIGENDIAN
1543 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1544 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1545 #else
1546 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1547 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1548 #endif
1550 __get_user(fpsr, &aux->fpsimd.fpsr);
1551 vfp_set_fpsr(env, fpsr);
1552 __get_user(fpcr, &aux->fpsimd.fpcr);
1553 vfp_set_fpcr(env, fpcr);
1555 return 0;
1558 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1560 abi_ulong sp;
1562 sp = env->xregs[31];
1565 * This is the X/Open sanctioned signal stack switching.
1567 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1568 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1571 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1573 return sp;
1576 static void target_setup_frame(int usig, struct target_sigaction *ka,
1577 target_siginfo_t *info, target_sigset_t *set,
1578 CPUARMState *env)
1580 struct target_rt_sigframe *frame;
1581 abi_ulong frame_addr, return_addr;
1583 frame_addr = get_sigframe(ka, env);
1584 trace_user_setup_frame(env, frame_addr);
1585 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1586 goto give_sigsegv;
1589 __put_user(0, &frame->uc.tuc_flags);
1590 __put_user(0, &frame->uc.tuc_link);
1592 __put_user(target_sigaltstack_used.ss_sp,
1593 &frame->uc.tuc_stack.ss_sp);
1594 __put_user(sas_ss_flags(env->xregs[31]),
1595 &frame->uc.tuc_stack.ss_flags);
1596 __put_user(target_sigaltstack_used.ss_size,
1597 &frame->uc.tuc_stack.ss_size);
1598 target_setup_sigframe(frame, env, set);
1599 if (ka->sa_flags & TARGET_SA_RESTORER) {
1600 return_addr = ka->sa_restorer;
1601 } else {
1603 * mov x8,#__NR_rt_sigreturn; svc #0
1604 * Since these are instructions they need to be put as little-endian
1605 * regardless of target default or current CPU endianness.
1607 __put_user_e(0xd2801168, &frame->tramp[0], le);
1608 __put_user_e(0xd4000001, &frame->tramp[1], le);
1609 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
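        /* 0xd2801168 encodes "movz x8, #0x8b" (139, i.e. __NR_rt_sigreturn on
         * AArch64) and 0xd4000001 encodes "svc #0", matching the mov/svc pair
         * described in the comment above. */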
1611 env->xregs[0] = usig;
1612 env->xregs[31] = frame_addr;
1613 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1614 env->pc = ka->_sa_handler;
1615 env->xregs[30] = return_addr;
1616 if (info) {
1617 tswap_siginfo(&frame->info, info);
1618 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1619 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1622 unlock_user_struct(frame, frame_addr, 1);
1623 return;
1625 give_sigsegv:
1626 unlock_user_struct(frame, frame_addr, 1);
1627 force_sigsegv(usig);
1630 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1631 target_siginfo_t *info, target_sigset_t *set,
1632 CPUARMState *env)
1634 target_setup_frame(sig, ka, info, set, env);
1637 static void setup_frame(int sig, struct target_sigaction *ka,
1638 target_sigset_t *set, CPUARMState *env)
1640 target_setup_frame(sig, ka, 0, set, env);
1643 long do_rt_sigreturn(CPUARMState *env)
1645 struct target_rt_sigframe *frame = NULL;
1646 abi_ulong frame_addr = env->xregs[31];
1648 trace_user_do_rt_sigreturn(env, frame_addr);
1649 if (frame_addr & 15) {
1650 goto badframe;
1653 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1654 goto badframe;
1657 if (target_restore_sigframe(env, frame)) {
1658 goto badframe;
1661 if (do_sigaltstack(frame_addr +
1662 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1663 0, get_sp_from_cpustate(env)) == -EFAULT) {
1664 goto badframe;
1667 unlock_user_struct(frame, frame_addr, 0);
1668 return -TARGET_QEMU_ESIGRETURN;
1670 badframe:
1671 unlock_user_struct(frame, frame_addr, 0);
1672 force_sig(TARGET_SIGSEGV);
1673 return -TARGET_QEMU_ESIGRETURN;
1676 long do_sigreturn(CPUARMState *env)
1678 return do_rt_sigreturn(env);
1681 #elif defined(TARGET_ARM)
1683 struct target_sigcontext {
1684 abi_ulong trap_no;
1685 abi_ulong error_code;
1686 abi_ulong oldmask;
1687 abi_ulong arm_r0;
1688 abi_ulong arm_r1;
1689 abi_ulong arm_r2;
1690 abi_ulong arm_r3;
1691 abi_ulong arm_r4;
1692 abi_ulong arm_r5;
1693 abi_ulong arm_r6;
1694 abi_ulong arm_r7;
1695 abi_ulong arm_r8;
1696 abi_ulong arm_r9;
1697 abi_ulong arm_r10;
1698 abi_ulong arm_fp;
1699 abi_ulong arm_ip;
1700 abi_ulong arm_sp;
1701 abi_ulong arm_lr;
1702 abi_ulong arm_pc;
1703 abi_ulong arm_cpsr;
1704 abi_ulong fault_address;
1707 struct target_ucontext_v1 {
1708 abi_ulong tuc_flags;
1709 abi_ulong tuc_link;
1710 target_stack_t tuc_stack;
1711 struct target_sigcontext tuc_mcontext;
1712 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1715 struct target_ucontext_v2 {
1716 abi_ulong tuc_flags;
1717 abi_ulong tuc_link;
1718 target_stack_t tuc_stack;
1719 struct target_sigcontext tuc_mcontext;
1720 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1721 char __unused[128 - sizeof(target_sigset_t)];
1722 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1725 struct target_user_vfp {
1726 uint64_t fpregs[32];
1727 abi_ulong fpscr;
1730 struct target_user_vfp_exc {
1731 abi_ulong fpexc;
1732 abi_ulong fpinst;
1733 abi_ulong fpinst2;
1736 struct target_vfp_sigframe {
1737 abi_ulong magic;
1738 abi_ulong size;
1739 struct target_user_vfp ufp;
1740 struct target_user_vfp_exc ufp_exc;
1741 } __attribute__((__aligned__(8)));
1743 struct target_iwmmxt_sigframe {
1744 abi_ulong magic;
1745 abi_ulong size;
1746 uint64_t regs[16];
1747 /* Note that not all the coprocessor control registers are stored here */
1748 uint32_t wcssf;
1749 uint32_t wcasf;
1750 uint32_t wcgr0;
1751 uint32_t wcgr1;
1752 uint32_t wcgr2;
1753 uint32_t wcgr3;
1754 } __attribute__((__aligned__(8)));
1756 #define TARGET_VFP_MAGIC 0x56465001
1757 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1759 struct sigframe_v1
1761 struct target_sigcontext sc;
1762 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1763 abi_ulong retcode;
1766 struct sigframe_v2
1768 struct target_ucontext_v2 uc;
1769 abi_ulong retcode;
1772 struct rt_sigframe_v1
1774 abi_ulong pinfo;
1775 abi_ulong puc;
1776 struct target_siginfo info;
1777 struct target_ucontext_v1 uc;
1778 abi_ulong retcode;
1781 struct rt_sigframe_v2
1783 struct target_siginfo info;
1784 struct target_ucontext_v2 uc;
1785 abi_ulong retcode;
1788 #define TARGET_CONFIG_CPU_32 1
1791 * For ARM syscalls, we encode the syscall number into the instruction.
1793 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1794 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1797 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1798 * need two 16-bit instructions.
1800 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1801 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
1803 static const abi_ulong retcodes[4] = {
1804 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1805 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1809 static inline int valid_user_regs(CPUARMState *regs)
1811 return 1;
1814 static void
1815 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1816 CPUARMState *env, abi_ulong mask)
1818 __put_user(env->regs[0], &sc->arm_r0);
1819 __put_user(env->regs[1], &sc->arm_r1);
1820 __put_user(env->regs[2], &sc->arm_r2);
1821 __put_user(env->regs[3], &sc->arm_r3);
1822 __put_user(env->regs[4], &sc->arm_r4);
1823 __put_user(env->regs[5], &sc->arm_r5);
1824 __put_user(env->regs[6], &sc->arm_r6);
1825 __put_user(env->regs[7], &sc->arm_r7);
1826 __put_user(env->regs[8], &sc->arm_r8);
1827 __put_user(env->regs[9], &sc->arm_r9);
1828 __put_user(env->regs[10], &sc->arm_r10);
1829 __put_user(env->regs[11], &sc->arm_fp);
1830 __put_user(env->regs[12], &sc->arm_ip);
1831 __put_user(env->regs[13], &sc->arm_sp);
1832 __put_user(env->regs[14], &sc->arm_lr);
1833 __put_user(env->regs[15], &sc->arm_pc);
1834 #ifdef TARGET_CONFIG_CPU_32
1835 __put_user(cpsr_read(env), &sc->arm_cpsr);
1836 #endif
1838 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1839 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1840 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1841 __put_user(mask, &sc->oldmask);
1844 static inline abi_ulong
1845 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1847 unsigned long sp = regs->regs[13];
1850 * This is the X/Open sanctioned signal stack switching.
1852 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1853 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1856 * ATPCS B01 mandates 8-byte alignment
1858 return (sp - framesize) & ~7;
1861 static void
1862 setup_return(CPUARMState *env, struct target_sigaction *ka,
1863 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1865 abi_ulong handler = ka->_sa_handler;
1866 abi_ulong retcode;
1867 int thumb = handler & 1;
1868 uint32_t cpsr = cpsr_read(env);
1870 cpsr &= ~CPSR_IT;
1871 if (thumb) {
1872 cpsr |= CPSR_T;
1873 } else {
1874 cpsr &= ~CPSR_T;
1877 if (ka->sa_flags & TARGET_SA_RESTORER) {
1878 retcode = ka->sa_restorer;
1879 } else {
1880 unsigned int idx = thumb;
1882 if (ka->sa_flags & TARGET_SA_SIGINFO) {
1883 idx += 2;
1886 __put_user(retcodes[idx], rc);
1888 retcode = rc_addr + thumb;
1891 env->regs[0] = usig;
1892 env->regs[13] = frame_addr;
1893 env->regs[14] = retcode;
1894 env->regs[15] = handler & (thumb ? ~1 : ~3);
1895 cpsr_write(env, cpsr, CPSR_IT | CPSR_T, CPSRWriteByInstr);
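    /* Note that "retcode = rc_addr + thumb" relies on the ARM convention that
     * bit 0 of a code address selects Thumb state, so returning to the
     * trampoline re-enters the instruction set it was assembled for. */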
1898 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1900 int i;
1901 struct target_vfp_sigframe *vfpframe;
1902 vfpframe = (struct target_vfp_sigframe *)regspace;
1903 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1904 __put_user(sizeof(*vfpframe), &vfpframe->size);
1905 for (i = 0; i < 32; i++) {
1906 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1908 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1909 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1910 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1911 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1912 return (abi_ulong*)(vfpframe+1);
1915 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1916 CPUARMState *env)
1918 int i;
1919 struct target_iwmmxt_sigframe *iwmmxtframe;
1920 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1921 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1922 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1923 for (i = 0; i < 16; i++) {
1924 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1926 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1927     __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1928 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1929 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1930 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1931 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1932 return (abi_ulong*)(iwmmxtframe+1);
1935 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1936 target_sigset_t *set, CPUARMState *env)
1938 struct target_sigaltstack stack;
1939 int i;
1940 abi_ulong *regspace;
1942 /* Clear all the bits of the ucontext we don't use. */
1943 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1945 memset(&stack, 0, sizeof(stack));
1946 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1947 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1948 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1949 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1951 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1952 /* Save coprocessor signal frame. */
1953 regspace = uc->tuc_regspace;
1954 if (arm_feature(env, ARM_FEATURE_VFP)) {
1955 regspace = setup_sigframe_v2_vfp(regspace, env);
1957 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1958 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1961 /* Write terminating magic word */
1962 __put_user(0, regspace);
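/* Each coprocessor block written above is self-describing (a magic word
 * followed by its size), and the list is terminated by a zero word, so a
 * reader can walk tuc_regspace without knowing which blocks are present. */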
1964 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1965 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1969 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1970 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1971 target_sigset_t *set, CPUARMState *regs)
1973 struct sigframe_v1 *frame;
1974 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1975 int i;
1977 trace_user_setup_frame(regs, frame_addr);
1978 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1979 goto sigsegv;
1982 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1984 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1985 __put_user(set->sig[i], &frame->extramask[i - 1]);
1988 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1989 frame_addr + offsetof(struct sigframe_v1, retcode));
1991 unlock_user_struct(frame, frame_addr, 1);
1992 return;
1993 sigsegv:
1994 force_sigsegv(usig);
1997 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1998 target_sigset_t *set, CPUARMState *regs)
2000 struct sigframe_v2 *frame;
2001 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2003 trace_user_setup_frame(regs, frame_addr);
2004 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2005 goto sigsegv;
2008 setup_sigframe_v2(&frame->uc, set, regs);
2010 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
2011 frame_addr + offsetof(struct sigframe_v2, retcode));
2013 unlock_user_struct(frame, frame_addr, 1);
2014 return;
2015 sigsegv:
2016 force_sigsegv(usig);
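/* 0x020612 is Linux 2.6.18; kernels from that version on are given the
 * v2 (ucontext-based) frame layout, older ones the original v1 layout. */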
2019 static void setup_frame(int usig, struct target_sigaction *ka,
2020 target_sigset_t *set, CPUARMState *regs)
2022 if (get_osversion() >= 0x020612) {
2023 setup_frame_v2(usig, ka, set, regs);
2024 } else {
2025 setup_frame_v1(usig, ka, set, regs);
2029 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
2030 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
2031 target_siginfo_t *info,
2032 target_sigset_t *set, CPUARMState *env)
2034 struct rt_sigframe_v1 *frame;
2035 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2036 struct target_sigaltstack stack;
2037 int i;
2038 abi_ulong info_addr, uc_addr;
2040 trace_user_setup_rt_frame(env, frame_addr);
2041 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2042 goto sigsegv;
2045 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
2046 __put_user(info_addr, &frame->pinfo);
2047 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
2048 __put_user(uc_addr, &frame->puc);
2049 tswap_siginfo(&frame->info, info);
2051 /* Clear all the bits of the ucontext we don't use. */
2052 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
2054 memset(&stack, 0, sizeof(stack));
2055 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
2056 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
2057 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
2058 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
2060 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
2061 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2062 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
2065 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2066 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
2068 env->regs[1] = info_addr;
2069 env->regs[2] = uc_addr;
2071 unlock_user_struct(frame, frame_addr, 1);
2072 return;
2073 sigsegv:
2074 force_sigsegv(usig);
2077 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
2078 target_siginfo_t *info,
2079 target_sigset_t *set, CPUARMState *env)
2081 struct rt_sigframe_v2 *frame;
2082 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
2083 abi_ulong info_addr, uc_addr;
2085 trace_user_setup_rt_frame(env, frame_addr);
2086 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
2087 goto sigsegv;
2090 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
2091 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
2092 tswap_siginfo(&frame->info, info);
2094 setup_sigframe_v2(&frame->uc, set, env);
2096 setup_return(env, ka, &frame->retcode, frame_addr, usig,
2097 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
2099 env->regs[1] = info_addr;
2100 env->regs[2] = uc_addr;
2102 unlock_user_struct(frame, frame_addr, 1);
2103 return;
2104 sigsegv:
2105 force_sigsegv(usig);
2108 static void setup_rt_frame(int usig, struct target_sigaction *ka,
2109 target_siginfo_t *info,
2110 target_sigset_t *set, CPUARMState *env)
2112 if (get_osversion() >= 0x020612) {
2113 setup_rt_frame_v2(usig, ka, info, set, env);
2114 } else {
2115 setup_rt_frame_v1(usig, ka, info, set, env);
2119 static int
2120 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
2122 int err = 0;
2123 uint32_t cpsr;
2125 __get_user(env->regs[0], &sc->arm_r0);
2126 __get_user(env->regs[1], &sc->arm_r1);
2127 __get_user(env->regs[2], &sc->arm_r2);
2128 __get_user(env->regs[3], &sc->arm_r3);
2129 __get_user(env->regs[4], &sc->arm_r4);
2130 __get_user(env->regs[5], &sc->arm_r5);
2131 __get_user(env->regs[6], &sc->arm_r6);
2132 __get_user(env->regs[7], &sc->arm_r7);
2133 __get_user(env->regs[8], &sc->arm_r8);
2134 __get_user(env->regs[9], &sc->arm_r9);
2135 __get_user(env->regs[10], &sc->arm_r10);
2136 __get_user(env->regs[11], &sc->arm_fp);
2137 __get_user(env->regs[12], &sc->arm_ip);
2138 __get_user(env->regs[13], &sc->arm_sp);
2139 __get_user(env->regs[14], &sc->arm_lr);
2140 __get_user(env->regs[15], &sc->arm_pc);
2141 #ifdef TARGET_CONFIG_CPU_32
2142 __get_user(cpsr, &sc->arm_cpsr);
2143 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
2144 #endif
2146 err |= !valid_user_regs(env);
2148 return err;
2151 static long do_sigreturn_v1(CPUARMState *env)
2153 abi_ulong frame_addr;
2154 struct sigframe_v1 *frame = NULL;
2155 target_sigset_t set;
2156 sigset_t host_set;
2157 int i;
2160 * Since we stacked the signal on a 64-bit boundary,
2161 * 'sp' should be word aligned here. If it's
2162 * not, the user is trying to mess with us.
2164 frame_addr = env->regs[13];
2165 trace_user_do_sigreturn(env, frame_addr);
2166 if (frame_addr & 7) {
2167 goto badframe;
2170 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2171 goto badframe;
2174 __get_user(set.sig[0], &frame->sc.oldmask);
2175 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2176 __get_user(set.sig[i], &frame->extramask[i - 1]);
2179 target_to_host_sigset_internal(&host_set, &set);
2180 set_sigmask(&host_set);
2182 if (restore_sigcontext(env, &frame->sc)) {
2183 goto badframe;
2186 #if 0
2187 /* Send SIGTRAP if we're single-stepping */
2188 if (ptrace_cancel_bpt(current))
2189 send_sig(SIGTRAP, current, 1);
2190 #endif
2191 unlock_user_struct(frame, frame_addr, 0);
2192 return -TARGET_QEMU_ESIGRETURN;
2194 badframe:
2195 force_sig(TARGET_SIGSEGV);
2196 return -TARGET_QEMU_ESIGRETURN;
2199 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
2201 int i;
2202 abi_ulong magic, sz;
2203 uint32_t fpscr, fpexc;
2204 struct target_vfp_sigframe *vfpframe;
2205 vfpframe = (struct target_vfp_sigframe *)regspace;
2207 __get_user(magic, &vfpframe->magic);
2208 __get_user(sz, &vfpframe->size);
2209 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
2210 return 0;
2212 for (i = 0; i < 32; i++) {
2213 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
2215 __get_user(fpscr, &vfpframe->ufp.fpscr);
2216 vfp_set_fpscr(env, fpscr);
2217 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
2218 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
2219 * and the exception flag is cleared
2221 fpexc |= (1 << 30);
2222 fpexc &= ~((1 << 31) | (1 << 28));
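/* In FPEXC, bit 30 is EN (enable), bit 31 is EX (exception pending) and
 * bit 28 is FP2V (FPINST2 valid), hence the masking above. */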
2223 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
2224 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
2225 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
2226 return (abi_ulong *)(vfpframe + 1);
2229 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
2230 abi_ulong *regspace)
2232 int i;
2233 abi_ulong magic, sz;
2234 struct target_iwmmxt_sigframe *iwmmxtframe;
2235 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
2237 __get_user(magic, &iwmmxtframe->magic);
2238 __get_user(sz, &iwmmxtframe->size);
2239 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
2240 return 0;
2242 for (i = 0; i < 16; i++) {
2243 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
2245 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
2246 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
2247 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
2248 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
2249 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
2250 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
2251 return (abi_ulong *)(iwmmxtframe + 1);
2254 static int do_sigframe_return_v2(CPUARMState *env,
2255 target_ulong context_addr,
2256 struct target_ucontext_v2 *uc)
2258 sigset_t host_set;
2259 abi_ulong *regspace;
2261 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
2262 set_sigmask(&host_set);
2264 if (restore_sigcontext(env, &uc->tuc_mcontext))
2265 return 1;
2267 /* Restore coprocessor signal frame */
2268 regspace = uc->tuc_regspace;
2269 if (arm_feature(env, ARM_FEATURE_VFP)) {
2270 regspace = restore_sigframe_v2_vfp(env, regspace);
2271 if (!regspace) {
2272 return 1;
2275 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
2276 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
2277 if (!regspace) {
2278 return 1;
2282 if (do_sigaltstack(context_addr
2283 + offsetof(struct target_ucontext_v2, tuc_stack),
2284 0, get_sp_from_cpustate(env)) == -EFAULT) {
2285 return 1;
2288 #if 0
2289 /* Send SIGTRAP if we're single-stepping */
2290 if (ptrace_cancel_bpt(current))
2291 send_sig(SIGTRAP, current, 1);
2292 #endif
2294 return 0;
2297 static long do_sigreturn_v2(CPUARMState *env)
2299 abi_ulong frame_addr;
2300 struct sigframe_v2 *frame = NULL;
2303 * Since we stacked the signal on a 64-bit boundary,
2304 * 'sp' should be word aligned here. If it's
2305 * not, the user is trying to mess with us.
2307 frame_addr = env->regs[13];
2308 trace_user_do_sigreturn(env, frame_addr);
2309 if (frame_addr & 7) {
2310 goto badframe;
2313 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2314 goto badframe;
2317 if (do_sigframe_return_v2(env,
2318 frame_addr
2319 + offsetof(struct sigframe_v2, uc),
2320 &frame->uc)) {
2321 goto badframe;
2324 unlock_user_struct(frame, frame_addr, 0);
2325 return -TARGET_QEMU_ESIGRETURN;
2327 badframe:
2328 unlock_user_struct(frame, frame_addr, 0);
2329 force_sig(TARGET_SIGSEGV);
2330 return -TARGET_QEMU_ESIGRETURN;
2333 long do_sigreturn(CPUARMState *env)
2335 if (get_osversion() >= 0x020612) {
2336 return do_sigreturn_v2(env);
2337 } else {
2338 return do_sigreturn_v1(env);
2342 static long do_rt_sigreturn_v1(CPUARMState *env)
2344 abi_ulong frame_addr;
2345 struct rt_sigframe_v1 *frame = NULL;
2346 sigset_t host_set;
2349 * Since we stacked the signal on a 64-bit boundary,
2350 * 'sp' should be word aligned here. If it's
2351 * not, the user is trying to mess with us.
2353 frame_addr = env->regs[13];
2354 trace_user_do_rt_sigreturn(env, frame_addr);
2355 if (frame_addr & 7) {
2356 goto badframe;
2359 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2360 goto badframe;
2363 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2364 set_sigmask(&host_set);
2366 if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) {
2367 goto badframe;
2370 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2371 goto badframe;
2373 #if 0
2374 /* Send SIGTRAP if we're single-stepping */
2375 if (ptrace_cancel_bpt(current))
2376 send_sig(SIGTRAP, current, 1);
2377 #endif
2378 unlock_user_struct(frame, frame_addr, 0);
2379 return -TARGET_QEMU_ESIGRETURN;
2381 badframe:
2382 unlock_user_struct(frame, frame_addr, 0);
2383 force_sig(TARGET_SIGSEGV);
2384 return -TARGET_QEMU_ESIGRETURN;
2387 static long do_rt_sigreturn_v2(CPUARMState *env)
2389 abi_ulong frame_addr;
2390 struct rt_sigframe_v2 *frame = NULL;
2393 * Since we stacked the signal on a 64-bit boundary,
2394 * 'sp' should be word aligned here. If it's
2395 * not, the user is trying to mess with us.
2397 frame_addr = env->regs[13];
2398 trace_user_do_rt_sigreturn(env, frame_addr);
2399 if (frame_addr & 7) {
2400 goto badframe;
2403 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
2404 goto badframe;
2407 if (do_sigframe_return_v2(env,
2408 frame_addr
2409 + offsetof(struct rt_sigframe_v2, uc),
2410 &frame->uc)) {
2411 goto badframe;
2414 unlock_user_struct(frame, frame_addr, 0);
2415 return -TARGET_QEMU_ESIGRETURN;
2417 badframe:
2418 unlock_user_struct(frame, frame_addr, 0);
2419 force_sig(TARGET_SIGSEGV);
2420 return -TARGET_QEMU_ESIGRETURN;
2423 long do_rt_sigreturn(CPUARMState *env)
2425 if (get_osversion() >= 0x020612) {
2426 return do_rt_sigreturn_v2(env);
2427 } else {
2428 return do_rt_sigreturn_v1(env);
2432 #elif defined(TARGET_SPARC)
2434 #define __SUNOS_MAXWIN 31
2436 /* This is what SunOS does, so shall I. */
2437 struct target_sigcontext {
2438 abi_ulong sigc_onstack; /* state to restore */
2440 abi_ulong sigc_mask; /* sigmask to restore */
2441 abi_ulong sigc_sp; /* stack pointer */
2442 abi_ulong sigc_pc; /* program counter */
2443 abi_ulong sigc_npc; /* next program counter */
2444 abi_ulong sigc_psr; /* for condition codes etc */
2445 abi_ulong sigc_g1; /* User uses these two registers */
2446 abi_ulong sigc_o0; /* within the trampoline code. */
2448 /* Now comes information regarding the user's window set
2449 * at the time of the signal.
2451 abi_ulong sigc_oswins; /* outstanding windows */
2453 /* stack ptrs for each regwin buf */
2454 char *sigc_spbuf[__SUNOS_MAXWIN];
2456 /* Windows to restore after signal */
2457 struct {
2458 abi_ulong locals[8];
2459 abi_ulong ins[8];
2460 } sigc_wbuf[__SUNOS_MAXWIN];
2462 /* A Sparc stack frame */
2463 struct sparc_stackf {
2464 abi_ulong locals[8];
2465 abi_ulong ins[8];
2466 /* It's simpler to treat fp and callers_pc as elements of ins[]
2467 * since we never need to access them ourselves.
2469 char *structptr;
2470 abi_ulong xargs[6];
2471 abi_ulong xxargs[1];
2474 typedef struct {
2475 struct {
2476 abi_ulong psr;
2477 abi_ulong pc;
2478 abi_ulong npc;
2479 abi_ulong y;
2480 abi_ulong u_regs[16]; /* globals and ins */
2481 } si_regs;
2482 int si_mask;
2483 } __siginfo_t;
2485 typedef struct {
2486 abi_ulong si_float_regs[32];
2487 unsigned long si_fsr;
2488 unsigned long si_fpqdepth;
2489 struct {
2490 unsigned long *insn_addr;
2491 unsigned long insn;
2492 } si_fpqueue [16];
2493 } qemu_siginfo_fpu_t;
2496 struct target_signal_frame {
2497 struct sparc_stackf ss;
2498 __siginfo_t info;
2499 abi_ulong fpu_save;
2500 abi_ulong insns[2] __attribute__ ((aligned (8)));
2501 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2502 abi_ulong extra_size; /* Should be 0 */
2503 qemu_siginfo_fpu_t fpu_state;
2505 struct target_rt_signal_frame {
2506 struct sparc_stackf ss;
2507 siginfo_t info;
2508 abi_ulong regs[20];
2509 sigset_t mask;
2510 abi_ulong fpu_save;
2511 unsigned int insns[2];
2512 stack_t stack;
2513 unsigned int extra_size; /* Should be 0 */
2514 qemu_siginfo_fpu_t fpu_state;
2517 #define UREG_O0 16
2518 #define UREG_O6 22
2519 #define UREG_I0 0
2520 #define UREG_I1 1
2521 #define UREG_I2 2
2522 #define UREG_I3 3
2523 #define UREG_I4 4
2524 #define UREG_I5 5
2525 #define UREG_I6 6
2526 #define UREG_I7 7
2527 #define UREG_L0 8
2528 #define UREG_FP UREG_I6
2529 #define UREG_SP UREG_O6
2531 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2532 CPUSPARCState *env,
2533 unsigned long framesize)
2535 abi_ulong sp;
2537 sp = env->regwptr[UREG_FP];
2539 /* This is the X/Open sanctioned signal stack switching. */
2540 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2541 if (!on_sig_stack(sp)
2542 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7)) {
2543 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2546 return sp - framesize;
2549 static int
2550 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2552 int err = 0, i;
2554 __put_user(env->psr, &si->si_regs.psr);
2555 __put_user(env->pc, &si->si_regs.pc);
2556 __put_user(env->npc, &si->si_regs.npc);
2557 __put_user(env->y, &si->si_regs.y);
2558 for (i=0; i < 8; i++) {
2559 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2561 for (i=0; i < 8; i++) {
2562 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2564 __put_user(mask, &si->si_mask);
2565 return err;
2568 #if 0
2569 static int
2570 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2571 CPUSPARCState *env, unsigned long mask)
2573 int err = 0;
2575 __put_user(mask, &sc->sigc_mask);
2576 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2577 __put_user(env->pc, &sc->sigc_pc);
2578 __put_user(env->npc, &sc->sigc_npc);
2579 __put_user(env->psr, &sc->sigc_psr);
2580 __put_user(env->gregs[1], &sc->sigc_g1);
2581 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2583 return err;
2585 #endif
2586 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
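/* i.e. the frame size rounded up to the next multiple of 8 bytes */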
2588 static void setup_frame(int sig, struct target_sigaction *ka,
2589 target_sigset_t *set, CPUSPARCState *env)
2591 abi_ulong sf_addr;
2592 struct target_signal_frame *sf;
2593 int sigframe_size, err, i;
2595 /* 1. Make sure everything is clean */
2596 //synchronize_user_stack();
2598 sigframe_size = NF_ALIGNEDSZ;
2599 sf_addr = get_sigframe(ka, env, sigframe_size);
2600 trace_user_setup_frame(env, sf_addr);
2602 sf = lock_user(VERIFY_WRITE, sf_addr,
2603 sizeof(struct target_signal_frame), 0);
2604 if (!sf) {
2605 goto sigsegv;
2607 #if 0
2608 if (invalid_frame_pointer(sf, sigframe_size))
2609 goto sigill_and_return;
2610 #endif
2611 /* 2. Save the current process state */
2612 err = setup___siginfo(&sf->info, env, set->sig[0]);
2613 __put_user(0, &sf->extra_size);
2615 //save_fpu_state(regs, &sf->fpu_state);
2616 //__put_user(&sf->fpu_state, &sf->fpu_save);
2618 __put_user(set->sig[0], &sf->info.si_mask);
2619 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2620 __put_user(set->sig[i + 1], &sf->extramask[i]);
2623 for (i = 0; i < 8; i++) {
2624 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2626 for (i = 0; i < 8; i++) {
2627 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2629 if (err)
2630 goto sigsegv;
2632 /* 3. signal handler back-trampoline and parameters */
2633 env->regwptr[UREG_FP] = sf_addr;
2634 env->regwptr[UREG_I0] = sig;
2635 env->regwptr[UREG_I1] = sf_addr +
2636 offsetof(struct target_signal_frame, info);
2637 env->regwptr[UREG_I2] = sf_addr +
2638 offsetof(struct target_signal_frame, info);
2640 /* 4. signal handler */
2641 env->pc = ka->_sa_handler;
2642 env->npc = (env->pc + 4);
2643 /* 5. return to kernel instructions */
2644 if (ka->sa_restorer) {
2645 env->regwptr[UREG_I7] = ka->sa_restorer;
2646 } else {
2647 uint32_t val32;
2649 env->regwptr[UREG_I7] = sf_addr +
2650 offsetof(struct target_signal_frame, insns) - 2 * 4;
2652 /* mov __NR_sigreturn, %g1 */
2653 val32 = 0x821020d8;
2654 __put_user(val32, &sf->insns[0]);
2656 /* t 0x10 */
2657 val32 = 0x91d02010;
2658 __put_user(val32, &sf->insns[1]);
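/* Decoded, the two words above are "mov 0xd8, %g1" (i.e. or %g0, 0xd8, %g1),
 * loading the sigreturn syscall number, followed by "ta 0x10", the Linux
 * SPARC syscall trap. */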
2659 if (err)
2660 goto sigsegv;
2662 /* Flush instruction space. */
2663 // flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2664 // tb_flush(env);
2666 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2667 return;
2668 #if 0
2669 sigill_and_return:
2670 force_sig(TARGET_SIGILL);
2671 #endif
2672 sigsegv:
2673 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2674 force_sigsegv(sig);
2677 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2678 target_siginfo_t *info,
2679 target_sigset_t *set, CPUSPARCState *env)
2681 fprintf(stderr, "setup_rt_frame: not implemented\n");
2684 long do_sigreturn(CPUSPARCState *env)
2686 abi_ulong sf_addr;
2687 struct target_signal_frame *sf;
2688 uint32_t up_psr, pc, npc;
2689 target_sigset_t set;
2690 sigset_t host_set;
2691 int err=0, i;
2693 sf_addr = env->regwptr[UREG_FP];
2694 trace_user_do_sigreturn(env, sf_addr);
2695 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1)) {
2696 goto segv_and_exit;
2699 /* 1. Make sure we are not getting garbage from the user */
2701 if (sf_addr & 3)
2702 goto segv_and_exit;
2704 __get_user(pc, &sf->info.si_regs.pc);
2705 __get_user(npc, &sf->info.si_regs.npc);
2707 if ((pc | npc) & 3) {
2708 goto segv_and_exit;
2711 /* 2. Restore the state */
2712 __get_user(up_psr, &sf->info.si_regs.psr);
2714 /* User can only change condition codes and FPU enabling in %psr. */
2715 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2716 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2718 env->pc = pc;
2719 env->npc = npc;
2720 __get_user(env->y, &sf->info.si_regs.y);
2721 for (i=0; i < 8; i++) {
2722 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2724 for (i=0; i < 8; i++) {
2725 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2728 /* FIXME: implement FPU save/restore:
2729 * __get_user(fpu_save, &sf->fpu_save);
2730 * if (fpu_save)
2731 * err |= restore_fpu_state(env, fpu_save);
2734 /* This is pretty much atomic; no amount of locking would prevent
2735 * the races that exist anyway.
2737 __get_user(set.sig[0], &sf->info.si_mask);
2738 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2739 __get_user(set.sig[i], &sf->extramask[i - 1]);
2742 target_to_host_sigset_internal(&host_set, &set);
2743 set_sigmask(&host_set);
2745 if (err) {
2746 goto segv_and_exit;
2748 unlock_user_struct(sf, sf_addr, 0);
2749 return -TARGET_QEMU_ESIGRETURN;
2751 segv_and_exit:
2752 unlock_user_struct(sf, sf_addr, 0);
2753 force_sig(TARGET_SIGSEGV);
2754 return -TARGET_QEMU_ESIGRETURN;
2757 long do_rt_sigreturn(CPUSPARCState *env)
2759 trace_user_do_rt_sigreturn(env, 0);
2760 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2761 return -TARGET_ENOSYS;
2764 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2765 #define MC_TSTATE 0
2766 #define MC_PC 1
2767 #define MC_NPC 2
2768 #define MC_Y 3
2769 #define MC_G1 4
2770 #define MC_G2 5
2771 #define MC_G3 6
2772 #define MC_G4 7
2773 #define MC_G5 8
2774 #define MC_G6 9
2775 #define MC_G7 10
2776 #define MC_O0 11
2777 #define MC_O1 12
2778 #define MC_O2 13
2779 #define MC_O3 14
2780 #define MC_O4 15
2781 #define MC_O5 16
2782 #define MC_O6 17
2783 #define MC_O7 18
2784 #define MC_NGREG 19
2786 typedef abi_ulong target_mc_greg_t;
2787 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2789 struct target_mc_fq {
2790 abi_ulong *mcfq_addr;
2791 uint32_t mcfq_insn;
2794 struct target_mc_fpu {
2795 union {
2796 uint32_t sregs[32];
2797 uint64_t dregs[32];
2798 //uint128_t qregs[16];
2799 } mcfpu_fregs;
2800 abi_ulong mcfpu_fsr;
2801 abi_ulong mcfpu_fprs;
2802 abi_ulong mcfpu_gsr;
2803 struct target_mc_fq *mcfpu_fq;
2804 unsigned char mcfpu_qcnt;
2805 unsigned char mcfpu_qentsz;
2806 unsigned char mcfpu_enab;
2808 typedef struct target_mc_fpu target_mc_fpu_t;
2810 typedef struct {
2811 target_mc_gregset_t mc_gregs;
2812 target_mc_greg_t mc_fp;
2813 target_mc_greg_t mc_i7;
2814 target_mc_fpu_t mc_fpregs;
2815 } target_mcontext_t;
2817 struct target_ucontext {
2818 struct target_ucontext *tuc_link;
2819 abi_ulong tuc_flags;
2820 target_sigset_t tuc_sigmask;
2821 target_mcontext_t tuc_mcontext;
2824 /* A V9 register window */
2825 struct target_reg_window {
2826 abi_ulong locals[8];
2827 abi_ulong ins[8];
2830 #define TARGET_STACK_BIAS 2047
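/* On SPARC V9 the %sp/%fp values held in registers are biased by 2047
 * bytes relative to the actual stack addresses. */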
2832 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2833 void sparc64_set_context(CPUSPARCState *env)
2835 abi_ulong ucp_addr;
2836 struct target_ucontext *ucp;
2837 target_mc_gregset_t *grp;
2838 abi_ulong pc, npc, tstate;
2839 abi_ulong fp, i7, w_addr;
2840 unsigned int i;
2842 ucp_addr = env->regwptr[UREG_I0];
2843 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1)) {
2844 goto do_sigsegv;
2846 grp = &ucp->tuc_mcontext.mc_gregs;
2847 __get_user(pc, &((*grp)[MC_PC]));
2848 __get_user(npc, &((*grp)[MC_NPC]));
2849 if ((pc | npc) & 3) {
2850 goto do_sigsegv;
2852 if (env->regwptr[UREG_I1]) {
2853 target_sigset_t target_set;
2854 sigset_t set;
2856 if (TARGET_NSIG_WORDS == 1) {
2857 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2858 } else {
2859 abi_ulong *src, *dst;
2860 src = ucp->tuc_sigmask.sig;
2861 dst = target_set.sig;
2862 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2863 __get_user(*dst, src);
2866 target_to_host_sigset_internal(&set, &target_set);
2867 set_sigmask(&set);
2869 env->pc = pc;
2870 env->npc = npc;
2871 __get_user(env->y, &((*grp)[MC_Y]));
2872 __get_user(tstate, &((*grp)[MC_TSTATE]));
2873 env->asi = (tstate >> 24) & 0xff;
2874 cpu_put_ccr(env, tstate >> 32);
2875 cpu_put_cwp64(env, tstate & 0x1f);
2876 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2877 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2878 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2879 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2880 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2881 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2882 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2883 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2884 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2885 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2886 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2887 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2888 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2889 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2890 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2892 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2893 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2895 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2896 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2897 abi_ulong) != 0) {
2898 goto do_sigsegv;
2900 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2901 abi_ulong) != 0) {
2902 goto do_sigsegv;
2904 /* FIXME this does not match how the kernel handles the FPU in
2905 * its sparc64_set_context implementation. In particular the FPU
2906 * is only restored if fenab is non-zero in:
2907 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2909 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2911 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2912 for (i = 0; i < 64; i++, src++) {
2913 if (i & 1) {
2914 __get_user(env->fpr[i/2].l.lower, src);
2915 } else {
2916 __get_user(env->fpr[i/2].l.upper, src);
2920 __get_user(env->fsr,
2921 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2922 __get_user(env->gsr,
2923 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
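/* A minimal sketch (an assumption, not the kernel's exact code) of how the
 * restore above could be gated on mcfpu_enab, as the FIXME comment suggests:
 */
#if 0
{
    unsigned char fenab;
    __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
    if (fenab) {
        /* ...only then restore mcfpu_fregs, mcfpu_fsr, mcfpu_gsr, mcfpu_fprs... */
    }
}
#endif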
2924 unlock_user_struct(ucp, ucp_addr, 0);
2925 return;
2926 do_sigsegv:
2927 unlock_user_struct(ucp, ucp_addr, 0);
2928 force_sig(TARGET_SIGSEGV);
2931 void sparc64_get_context(CPUSPARCState *env)
2933 abi_ulong ucp_addr;
2934 struct target_ucontext *ucp;
2935 target_mc_gregset_t *grp;
2936 target_mcontext_t *mcp;
2937 abi_ulong fp, i7, w_addr;
2938 int err;
2939 unsigned int i;
2940 target_sigset_t target_set;
2941 sigset_t set;
2943 ucp_addr = env->regwptr[UREG_I0];
2944 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0)) {
2945 goto do_sigsegv;
2948 mcp = &ucp->tuc_mcontext;
2949 grp = &mcp->mc_gregs;
2951 /* Skip over the trap instruction, first. */
2952 env->pc = env->npc;
2953 env->npc += 4;
2955 /* If we're only reading the signal mask then do_sigprocmask()
2956 * is guaranteed not to fail, which is important because we don't
2957 * have any way to signal a failure or restart this operation since
2958 * this is not a normal syscall.
2960 err = do_sigprocmask(0, NULL, &set);
2961 assert(err == 0);
2962 host_to_target_sigset_internal(&target_set, &set);
2963 if (TARGET_NSIG_WORDS == 1) {
2964 __put_user(target_set.sig[0],
2965 (abi_ulong *)&ucp->tuc_sigmask);
2966 } else {
2967 abi_ulong *src, *dst;
2968 src = target_set.sig;
2969 dst = ucp->tuc_sigmask.sig;
2970 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2971 __put_user(*src, dst);
2973 if (err)
2974 goto do_sigsegv;
2977 /* XXX: tstate must be saved properly */
2978 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2979 __put_user(env->pc, &((*grp)[MC_PC]));
2980 __put_user(env->npc, &((*grp)[MC_NPC]));
2981 __put_user(env->y, &((*grp)[MC_Y]));
2982 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2983 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2984 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2985 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2986 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2987 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2988 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2989 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2990 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2991 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2992 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2993 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2994 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2995 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2996 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2998 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2999 fp = i7 = 0;
3000 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
3001 abi_ulong) != 0) {
3002 goto do_sigsegv;
3004 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
3005 abi_ulong) != 0) {
3006 goto do_sigsegv;
3008 __put_user(fp, &(mcp->mc_fp));
3009 __put_user(i7, &(mcp->mc_i7));
3012 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
3013 for (i = 0; i < 64; i++, dst++) {
3014 if (i & 1) {
3015 __put_user(env->fpr[i/2].l.lower, dst);
3016 } else {
3017 __put_user(env->fpr[i/2].l.upper, dst);
3021 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
3022 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
3023 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
3025 if (err)
3026 goto do_sigsegv;
3027 unlock_user_struct(ucp, ucp_addr, 1);
3028 return;
3029 do_sigsegv:
3030 unlock_user_struct(ucp, ucp_addr, 1);
3031 force_sig(TARGET_SIGSEGV);
3033 #endif
3034 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
3036 # if defined(TARGET_ABI_MIPSO32)
3037 struct target_sigcontext {
3038 uint32_t sc_regmask; /* Unused */
3039 uint32_t sc_status;
3040 uint64_t sc_pc;
3041 uint64_t sc_regs[32];
3042 uint64_t sc_fpregs[32];
3043 uint32_t sc_ownedfp; /* Unused */
3044 uint32_t sc_fpc_csr;
3045 uint32_t sc_fpc_eir; /* Unused */
3046 uint32_t sc_used_math;
3047 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
3048 uint32_t pad0;
3049 uint64_t sc_mdhi;
3050 uint64_t sc_mdlo;
3051 target_ulong sc_hi1; /* Was sc_cause */
3052 target_ulong sc_lo1; /* Was sc_badvaddr */
3053 target_ulong sc_hi2; /* Was sc_sigset[4] */
3054 target_ulong sc_lo2;
3055 target_ulong sc_hi3;
3056 target_ulong sc_lo3;
3058 # else /* N32 || N64 */
3059 struct target_sigcontext {
3060 uint64_t sc_regs[32];
3061 uint64_t sc_fpregs[32];
3062 uint64_t sc_mdhi;
3063 uint64_t sc_hi1;
3064 uint64_t sc_hi2;
3065 uint64_t sc_hi3;
3066 uint64_t sc_mdlo;
3067 uint64_t sc_lo1;
3068 uint64_t sc_lo2;
3069 uint64_t sc_lo3;
3070 uint64_t sc_pc;
3071 uint32_t sc_fpc_csr;
3072 uint32_t sc_used_math;
3073 uint32_t sc_dsp;
3074 uint32_t sc_reserved;
3076 # endif /* O32 */
3078 struct sigframe {
3079 uint32_t sf_ass[4]; /* argument save space for o32 */
3080 uint32_t sf_code[2]; /* signal trampoline */
3081 struct target_sigcontext sf_sc;
3082 target_sigset_t sf_mask;
3085 struct target_ucontext {
3086 target_ulong tuc_flags;
3087 target_ulong tuc_link;
3088 target_stack_t tuc_stack;
3089 target_ulong pad0;
3090 struct target_sigcontext tuc_mcontext;
3091 target_sigset_t tuc_sigmask;
3094 struct target_rt_sigframe {
3095 uint32_t rs_ass[4]; /* argument save space for o32 */
3096 uint32_t rs_code[2]; /* signal trampoline */
3097 struct target_siginfo rs_info;
3098 struct target_ucontext rs_uc;
3101 /* Install trampoline to jump back from signal handler */
3102 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
3104 int err = 0;
3107 * Set up the return code ...
3109 * li v0, __NR__foo_sigreturn
3110 * syscall
3113 __put_user(0x24020000 + syscall, tramp + 0);
3114 __put_user(0x0000000c, tramp + 1);
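/* 0x24020000 is the base encoding of "addiu $v0, $zero, imm16" (what
 * "li v0, imm" assembles to for small immediates), and 0x0000000c is
 * "syscall". */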
3115 return err;
3118 static inline void setup_sigcontext(CPUMIPSState *regs,
3119 struct target_sigcontext *sc)
3121 int i;
3123 __put_user(exception_resume_pc(regs), &sc->sc_pc);
3124 regs->hflags &= ~MIPS_HFLAG_BMASK;
3126 __put_user(0, &sc->sc_regs[0]);
3127 for (i = 1; i < 32; ++i) {
3128 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3131 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3132 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3134 /* Rather than checking for dsp existence, always copy. The storage
3135 would just be garbage otherwise. */
3136 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
3137 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
3138 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
3139 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
3140 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
3141 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
3143 uint32_t dsp = cpu_rddsp(0x3ff, regs);
3144 __put_user(dsp, &sc->sc_dsp);
3147 __put_user(1, &sc->sc_used_math);
3149 for (i = 0; i < 32; ++i) {
3150 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3154 static inline void
3155 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
3157 int i;
3159 __get_user(regs->CP0_EPC, &sc->sc_pc);
3161 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
3162 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
3164 for (i = 1; i < 32; ++i) {
3165 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
3168 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
3169 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
3170 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
3171 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
3172 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
3173 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
3175 uint32_t dsp;
3176 __get_user(dsp, &sc->sc_dsp);
3177 cpu_wrdsp(dsp, 0x3ff, regs);
3180 for (i = 0; i < 32; ++i) {
3181 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
3186 * Determine which stack to use..
3188 static inline abi_ulong
3189 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
3191 unsigned long sp;
3193 /* Default to using normal stack */
3194 sp = regs->active_tc.gpr[29];
3197 * FPU emulator may have its own trampoline active just
3198 * above the user stack, 16 bytes before the next lowest
3199 * 16-byte boundary. Try to avoid trashing it.
3201 sp -= 32;
3203 /* This is the X/Open sanctioned signal stack switching. */
3204 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
3205 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3208 return (sp - frame_size) & ~7;
3211 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
3213 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
3214 env->hflags &= ~MIPS_HFLAG_M16;
3215 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
3216 env->active_tc.PC &= ~(target_ulong) 1;
3220 # if defined(TARGET_ABI_MIPSO32)
3221 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
3222 static void setup_frame(int sig, struct target_sigaction * ka,
3223 target_sigset_t *set, CPUMIPSState *regs)
3225 struct sigframe *frame;
3226 abi_ulong frame_addr;
3227 int i;
3229 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
3230 trace_user_setup_frame(regs, frame_addr);
3231 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3232 goto give_sigsegv;
3235 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
3237 setup_sigcontext(regs, &frame->sf_sc);
3239 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3240 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
3244 * Arguments to signal handler:
3246 * a0 = signal number
3247 * a1 = 0 (should be cause)
3248 * a2 = pointer to struct sigcontext
3250 * $25 and PC point to the signal handler, $29 points to the
3251 * struct sigframe.
3253 regs->active_tc.gpr[ 4] = sig;
3254 regs->active_tc.gpr[ 5] = 0;
3255 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
3256 regs->active_tc.gpr[29] = frame_addr;
3257 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
3258 /* The original kernel code sets CP0_EPC to the handler,
3259 * since it returns to userland using eret;
3260 * we cannot do this here, so we must set PC directly */
3261 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
3262 mips_set_hflags_isa_mode_from_pc(regs);
3263 unlock_user_struct(frame, frame_addr, 1);
3264 return;
3266 give_sigsegv:
3267 force_sigsegv(sig);
3270 long do_sigreturn(CPUMIPSState *regs)
3272 struct sigframe *frame;
3273 abi_ulong frame_addr;
3274 sigset_t blocked;
3275 target_sigset_t target_set;
3276 int i;
3278 frame_addr = regs->active_tc.gpr[29];
3279 trace_user_do_sigreturn(regs, frame_addr);
3280 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3281 goto badframe;
3283 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3284 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
3287 target_to_host_sigset_internal(&blocked, &target_set);
3288 set_sigmask(&blocked);
3290 restore_sigcontext(regs, &frame->sf_sc);
3292 #if 0
3294 * Don't let your children do this ...
3296 __asm__ __volatile__(
3297 "move\t$29, %0\n\t"
3298 "j\tsyscall_exit"
3299 :/* no outputs */
3300 :"r" (&regs));
3301 /* Unreached */
3302 #endif
3304 regs->active_tc.PC = regs->CP0_EPC;
3305 mips_set_hflags_isa_mode_from_pc(regs);
3306 /* I am not sure this is right, but it seems to work;
3307 * maybe there is a problem with nested signals? */
3308 regs->CP0_EPC = 0;
3309 return -TARGET_QEMU_ESIGRETURN;
3311 badframe:
3312 force_sig(TARGET_SIGSEGV);
3313 return -TARGET_QEMU_ESIGRETURN;
3315 # endif /* O32 */
3317 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3318 target_siginfo_t *info,
3319 target_sigset_t *set, CPUMIPSState *env)
3321 struct target_rt_sigframe *frame;
3322 abi_ulong frame_addr;
3323 int i;
3325 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3326 trace_user_setup_rt_frame(env, frame_addr);
3327 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3328 goto give_sigsegv;
3331 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3333 tswap_siginfo(&frame->rs_info, info);
3335 __put_user(0, &frame->rs_uc.tuc_flags);
3336 __put_user(0, &frame->rs_uc.tuc_link);
3337 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3338 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3339 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3340 &frame->rs_uc.tuc_stack.ss_flags);
3342 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3344 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3345 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3349 * Arguments to signal handler:
3351 * a0 = signal number
3352 * a1 = pointer to siginfo_t
3353 * a2 = pointer to ucontext_t
3355 * $25 and PC point to the signal handler, $29 points to the
3356 * struct sigframe.
3358 env->active_tc.gpr[ 4] = sig;
3359 env->active_tc.gpr[ 5] = frame_addr
3360 + offsetof(struct target_rt_sigframe, rs_info);
3361 env->active_tc.gpr[ 6] = frame_addr
3362 + offsetof(struct target_rt_sigframe, rs_uc);
3363 env->active_tc.gpr[29] = frame_addr;
3364 env->active_tc.gpr[31] = frame_addr
3365 + offsetof(struct target_rt_sigframe, rs_code);
3366 /* The original kernel code sets CP0_EPC to the handler,
3367 * since it returns to userland using eret;
3368 * we cannot do this here, so we must set PC directly */
3369 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3370 mips_set_hflags_isa_mode_from_pc(env);
3371 unlock_user_struct(frame, frame_addr, 1);
3372 return;
3374 give_sigsegv:
3375 unlock_user_struct(frame, frame_addr, 1);
3376 force_sigsegv(sig);
3379 long do_rt_sigreturn(CPUMIPSState *env)
3381 struct target_rt_sigframe *frame;
3382 abi_ulong frame_addr;
3383 sigset_t blocked;
3385 frame_addr = env->active_tc.gpr[29];
3386 trace_user_do_rt_sigreturn(env, frame_addr);
3387 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3388 goto badframe;
3391 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3392 set_sigmask(&blocked);
3394 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3396 if (do_sigaltstack(frame_addr +
3397 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3398 0, get_sp_from_cpustate(env)) == -EFAULT)
3399 goto badframe;
3401 env->active_tc.PC = env->CP0_EPC;
3402 mips_set_hflags_isa_mode_from_pc(env);
3403 /* I am not sure this is right, but it seems to work;
3404 * maybe there is a problem with nested signals? */
3405 env->CP0_EPC = 0;
3406 return -TARGET_QEMU_ESIGRETURN;
3408 badframe:
3409 force_sig(TARGET_SIGSEGV);
3410 return -TARGET_QEMU_ESIGRETURN;
3413 #elif defined(TARGET_SH4)
3416 * code and data structures from linux kernel:
3417 * include/asm-sh/sigcontext.h
3418 * arch/sh/kernel/signal.c
3421 struct target_sigcontext {
3422 target_ulong oldmask;
3424 /* CPU registers */
3425 target_ulong sc_gregs[16];
3426 target_ulong sc_pc;
3427 target_ulong sc_pr;
3428 target_ulong sc_sr;
3429 target_ulong sc_gbr;
3430 target_ulong sc_mach;
3431 target_ulong sc_macl;
3433 /* FPU registers */
3434 target_ulong sc_fpregs[16];
3435 target_ulong sc_xfpregs[16];
3436 unsigned int sc_fpscr;
3437 unsigned int sc_fpul;
3438 unsigned int sc_ownedfp;
3441 struct target_sigframe
3443 struct target_sigcontext sc;
3444 target_ulong extramask[TARGET_NSIG_WORDS-1];
3445 uint16_t retcode[3];
3449 struct target_ucontext {
3450 target_ulong tuc_flags;
3451 struct target_ucontext *tuc_link;
3452 target_stack_t tuc_stack;
3453 struct target_sigcontext tuc_mcontext;
3454 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3457 struct target_rt_sigframe
3459 struct target_siginfo info;
3460 struct target_ucontext uc;
3461 uint16_t retcode[3];
3465 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3466 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3468 static abi_ulong get_sigframe(struct target_sigaction *ka,
3469 unsigned long sp, size_t frame_size)
3471 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3472 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3475 return (sp - frame_size) & -8ul;
3478 /* Notice when we're in the middle of a gUSA region and reset.
3479 Note that this will only occur for !parallel_cpus, as we will
3480 translate such sequences differently in a parallel context. */
3481 static void unwind_gusa(CPUSH4State *regs)
3483 /* If the stack pointer is sufficiently negative, and we haven't
3484 completed the sequence, then reset to the entry to the region. */
3485 /* ??? The SH4 kernel checks for an address above 0xC0000000.
3486 However, the page mappings in qemu linux-user aren't as restricted
3487 and we wind up with the normal stack mapped above 0xF0000000.
3488 That said, there is no reason why the kernel should be allowing
3489 a gUSA region that spans 1GB. Use a tighter check here, for what
3490 can actually be enabled by the immediate move. */
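/* While a gUSA sequence is in flight, by convention R0 holds the address
 * just past the region, R1 holds the saved stack pointer and R15 holds the
 * negated region size, which is what the checks and resets below rely on. */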
3491 if (regs->gregs[15] >= -128u && regs->pc < regs->gregs[0]) {
3492 /* Reset the PC to before the gUSA region, as computed from
3493 R0 = region end, SP = -(region size), plus one more for the
3494 insn that actually initializes SP to the region size. */
3495 regs->pc = regs->gregs[0] + regs->gregs[15] - 2;
3497 /* Reset the SP to the saved version in R1. */
3498 regs->gregs[15] = regs->gregs[1];
3502 static void setup_sigcontext(struct target_sigcontext *sc,
3503 CPUSH4State *regs, unsigned long mask)
3505 int i;
3507 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3508 COPY(gregs[0]); COPY(gregs[1]);
3509 COPY(gregs[2]); COPY(gregs[3]);
3510 COPY(gregs[4]); COPY(gregs[5]);
3511 COPY(gregs[6]); COPY(gregs[7]);
3512 COPY(gregs[8]); COPY(gregs[9]);
3513 COPY(gregs[10]); COPY(gregs[11]);
3514 COPY(gregs[12]); COPY(gregs[13]);
3515 COPY(gregs[14]); COPY(gregs[15]);
3516 COPY(gbr); COPY(mach);
3517 COPY(macl); COPY(pr);
3518 COPY(sr); COPY(pc);
3519 #undef COPY
3521 for (i=0; i<16; i++) {
3522 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3524 __put_user(regs->fpscr, &sc->sc_fpscr);
3525 __put_user(regs->fpul, &sc->sc_fpul);
3527 /* non-iBCS2 extensions.. */
3528 __put_user(mask, &sc->oldmask);
3531 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc)
3533 int i;
3535 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3536 COPY(gregs[0]); COPY(gregs[1]);
3537 COPY(gregs[2]); COPY(gregs[3]);
3538 COPY(gregs[4]); COPY(gregs[5]);
3539 COPY(gregs[6]); COPY(gregs[7]);
3540 COPY(gregs[8]); COPY(gregs[9]);
3541 COPY(gregs[10]); COPY(gregs[11]);
3542 COPY(gregs[12]); COPY(gregs[13]);
3543 COPY(gregs[14]); COPY(gregs[15]);
3544 COPY(gbr); COPY(mach);
3545 COPY(macl); COPY(pr);
3546 COPY(sr); COPY(pc);
3547 #undef COPY
3549 for (i=0; i<16; i++) {
3550 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3552 __get_user(regs->fpscr, &sc->sc_fpscr);
3553 __get_user(regs->fpul, &sc->sc_fpul);
3555 regs->tra = -1; /* disable syscall checks */
3556 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3559 static void setup_frame(int sig, struct target_sigaction *ka,
3560 target_sigset_t *set, CPUSH4State *regs)
3562 struct target_sigframe *frame;
3563 abi_ulong frame_addr;
3564 int i;
3566 unwind_gusa(regs);
3568 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3569 trace_user_setup_frame(regs, frame_addr);
3570 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3571 goto give_sigsegv;
3574 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3576 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3577 __put_user(set->sig[i + 1], &frame->extramask[i]);
3580 /* Set up to return from userspace. If provided, use a stub
3581 already in userspace. */
3582 if (ka->sa_flags & TARGET_SA_RESTORER) {
3583 regs->pr = (unsigned long) ka->sa_restorer;
3584 } else {
3585 /* Generate return code (system call to sigreturn) */
3586 abi_ulong retcode_addr = frame_addr +
3587 offsetof(struct target_sigframe, retcode);
3588 __put_user(MOVW(2), &frame->retcode[0]);
3589 __put_user(TRAP_NOARG, &frame->retcode[1]);
3590 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
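/* The trampoline is thus "mov.w @(PC-relative),r3; trapa #0x10", with the
 * syscall number stored as data in retcode[2], which the mov.w loads. */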
3591 regs->pr = (unsigned long) retcode_addr;
3594 /* Set up registers for signal handler */
3595 regs->gregs[15] = frame_addr;
3596 regs->gregs[4] = sig; /* Arg for signal handler */
3597 regs->gregs[5] = 0;
3598 regs->gregs[6] = frame_addr += offsetof(typeof(*frame), sc);
3599 regs->pc = (unsigned long) ka->_sa_handler;
3600 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3602 unlock_user_struct(frame, frame_addr, 1);
3603 return;
3605 give_sigsegv:
3606 unlock_user_struct(frame, frame_addr, 1);
3607 force_sigsegv(sig);
3610 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3611 target_siginfo_t *info,
3612 target_sigset_t *set, CPUSH4State *regs)
3614 struct target_rt_sigframe *frame;
3615 abi_ulong frame_addr;
3616 int i;
3618 unwind_gusa(regs);
3620 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3621 trace_user_setup_rt_frame(regs, frame_addr);
3622 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3623 goto give_sigsegv;
3626 tswap_siginfo(&frame->info, info);
3628 /* Create the ucontext. */
3629 __put_user(0, &frame->uc.tuc_flags);
3630 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3631 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3632 &frame->uc.tuc_stack.ss_sp);
3633 __put_user(sas_ss_flags(regs->gregs[15]),
3634 &frame->uc.tuc_stack.ss_flags);
3635 __put_user(target_sigaltstack_used.ss_size,
3636 &frame->uc.tuc_stack.ss_size);
3637 setup_sigcontext(&frame->uc.tuc_mcontext,
3638 regs, set->sig[0]);
3639 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3640 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3643 /* Set up to return from userspace. If provided, use a stub
3644 already in userspace. */
3645 if (ka->sa_flags & TARGET_SA_RESTORER) {
3646 regs->pr = (unsigned long) ka->sa_restorer;
3647 } else {
3648 /* Generate return code (system call to sigreturn) */
3649 abi_ulong retcode_addr = frame_addr +
3650 offsetof(struct target_rt_sigframe, retcode);
3651 __put_user(MOVW(2), &frame->retcode[0]);
3652 __put_user(TRAP_NOARG, &frame->retcode[1]);
3653 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3654 regs->pr = (unsigned long) retcode_addr;
3657 /* Set up registers for signal handler */
3658 regs->gregs[15] = frame_addr;
3659 regs->gregs[4] = sig; /* Arg for signal handler */
3660 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3661 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3662 regs->pc = (unsigned long) ka->_sa_handler;
3663 regs->flags &= ~(DELAY_SLOT_MASK | GUSA_MASK);
3665 unlock_user_struct(frame, frame_addr, 1);
3666 return;
3668 give_sigsegv:
3669 unlock_user_struct(frame, frame_addr, 1);
3670 force_sigsegv(sig);
3673 long do_sigreturn(CPUSH4State *regs)
3675 struct target_sigframe *frame;
3676 abi_ulong frame_addr;
3677 sigset_t blocked;
3678 target_sigset_t target_set;
3679 int i;
3680 int err = 0;
3682 frame_addr = regs->gregs[15];
3683 trace_user_do_sigreturn(regs, frame_addr);
3684 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3685 goto badframe;
3688 __get_user(target_set.sig[0], &frame->sc.oldmask);
3689 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3690 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3693 if (err)
3694 goto badframe;
3696 target_to_host_sigset_internal(&blocked, &target_set);
3697 set_sigmask(&blocked);
3699 restore_sigcontext(regs, &frame->sc);
3701 unlock_user_struct(frame, frame_addr, 0);
3702 return -TARGET_QEMU_ESIGRETURN;
3704 badframe:
3705 unlock_user_struct(frame, frame_addr, 0);
3706 force_sig(TARGET_SIGSEGV);
3707 return -TARGET_QEMU_ESIGRETURN;
3710 long do_rt_sigreturn(CPUSH4State *regs)
3712 struct target_rt_sigframe *frame;
3713 abi_ulong frame_addr;
3714 sigset_t blocked;
3716 frame_addr = regs->gregs[15];
3717 trace_user_do_rt_sigreturn(regs, frame_addr);
3718 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
3719 goto badframe;
3722 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3723 set_sigmask(&blocked);
3725 restore_sigcontext(regs, &frame->uc.tuc_mcontext);
3727 if (do_sigaltstack(frame_addr +
3728 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3729 0, get_sp_from_cpustate(regs)) == -EFAULT) {
3730 goto badframe;
3733 unlock_user_struct(frame, frame_addr, 0);
3734 return -TARGET_QEMU_ESIGRETURN;
3736 badframe:
3737 unlock_user_struct(frame, frame_addr, 0);
3738 force_sig(TARGET_SIGSEGV);
3739 return -TARGET_QEMU_ESIGRETURN;
3741 #elif defined(TARGET_MICROBLAZE)
3743 struct target_sigcontext {
3744 struct target_pt_regs regs; /* needs to be first */
3745 uint32_t oldmask;
3748 struct target_stack_t {
3749 abi_ulong ss_sp;
3750 int ss_flags;
3751 unsigned int ss_size;
3754 struct target_ucontext {
3755 abi_ulong tuc_flags;
3756 abi_ulong tuc_link;
3757 struct target_stack_t tuc_stack;
3758 struct target_sigcontext tuc_mcontext;
3759 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3762 /* Signal frames. */
3763 struct target_signal_frame {
3764 struct target_ucontext uc;
3765 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3766 uint32_t tramp[2];
3769 struct rt_signal_frame {
3770 siginfo_t info;
3771 ucontext_t uc;
3772 uint32_t tramp[2];
3775 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3777 __put_user(env->regs[0], &sc->regs.r0);
3778 __put_user(env->regs[1], &sc->regs.r1);
3779 __put_user(env->regs[2], &sc->regs.r2);
3780 __put_user(env->regs[3], &sc->regs.r3);
3781 __put_user(env->regs[4], &sc->regs.r4);
3782 __put_user(env->regs[5], &sc->regs.r5);
3783 __put_user(env->regs[6], &sc->regs.r6);
3784 __put_user(env->regs[7], &sc->regs.r7);
3785 __put_user(env->regs[8], &sc->regs.r8);
3786 __put_user(env->regs[9], &sc->regs.r9);
3787 __put_user(env->regs[10], &sc->regs.r10);
3788 __put_user(env->regs[11], &sc->regs.r11);
3789 __put_user(env->regs[12], &sc->regs.r12);
3790 __put_user(env->regs[13], &sc->regs.r13);
3791 __put_user(env->regs[14], &sc->regs.r14);
3792 __put_user(env->regs[15], &sc->regs.r15);
3793 __put_user(env->regs[16], &sc->regs.r16);
3794 __put_user(env->regs[17], &sc->regs.r17);
3795 __put_user(env->regs[18], &sc->regs.r18);
3796 __put_user(env->regs[19], &sc->regs.r19);
3797 __put_user(env->regs[20], &sc->regs.r20);
3798 __put_user(env->regs[21], &sc->regs.r21);
3799 __put_user(env->regs[22], &sc->regs.r22);
3800 __put_user(env->regs[23], &sc->regs.r23);
3801 __put_user(env->regs[24], &sc->regs.r24);
3802 __put_user(env->regs[25], &sc->regs.r25);
3803 __put_user(env->regs[26], &sc->regs.r26);
3804 __put_user(env->regs[27], &sc->regs.r27);
3805 __put_user(env->regs[28], &sc->regs.r28);
3806 __put_user(env->regs[29], &sc->regs.r29);
3807 __put_user(env->regs[30], &sc->regs.r30);
3808 __put_user(env->regs[31], &sc->regs.r31);
3809 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3812 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3814 __get_user(env->regs[0], &sc->regs.r0);
3815 __get_user(env->regs[1], &sc->regs.r1);
3816 __get_user(env->regs[2], &sc->regs.r2);
3817 __get_user(env->regs[3], &sc->regs.r3);
3818 __get_user(env->regs[4], &sc->regs.r4);
3819 __get_user(env->regs[5], &sc->regs.r5);
3820 __get_user(env->regs[6], &sc->regs.r6);
3821 __get_user(env->regs[7], &sc->regs.r7);
3822 __get_user(env->regs[8], &sc->regs.r8);
3823 __get_user(env->regs[9], &sc->regs.r9);
3824 __get_user(env->regs[10], &sc->regs.r10);
3825 __get_user(env->regs[11], &sc->regs.r11);
3826 __get_user(env->regs[12], &sc->regs.r12);
3827 __get_user(env->regs[13], &sc->regs.r13);
3828 __get_user(env->regs[14], &sc->regs.r14);
3829 __get_user(env->regs[15], &sc->regs.r15);
3830 __get_user(env->regs[16], &sc->regs.r16);
3831 __get_user(env->regs[17], &sc->regs.r17);
3832 __get_user(env->regs[18], &sc->regs.r18);
3833 __get_user(env->regs[19], &sc->regs.r19);
3834 __get_user(env->regs[20], &sc->regs.r20);
3835 __get_user(env->regs[21], &sc->regs.r21);
3836 __get_user(env->regs[22], &sc->regs.r22);
3837 __get_user(env->regs[23], &sc->regs.r23);
3838 __get_user(env->regs[24], &sc->regs.r24);
3839 __get_user(env->regs[25], &sc->regs.r25);
3840 __get_user(env->regs[26], &sc->regs.r26);
3841 __get_user(env->regs[27], &sc->regs.r27);
3842 __get_user(env->regs[28], &sc->regs.r28);
3843 __get_user(env->regs[29], &sc->regs.r29);
3844 __get_user(env->regs[30], &sc->regs.r30);
3845 __get_user(env->regs[31], &sc->regs.r31);
3846 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3849 static abi_ulong get_sigframe(struct target_sigaction *ka,
3850 CPUMBState *env, int frame_size)
3852 abi_ulong sp = env->regs[1];
3854 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3855 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3858 return ((sp - frame_size) & -8UL);
3861 static void setup_frame(int sig, struct target_sigaction *ka,
3862 target_sigset_t *set, CPUMBState *env)
3864 struct target_signal_frame *frame;
3865 abi_ulong frame_addr;
3866 int i;
3868 frame_addr = get_sigframe(ka, env, sizeof *frame);
3869 trace_user_setup_frame(env, frame_addr);
3870 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3871 goto badframe;
3873 /* Save the mask. */
3874 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3876 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3877 __put_user(set->sig[i], &frame->extramask[i - 1]);
3880 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3882 /* Set up to return from userspace. If provided, use a stub
3883 already in userspace. */
3884 /* The -8 offset caters for the "rtsd r15, 8" return offset */
3885 if (ka->sa_flags & TARGET_SA_RESTORER) {
3886 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3887 } else {
3888 uint32_t t;
3889 /* Note, these encodings are _big endian_! */
3890 /* addi r12, r0, __NR_sigreturn */
3891 t = 0x31800000UL | TARGET_NR_sigreturn;
3892 __put_user(t, frame->tramp + 0);
3893 /* brki r14, 0x8 */
3894 t = 0xb9cc0008UL;
3895 __put_user(t, frame->tramp + 1);
3897 /* Return from sighandler will jump to the tramp.
3898 Negative 8 offset because return is rtsd r15, 8 */
3899 env->regs[15] = frame_addr + offsetof(struct target_signal_frame, tramp)
3900 - 8;
3903 /* Set up registers for signal handler */
3904 env->regs[1] = frame_addr;
3905 /* Signal handler args: */
3906 env->regs[5] = sig; /* Arg 0: signum */
3907 env->regs[6] = 0;
3908 /* arg 1: sigcontext */
3909 env->regs[7] = frame_addr += offsetof(typeof(*frame), uc);
3911 /* Offset of 4 to handle microblaze rtid r14, 0 */
3912 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3914 unlock_user_struct(frame, frame_addr, 1);
3915 return;
3916 badframe:
3917 force_sigsegv(sig);
3920 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3921 target_siginfo_t *info,
3922 target_sigset_t *set, CPUMBState *env)
3924 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3927 long do_sigreturn(CPUMBState *env)
3929 struct target_signal_frame *frame;
3930 abi_ulong frame_addr;
3931 target_sigset_t target_set;
3932 sigset_t set;
3933 int i;
3935 frame_addr = env->regs[R_SP];
3936 trace_user_do_sigreturn(env, frame_addr);
3937 /* Make sure the guest isn't playing games. */
3938 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3939 goto badframe;
3941 /* Restore blocked signals */
3942 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3943 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3944 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3946 target_to_host_sigset_internal(&set, &target_set);
3947 set_sigmask(&set);
3949 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3950 /* We got here through a sigreturn syscall, our path back is via an
3951 rtb insn so setup r14 for that. */
3952 env->regs[14] = env->sregs[SR_PC];
3954 unlock_user_struct(frame, frame_addr, 0);
3955 return -TARGET_QEMU_ESIGRETURN;
3956 badframe:
3957 force_sig(TARGET_SIGSEGV);
3958 return -TARGET_QEMU_ESIGRETURN;
3961 long do_rt_sigreturn(CPUMBState *env)
3963 trace_user_do_rt_sigreturn(env, 0);
3964 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3965 return -TARGET_ENOSYS;
3968 #elif defined(TARGET_CRIS)
3970 struct target_sigcontext {
3971 struct target_pt_regs regs; /* needs to be first */
3972 uint32_t oldmask;
3973 uint32_t usp; /* usp before stacking this gunk on it */
3976 /* Signal frames. */
3977 struct target_signal_frame {
3978 struct target_sigcontext sc;
3979 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3980 uint16_t retcode[4]; /* Trampoline code. */
3983 struct rt_signal_frame {
3984 siginfo_t *pinfo;
3985 void *puc;
3986 siginfo_t info;
3987 ucontext_t uc;
3988 uint16_t retcode[4]; /* Trampoline code. */
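/* setup_sigcontext()/restore_sigcontext() shuffle the CRIS user state:
   r0-r13 map directly, r14 is kept as the saved user stack pointer
   (usp), r15 as acr, plus the mof and srp special registers, with the
   PC held in regs.erp. */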
3991 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3993 __put_user(env->regs[0], &sc->regs.r0);
3994 __put_user(env->regs[1], &sc->regs.r1);
3995 __put_user(env->regs[2], &sc->regs.r2);
3996 __put_user(env->regs[3], &sc->regs.r3);
3997 __put_user(env->regs[4], &sc->regs.r4);
3998 __put_user(env->regs[5], &sc->regs.r5);
3999 __put_user(env->regs[6], &sc->regs.r6);
4000 __put_user(env->regs[7], &sc->regs.r7);
4001 __put_user(env->regs[8], &sc->regs.r8);
4002 __put_user(env->regs[9], &sc->regs.r9);
4003 __put_user(env->regs[10], &sc->regs.r10);
4004 __put_user(env->regs[11], &sc->regs.r11);
4005 __put_user(env->regs[12], &sc->regs.r12);
4006 __put_user(env->regs[13], &sc->regs.r13);
4007 __put_user(env->regs[14], &sc->usp);
4008 __put_user(env->regs[15], &sc->regs.acr);
4009 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
4010 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
4011 __put_user(env->pc, &sc->regs.erp);
4014 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
4016 __get_user(env->regs[0], &sc->regs.r0);
4017 __get_user(env->regs[1], &sc->regs.r1);
4018 __get_user(env->regs[2], &sc->regs.r2);
4019 __get_user(env->regs[3], &sc->regs.r3);
4020 __get_user(env->regs[4], &sc->regs.r4);
4021 __get_user(env->regs[5], &sc->regs.r5);
4022 __get_user(env->regs[6], &sc->regs.r6);
4023 __get_user(env->regs[7], &sc->regs.r7);
4024 __get_user(env->regs[8], &sc->regs.r8);
4025 __get_user(env->regs[9], &sc->regs.r9);
4026 __get_user(env->regs[10], &sc->regs.r10);
4027 __get_user(env->regs[11], &sc->regs.r11);
4028 __get_user(env->regs[12], &sc->regs.r12);
4029 __get_user(env->regs[13], &sc->regs.r13);
4030 __get_user(env->regs[14], &sc->usp);
4031 __get_user(env->regs[15], &sc->regs.acr);
4032 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
4033 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
4034 __get_user(env->pc, &sc->regs.erp);
4037 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
4039 abi_ulong sp;
4040 /* Align the stack downwards to 4. */
4041 sp = (env->regs[R_SP] & ~3);
4042 return sp - framesize;
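/* setup_frame() links PR_SRP to frame->retcode, so returning from the
   handler executes the "movu.w __NR_sigreturn,r9; break 13" stub
   written there and re-enters via the sigreturn syscall. */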
4045 static void setup_frame(int sig, struct target_sigaction *ka,
4046 target_sigset_t *set, CPUCRISState *env)
4048 struct target_signal_frame *frame;
4049 abi_ulong frame_addr;
4050 int i;
4052 frame_addr = get_sigframe(env, sizeof *frame);
4053 trace_user_setup_frame(env, frame_addr);
4054 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
4055 goto badframe;
4058 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
4059 * use this trampoline anymore but it sets it up for GDB.
4060 * In QEMU, using the trampoline simplifies things a bit so we use it.
4062 * This is movu.w __NR_sigreturn, r9; break 13;
4064 __put_user(0x9c5f, frame->retcode+0);
4065 __put_user(TARGET_NR_sigreturn,
4066 frame->retcode + 1);
4067 __put_user(0xe93d, frame->retcode + 2);
4069 /* Save the mask. */
4070 __put_user(set->sig[0], &frame->sc.oldmask);
4072 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4073 __put_user(set->sig[i], &frame->extramask[i - 1]);
4076 setup_sigcontext(&frame->sc, env);
4078 /* Move the stack and setup the arguments for the handler. */
4079 env->regs[R_SP] = frame_addr;
4080 env->regs[10] = sig;
4081 env->pc = (unsigned long) ka->_sa_handler;
4082 /* Link SRP so the guest returns through the trampoline. */
4083 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
4085 unlock_user_struct(frame, frame_addr, 1);
4086 return;
4087 badframe:
4088 force_sigsegv(sig);
4091 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4092 target_siginfo_t *info,
4093 target_sigset_t *set, CPUCRISState *env)
4095 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
4098 long do_sigreturn(CPUCRISState *env)
4100 struct target_signal_frame *frame;
4101 abi_ulong frame_addr;
4102 target_sigset_t target_set;
4103 sigset_t set;
4104 int i;
4106 frame_addr = env->regs[R_SP];
4107 trace_user_do_sigreturn(env, frame_addr);
4108 /* Make sure the guest isn't playing games. */
4109 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1)) {
4110 goto badframe;
4113 /* Restore blocked signals */
4114 __get_user(target_set.sig[0], &frame->sc.oldmask);
4115 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
4116 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
4118 target_to_host_sigset_internal(&set, &target_set);
4119 set_sigmask(&set);
4121 restore_sigcontext(&frame->sc, env);
4122 unlock_user_struct(frame, frame_addr, 0);
4123 return -TARGET_QEMU_ESIGRETURN;
4124 badframe:
4125 force_sig(TARGET_SIGSEGV);
4126 return -TARGET_QEMU_ESIGRETURN;
4129 long do_rt_sigreturn(CPUCRISState *env)
4131 trace_user_do_rt_sigreturn(env, 0);
4132 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
4133 return -TARGET_ENOSYS;
4136 #elif defined(TARGET_NIOS2)
4138 #define MCONTEXT_VERSION 2
4140 struct target_sigcontext {
4141 int version;
4142 unsigned long gregs[32];
4145 struct target_ucontext {
4146 abi_ulong tuc_flags;
4147 abi_ulong tuc_link;
4148 target_stack_t tuc_stack;
4149 struct target_sigcontext tuc_mcontext;
4150 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4153 struct target_rt_sigframe {
4154 struct target_siginfo info;
4155 struct target_ucontext uc;
4158 static unsigned long sigsp(unsigned long sp, struct target_sigaction *ka)
4160 if (unlikely((ka->sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp)) {
4161 #ifdef CONFIG_STACK_GROWSUP
4162 return target_sigaltstack_used.ss_sp;
4163 #else
4164 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4165 #endif
4167 return sp;
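/* rt_setup_ucontext() stores r1-r23 at gregs[0..22] (shifted down by
   one), then ra, fp and gp at gregs[23..25] and ea and sp at
   gregs[27..28]; gregs[26] is not written here. */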
4170 static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
4172 unsigned long *gregs = uc->tuc_mcontext.gregs;
4174 __put_user(MCONTEXT_VERSION, &uc->tuc_mcontext.version);
4175 __put_user(env->regs[1], &gregs[0]);
4176 __put_user(env->regs[2], &gregs[1]);
4177 __put_user(env->regs[3], &gregs[2]);
4178 __put_user(env->regs[4], &gregs[3]);
4179 __put_user(env->regs[5], &gregs[4]);
4180 __put_user(env->regs[6], &gregs[5]);
4181 __put_user(env->regs[7], &gregs[6]);
4182 __put_user(env->regs[8], &gregs[7]);
4183 __put_user(env->regs[9], &gregs[8]);
4184 __put_user(env->regs[10], &gregs[9]);
4185 __put_user(env->regs[11], &gregs[10]);
4186 __put_user(env->regs[12], &gregs[11]);
4187 __put_user(env->regs[13], &gregs[12]);
4188 __put_user(env->regs[14], &gregs[13]);
4189 __put_user(env->regs[15], &gregs[14]);
4190 __put_user(env->regs[16], &gregs[15]);
4191 __put_user(env->regs[17], &gregs[16]);
4192 __put_user(env->regs[18], &gregs[17]);
4193 __put_user(env->regs[19], &gregs[18]);
4194 __put_user(env->regs[20], &gregs[19]);
4195 __put_user(env->regs[21], &gregs[20]);
4196 __put_user(env->regs[22], &gregs[21]);
4197 __put_user(env->regs[23], &gregs[22]);
4198 __put_user(env->regs[R_RA], &gregs[23]);
4199 __put_user(env->regs[R_FP], &gregs[24]);
4200 __put_user(env->regs[R_GP], &gregs[25]);
4201 __put_user(env->regs[R_EA], &gregs[27]);
4202 __put_user(env->regs[R_SP], &gregs[28]);
4204 return 0;
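/* rt_restore_ucontext() checks the mcontext version tag and fails
   (returns 1) on a mismatch; otherwise it reloads the registers saved
   above, reinstates the altstack settings via do_sigaltstack() and
   passes the syscall return value back through *pr2. */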
4207 static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
4208 int *pr2)
4210 int temp;
4211 abi_ulong off, frame_addr = env->regs[R_SP];
4212 unsigned long *gregs = uc->tuc_mcontext.gregs;
4213 int err;
4215 /* Always make any pending restarted system calls return -EINTR */
4216 /* current->restart_block.fn = do_no_restart_syscall; */
4218 __get_user(temp, &uc->tuc_mcontext.version);
4219 if (temp != MCONTEXT_VERSION) {
4220 return 1;
4223 /* restore passed registers */
4224 __get_user(env->regs[1], &gregs[0]);
4225 __get_user(env->regs[2], &gregs[1]);
4226 __get_user(env->regs[3], &gregs[2]);
4227 __get_user(env->regs[4], &gregs[3]);
4228 __get_user(env->regs[5], &gregs[4]);
4229 __get_user(env->regs[6], &gregs[5]);
4230 __get_user(env->regs[7], &gregs[6]);
4231 __get_user(env->regs[8], &gregs[7]);
4232 __get_user(env->regs[9], &gregs[8]);
4233 __get_user(env->regs[10], &gregs[9]);
4234 __get_user(env->regs[11], &gregs[10]);
4235 __get_user(env->regs[12], &gregs[11]);
4236 __get_user(env->regs[13], &gregs[12]);
4237 __get_user(env->regs[14], &gregs[13]);
4238 __get_user(env->regs[15], &gregs[14]);
4239 __get_user(env->regs[16], &gregs[15]);
4240 __get_user(env->regs[17], &gregs[16]);
4241 __get_user(env->regs[18], &gregs[17]);
4242 __get_user(env->regs[19], &gregs[18]);
4243 __get_user(env->regs[20], &gregs[19]);
4244 __get_user(env->regs[21], &gregs[20]);
4245 __get_user(env->regs[22], &gregs[21]);
4246 __get_user(env->regs[23], &gregs[22]);
4247 /* gregs[23] is handled below */
4248 /* Verify, should this be settable */
4249 __get_user(env->regs[R_FP], &gregs[24]);
4250 /* Verify, should this be settable */
4251 __get_user(env->regs[R_GP], &gregs[25]);
4252 /* Not really necessary; there are no user-settable bits here */
4253 __get_user(temp, &gregs[26]);
4254 __get_user(env->regs[R_EA], &gregs[27]);
4256 __get_user(env->regs[R_RA], &gregs[23]);
4257 __get_user(env->regs[R_SP], &gregs[28]);
4259 off = offsetof(struct target_rt_sigframe, uc.tuc_stack);
4260 err = do_sigaltstack(frame_addr + off, 0, get_sp_from_cpustate(env));
4261 if (err == -EFAULT) {
4262 return 1;
4265 *pr2 = env->regs[2];
4266 return 0;
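/* The frame address is aligned down to 8 bytes by get_sigframe();
   there is no on-stack trampoline; instead ra is pointed at the fixed
   sigreturn stub on the kuser page (address 0x1044). */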
4269 static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
4270 size_t frame_size)
4272 unsigned long usp;
4274 /* Default to using normal stack. */
4275 usp = env->regs[R_SP];
4277 /* This is the X/Open sanctioned signal stack switching. */
4278 usp = sigsp(usp, ka);
4280 /* Verify, is it 32 or 64 bit aligned */
4281 return (void *)((usp - frame_size) & -8UL);
4284 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4285 target_siginfo_t *info,
4286 target_sigset_t *set,
4287 CPUNios2State *env)
4289 struct target_rt_sigframe *frame;
4290 int i, err = 0;
4292 frame = get_sigframe(ka, env, sizeof(*frame));
4294 if (ka->sa_flags & SA_SIGINFO) {
4295 tswap_siginfo(&frame->info, info);
4298 /* Create the ucontext. */
4299 __put_user(0, &frame->uc.tuc_flags);
4300 __put_user(0, &frame->uc.tuc_link);
4301 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4302 __put_user(sas_ss_flags(env->regs[R_SP]), &frame->uc.tuc_stack.ss_flags);
4303 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4304 err |= rt_setup_ucontext(&frame->uc, env);
4305 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4306 __put_user((abi_ulong)set->sig[i],
4307 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4310 if (err) {
4311 goto give_sigsegv;
4314 /* Set up to return from userspace; jump to fixed address sigreturn
4315 trampoline on kuser page. */
4316 env->regs[R_RA] = (unsigned long) (0x1044);
4318 /* Set up registers for signal handler */
4319 env->regs[R_SP] = (unsigned long) frame;
4320 env->regs[4] = (unsigned long) sig;
4321 env->regs[5] = (unsigned long) &frame->info;
4322 env->regs[6] = (unsigned long) &frame->uc;
4323 env->regs[R_EA] = (unsigned long) ka->_sa_handler;
4324 return;
4326 give_sigsegv:
4327 if (sig == TARGET_SIGSEGV) {
4328 ka->_sa_handler = TARGET_SIG_DFL;
4330 force_sigsegv(sig);
4331 return;
4334 long do_sigreturn(CPUNios2State *env)
4336 trace_user_do_sigreturn(env, 0);
4337 fprintf(stderr, "do_sigreturn: not implemented\n");
4338 return -TARGET_ENOSYS;
4341 long do_rt_sigreturn(CPUNios2State *env)
4343 /* Verify, can we follow the stack back */
4344 abi_ulong frame_addr = env->regs[R_SP];
4345 struct target_rt_sigframe *frame;
4346 sigset_t set;
4347 int rval;
4349 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4350 goto badframe;
4353 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4354 do_sigprocmask(SIG_SETMASK, &set, NULL);
4356 if (rt_restore_ucontext(env, &frame->uc, &rval)) {
4357 goto badframe;
4360 unlock_user_struct(frame, frame_addr, 0);
4361 return rval;
4363 badframe:
4364 unlock_user_struct(frame, frame_addr, 0);
4365 force_sig(TARGET_SIGSEGV);
4366 return 0;
4368 /* TARGET_NIOS2 */
4370 #elif defined(TARGET_OPENRISC)
4372 struct target_sigcontext {
4373 struct target_pt_regs regs;
4374 abi_ulong oldmask;
4375 abi_ulong usp;
4378 struct target_ucontext {
4379 abi_ulong tuc_flags;
4380 abi_ulong tuc_link;
4381 target_stack_t tuc_stack;
4382 struct target_sigcontext tuc_mcontext;
4383 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4386 struct target_rt_sigframe {
4387 abi_ulong pinfo;
4388 uint64_t puc;
4389 struct target_siginfo info;
4390 struct target_sigcontext sc;
4391 struct target_ucontext uc;
4392 unsigned char retcode[16]; /* trampoline code */
4395 /* This is the asm-generic/ucontext.h version */
4396 #if 0
4397 static int restore_sigcontext(CPUOpenRISCState *regs,
4398 struct target_sigcontext *sc)
4400 unsigned int err = 0;
4401 unsigned long old_usp;
4403 /* Always make any pending restarted system call return -EINTR */
4404 current_thread_info()->restart_block.fn = do_no_restart_syscall;
4406 /* restore the regs from &sc->regs (same as sc, since regs is first)
4407 * (sc is already checked for VERIFY_READ since the sigframe was
4408 * checked in sys_sigreturn previously)
4411 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
4412 goto badframe;
4415 /* make sure the U-flag is set so user-mode cannot fool us */
4417 regs->sr &= ~SR_SM;
4419 /* restore the old USP as it was before we stacked the sc etc.
4420 * (we cannot just pop the sigcontext since we aligned the sp and
4421 * stuff after pushing it)
4424 __get_user(old_usp, &sc->usp);
4425 phx_signal("old_usp 0x%lx", old_usp);
4427 __PHX__ REALLY /* ??? */
4428 wrusp(old_usp);
4429 regs->gpr[1] = old_usp;
4431 /* TODO: the other ports use regs->orig_XX to disable syscall checks
4432 * after this completes, but we don't use that mechanism. maybe we can
4433 * use it now ?
4436 return err;
4438 badframe:
4439 return 1;
4441 #endif
4443 /* Set up a signal frame. */
4445 static void setup_sigcontext(struct target_sigcontext *sc,
4446 CPUOpenRISCState *regs,
4447 unsigned long mask)
4449 unsigned long usp = cpu_get_gpr(regs, 1);
4451 /* copy the regs. they are first in sc so we can use sc directly */
4453 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
4455 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
4456 the signal handler. The frametype will be restored to its previous
4457 value in restore_sigcontext. */
4458 /*regs->frametype = CRIS_FRAME_NORMAL;*/
4460 /* then some other stuff */
4461 __put_user(mask, &sc->oldmask);
4462 __put_user(usp, &sc->usp);
4465 static inline unsigned long align_sigframe(unsigned long sp)
4467 return sp & ~3UL;
4470 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
4471 CPUOpenRISCState *regs,
4472 size_t frame_size)
4474 unsigned long sp = cpu_get_gpr(regs, 1);
4475 int onsigstack = on_sig_stack(sp);
4477 /* redzone */
4478 /* This is the X/Open sanctioned signal stack switching. */
4479 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
4480 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
4483 sp = align_sigframe(sp - frame_size);
4486 * If we are on the alternate signal stack and would overflow it, don't.
4487 * Return an always-bogus address instead so we will die with SIGSEGV.
4490 if (onsigstack && !likely(on_sig_stack(sp))) {
4491 return -1L;
4494 return sp;
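/* setup_rt_frame() encodes the return trampoline directly into
   frame->retcode as
       l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop
   and points the handler's link register (r9) at it. */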
4497 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4498 target_siginfo_t *info,
4499 target_sigset_t *set, CPUOpenRISCState *env)
4501 int err = 0;
4502 abi_ulong frame_addr;
4503 unsigned long return_ip;
4504 struct target_rt_sigframe *frame;
4505 abi_ulong info_addr, uc_addr;
4507 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4508 trace_user_setup_rt_frame(env, frame_addr);
4509 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4510 goto give_sigsegv;
4513 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
4514 __put_user(info_addr, &frame->pinfo);
4515 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
4516 __put_user(uc_addr, &frame->puc);
4518 if (ka->sa_flags & SA_SIGINFO) {
4519 tswap_siginfo(&frame->info, info);
4522 /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/
4523 __put_user(0, &frame->uc.tuc_flags);
4524 __put_user(0, &frame->uc.tuc_link);
4525 __put_user(target_sigaltstack_used.ss_sp,
4526 &frame->uc.tuc_stack.ss_sp);
4527 __put_user(sas_ss_flags(cpu_get_gpr(env, 1)),
4528 &frame->uc.tuc_stack.ss_flags);
4529 __put_user(target_sigaltstack_used.ss_size,
4530 &frame->uc.tuc_stack.ss_size);
4531 setup_sigcontext(&frame->sc, env, set->sig[0]);
4533 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
4535 /* trampoline - the desired return ip is the retcode itself */
4536 return_ip = (unsigned long)&frame->retcode;
4537 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1; l.nop */
4538 __put_user(0xa960, (short *)(frame->retcode + 0));
4539 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
4540 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
4541 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
4543 if (err) {
4544 goto give_sigsegv;
4547 /* TODO what is the current->exec_domain stuff and invmap ? */
4549 /* Set up registers for signal handler */
4550 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
4551 cpu_set_gpr(env, 9, (unsigned long)return_ip); /* what we enter LATER */
4552 cpu_set_gpr(env, 3, (unsigned long)sig); /* arg 1: signo */
4553 cpu_set_gpr(env, 4, (unsigned long)&frame->info); /* arg 2: (siginfo_t*) */
4554 cpu_set_gpr(env, 5, (unsigned long)&frame->uc); /* arg 3: ucontext */
4556 /* actually move the usp to reflect the stacked frame */
4557 cpu_set_gpr(env, 1, (unsigned long)frame);
4559 return;
4561 give_sigsegv:
4562 unlock_user_struct(frame, frame_addr, 1);
4563 force_sigsegv(sig);
4566 long do_sigreturn(CPUOpenRISCState *env)
4568 trace_user_do_sigreturn(env, 0);
4569 fprintf(stderr, "do_sigreturn: not implemented\n");
4570 return -TARGET_ENOSYS;
4573 long do_rt_sigreturn(CPUOpenRISCState *env)
4575 trace_user_do_rt_sigreturn(env, 0);
4576 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
4577 return -TARGET_ENOSYS;
4579 /* TARGET_OPENRISC */
4581 #elif defined(TARGET_S390X)
4583 #define __NUM_GPRS 16
4584 #define __NUM_FPRS 16
4585 #define __NUM_ACRS 16
4587 #define S390_SYSCALL_SIZE 2
4588 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4590 #define _SIGCONTEXT_NSIG 64
4591 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4592 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4593 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4594 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4595 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
4597 typedef struct {
4598 target_psw_t psw;
4599 target_ulong gprs[__NUM_GPRS];
4600 unsigned int acrs[__NUM_ACRS];
4601 } target_s390_regs_common;
4603 typedef struct {
4604 unsigned int fpc;
4605 double fprs[__NUM_FPRS];
4606 } target_s390_fp_regs;
4608 typedef struct {
4609 target_s390_regs_common regs;
4610 target_s390_fp_regs fpregs;
4611 } target_sigregs;
4613 struct target_sigcontext {
4614 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4615 target_sigregs *sregs;
4618 typedef struct {
4619 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4620 struct target_sigcontext sc;
4621 target_sigregs sregs;
4622 int signo;
4623 uint8_t retcode[S390_SYSCALL_SIZE];
4624 } sigframe;
4626 struct target_ucontext {
4627 target_ulong tuc_flags;
4628 struct target_ucontext *tuc_link;
4629 target_stack_t tuc_stack;
4630 target_sigregs tuc_mcontext;
4631 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4634 typedef struct {
4635 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4636 uint8_t retcode[S390_SYSCALL_SIZE];
4637 struct target_siginfo info;
4638 struct target_ucontext uc;
4639 } rt_sigframe;
4641 static inline abi_ulong
4642 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4644 abi_ulong sp;
4646 /* Default to using normal stack */
4647 sp = env->regs[15];
4649 /* This is the X/Open sanctioned signal stack switching. */
4650 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4651 if (!sas_ss_flags(sp)) {
4652 sp = target_sigaltstack_used.ss_sp +
4653 target_sigaltstack_used.ss_size;
4657 /* This is the legacy signal stack switching. */
4658 else if (/* FIXME !user_mode(regs) */ 0 &&
4659 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4660 ka->sa_restorer) {
4661 sp = (abi_ulong) ka->sa_restorer;
4664 return (sp - frame_size) & -8ul;
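/* save_sigregs() captures the PSW together with the 16 general, access
   and floating point register sets into the guest sigregs block;
   restore_sigregs() reads them back on sigreturn. */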
4667 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4669 int i;
4670 //save_access_regs(current->thread.acrs); FIXME
4672 /* Copy a 'clean' PSW mask to the user to avoid leaking
4673 information about whether PER is currently on. */
4674 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4675 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4676 for (i = 0; i < 16; i++) {
4677 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4679 for (i = 0; i < 16; i++) {
4680 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4683 * We have to store the fp registers to current->thread.fp_regs
4684 * to merge them with the emulated registers.
4686 //save_fp_regs(&current->thread.fp_regs); FIXME
4687 for (i = 0; i < 16; i++) {
4688 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4692 static void setup_frame(int sig, struct target_sigaction *ka,
4693 target_sigset_t *set, CPUS390XState *env)
4695 sigframe *frame;
4696 abi_ulong frame_addr;
4698 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4699 trace_user_setup_frame(env, frame_addr);
4700 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4701 goto give_sigsegv;
4704 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4706 save_sigregs(env, &frame->sregs);
4708 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4709 (abi_ulong *)&frame->sc.sregs);
4711 /* Set up to return from userspace. If provided, use a stub
4712 already in userspace. */
4713 if (ka->sa_flags & TARGET_SA_RESTORER) {
4714 env->regs[14] = (unsigned long)
4715 ka->sa_restorer | PSW_ADDR_AMODE;
4716 } else {
4717 env->regs[14] = (frame_addr + offsetof(sigframe, retcode))
4718 | PSW_ADDR_AMODE;
4719 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4720 (uint16_t *)(frame->retcode));
4723 /* Set up backchain. */
4724 __put_user(env->regs[15], (abi_ulong *) frame);
4726 /* Set up registers for signal handler */
4727 env->regs[15] = frame_addr;
4728 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4730 env->regs[2] = sig; //map_signal(sig);
4731 env->regs[3] = frame_addr += offsetof(typeof(*frame), sc);
4733 /* We forgot to include these in the sigcontext.
4734 To avoid breaking binary compatibility, they are passed as args. */
4735 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4736 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4738 /* Place signal number on stack to allow backtrace from handler. */
4739 __put_user(env->regs[2], &frame->signo);
4740 unlock_user_struct(frame, frame_addr, 1);
4741 return;
4743 give_sigsegv:
4744 force_sigsegv(sig);
4747 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4748 target_siginfo_t *info,
4749 target_sigset_t *set, CPUS390XState *env)
4751 int i;
4752 rt_sigframe *frame;
4753 abi_ulong frame_addr;
4755 frame_addr = get_sigframe(ka, env, sizeof *frame);
4756 trace_user_setup_rt_frame(env, frame_addr);
4757 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4758 goto give_sigsegv;
4761 tswap_siginfo(&frame->info, info);
4763 /* Create the ucontext. */
4764 __put_user(0, &frame->uc.tuc_flags);
4765 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4766 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4767 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4768 &frame->uc.tuc_stack.ss_flags);
4769 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4770 save_sigregs(env, &frame->uc.tuc_mcontext);
4771 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4772 __put_user((abi_ulong)set->sig[i],
4773 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4776 /* Set up to return from userspace. If provided, use a stub
4777 already in userspace. */
4778 if (ka->sa_flags & TARGET_SA_RESTORER) {
4779 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4780 } else {
4781 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4782 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4783 (uint16_t *)(frame->retcode));
4786 /* Set up backchain. */
4787 __put_user(env->regs[15], (abi_ulong *) frame);
4789 /* Set up registers for signal handler */
4790 env->regs[15] = frame_addr;
4791 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4793 env->regs[2] = sig; //map_signal(sig);
4794 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4795 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4796 return;
4798 give_sigsegv:
4799 force_sigsegv(sig);
4802 static int
4803 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4805 int err = 0;
4806 int i;
4808 for (i = 0; i < 16; i++) {
4809 __get_user(env->regs[i], &sc->regs.gprs[i]);
4812 __get_user(env->psw.mask, &sc->regs.psw.mask);
4813 trace_user_s390x_restore_sigregs(env, (unsigned long long)sc->regs.psw.addr,
4814 (unsigned long long)env->psw.addr);
4815 __get_user(env->psw.addr, &sc->regs.psw.addr);
4816 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4818 for (i = 0; i < 16; i++) {
4819 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4821 for (i = 0; i < 16; i++) {
4822 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4825 return err;
4828 long do_sigreturn(CPUS390XState *env)
4830 sigframe *frame;
4831 abi_ulong frame_addr = env->regs[15];
4832 target_sigset_t target_set;
4833 sigset_t set;
4835 trace_user_do_sigreturn(env, frame_addr);
4836 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4837 goto badframe;
4839 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4841 target_to_host_sigset_internal(&set, &target_set);
4842 set_sigmask(&set); /* ~_BLOCKABLE? */
4844 if (restore_sigregs(env, &frame->sregs)) {
4845 goto badframe;
4848 unlock_user_struct(frame, frame_addr, 0);
4849 return -TARGET_QEMU_ESIGRETURN;
4851 badframe:
4852 force_sig(TARGET_SIGSEGV);
4853 return -TARGET_QEMU_ESIGRETURN;
4856 long do_rt_sigreturn(CPUS390XState *env)
4858 rt_sigframe *frame;
4859 abi_ulong frame_addr = env->regs[15];
4860 sigset_t set;
4862 trace_user_do_rt_sigreturn(env, frame_addr);
4863 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4864 goto badframe;
4866 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4868 set_sigmask(&set); /* ~_BLOCKABLE? */
4870 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4871 goto badframe;
4874 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4875 get_sp_from_cpustate(env)) == -EFAULT) {
4876 goto badframe;
4878 unlock_user_struct(frame, frame_addr, 0);
4879 return -TARGET_QEMU_ESIGRETURN;
4881 badframe:
4882 unlock_user_struct(frame, frame_addr, 0);
4883 force_sig(TARGET_SIGSEGV);
4884 return -TARGET_QEMU_ESIGRETURN;
4887 #elif defined(TARGET_PPC)
4889 /* Size of dummy stack frame allocated when calling signal handler.
4890 See arch/powerpc/include/asm/ptrace.h. */
4891 #if defined(TARGET_PPC64)
4892 #define SIGNAL_FRAMESIZE 128
4893 #else
4894 #define SIGNAL_FRAMESIZE 64
4895 #endif
4897 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4898 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4899 struct target_mcontext {
4900 target_ulong mc_gregs[48];
4901 /* Includes fpscr. */
4902 uint64_t mc_fregs[33];
4903 #if defined(TARGET_PPC64)
4904 /* Pointer to the vector regs */
4905 target_ulong v_regs;
4906 #else
4907 target_ulong mc_pad[2];
4908 #endif
4909 /* We need to handle Altivec and SPE at the same time, which no
4910 kernel needs to do. Fortunately, the kernel defines this bit to
4911 be Altivec-register-large all the time, rather than trying to
4912 twiddle it based on the specific platform. */
4913 union {
4914 /* SPE vector registers. One extra for SPEFSCR. */
4915 uint32_t spe[33];
4916 /* Altivec vector registers. The packing of VSCR and VRSAVE
4917 varies depending on whether we're PPC64 or not: PPC64 splits
4918 them apart; PPC32 stuffs them together.
4919 We also need to account for the VSX registers on PPC64
4921 #if defined(TARGET_PPC64)
4922 #define QEMU_NVRREG (34 + 16)
4923 /* On ppc64, this mcontext structure is naturally *unaligned*,
4924 * or rather it is aligned on an 8-byte boundary but not on
4925 * a 16-byte one. This pad fixes it up. This is also why the
4926 * vector regs are referenced by the v_regs pointer above so
4927 * any amount of padding can be added here
4929 target_ulong pad;
4930 #else
4931 /* On ppc32, we are already aligned to 16 bytes */
4932 #define QEMU_NVRREG 33
4933 #endif
4934 /* We cannot use ppc_avr_t here as we do *not* want the implied
4935 * 16-bytes alignment that would result from it. This would have
4936 * the effect of making the whole struct target_mcontext aligned
4937 * which breaks the layout of struct target_ucontext on ppc64.
4939 uint64_t altivec[QEMU_NVRREG][2];
4940 #undef QEMU_NVRREG
4941 } mc_vregs;
4944 /* See arch/powerpc/include/asm/sigcontext.h. */
4945 struct target_sigcontext {
4946 target_ulong _unused[4];
4947 int32_t signal;
4948 #if defined(TARGET_PPC64)
4949 int32_t pad0;
4950 #endif
4951 target_ulong handler;
4952 target_ulong oldmask;
4953 target_ulong regs; /* struct pt_regs __user * */
4954 #if defined(TARGET_PPC64)
4955 struct target_mcontext mcontext;
4956 #endif
4959 /* Indices for target_mcontext.mc_gregs, below.
4960 See arch/powerpc/include/asm/ptrace.h for details. */
4961 enum {
4962 TARGET_PT_R0 = 0,
4963 TARGET_PT_R1 = 1,
4964 TARGET_PT_R2 = 2,
4965 TARGET_PT_R3 = 3,
4966 TARGET_PT_R4 = 4,
4967 TARGET_PT_R5 = 5,
4968 TARGET_PT_R6 = 6,
4969 TARGET_PT_R7 = 7,
4970 TARGET_PT_R8 = 8,
4971 TARGET_PT_R9 = 9,
4972 TARGET_PT_R10 = 10,
4973 TARGET_PT_R11 = 11,
4974 TARGET_PT_R12 = 12,
4975 TARGET_PT_R13 = 13,
4976 TARGET_PT_R14 = 14,
4977 TARGET_PT_R15 = 15,
4978 TARGET_PT_R16 = 16,
4979 TARGET_PT_R17 = 17,
4980 TARGET_PT_R18 = 18,
4981 TARGET_PT_R19 = 19,
4982 TARGET_PT_R20 = 20,
4983 TARGET_PT_R21 = 21,
4984 TARGET_PT_R22 = 22,
4985 TARGET_PT_R23 = 23,
4986 TARGET_PT_R24 = 24,
4987 TARGET_PT_R25 = 25,
4988 TARGET_PT_R26 = 26,
4989 TARGET_PT_R27 = 27,
4990 TARGET_PT_R28 = 28,
4991 TARGET_PT_R29 = 29,
4992 TARGET_PT_R30 = 30,
4993 TARGET_PT_R31 = 31,
4994 TARGET_PT_NIP = 32,
4995 TARGET_PT_MSR = 33,
4996 TARGET_PT_ORIG_R3 = 34,
4997 TARGET_PT_CTR = 35,
4998 TARGET_PT_LNK = 36,
4999 TARGET_PT_XER = 37,
5000 TARGET_PT_CCR = 38,
5001 /* Yes, there are two registers with #39. One is 64-bit only. */
5002 TARGET_PT_MQ = 39,
5003 TARGET_PT_SOFTE = 39,
5004 TARGET_PT_TRAP = 40,
5005 TARGET_PT_DAR = 41,
5006 TARGET_PT_DSISR = 42,
5007 TARGET_PT_RESULT = 43,
5008 TARGET_PT_REGS_COUNT = 44
5012 struct target_ucontext {
5013 target_ulong tuc_flags;
5014 target_ulong tuc_link; /* ucontext_t __user * */
5015 struct target_sigaltstack tuc_stack;
5016 #if !defined(TARGET_PPC64)
5017 int32_t tuc_pad[7];
5018 target_ulong tuc_regs; /* struct mcontext __user *
5019 points to uc_mcontext field */
5020 #endif
5021 target_sigset_t tuc_sigmask;
5022 #if defined(TARGET_PPC64)
5023 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
5024 struct target_sigcontext tuc_sigcontext;
5025 #else
5026 int32_t tuc_maskext[30];
5027 int32_t tuc_pad2[3];
5028 struct target_mcontext tuc_mcontext;
5029 #endif
5032 /* See arch/powerpc/kernel/signal_32.c. */
5033 struct target_sigframe {
5034 struct target_sigcontext sctx;
5035 struct target_mcontext mctx;
5036 int32_t abigap[56];
5039 #if defined(TARGET_PPC64)
5041 #define TARGET_TRAMP_SIZE 6
5043 struct target_rt_sigframe {
5044 /* sys_rt_sigreturn requires the ucontext be the first field */
5045 struct target_ucontext uc;
5046 target_ulong _unused[2];
5047 uint32_t trampoline[TARGET_TRAMP_SIZE];
5048 target_ulong pinfo; /* struct siginfo __user * */
5049 target_ulong puc; /* void __user * */
5050 struct target_siginfo info;
5051 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
5052 char abigap[288];
5053 } __attribute__((aligned(16)));
5055 #else
5057 struct target_rt_sigframe {
5058 struct target_siginfo info;
5059 struct target_ucontext uc;
5060 int32_t abigap[56];
5063 #endif
5065 #if defined(TARGET_PPC64)
5067 struct target_func_ptr {
5068 target_ulong entry;
5069 target_ulong toc;
5072 #endif
5074 /* We use the mc_pad field for the signal return trampoline. */
5075 #define tramp mc_pad
5077 /* See arch/powerpc/kernel/signal.c. */
5078 static target_ulong get_sigframe(struct target_sigaction *ka,
5079 CPUPPCState *env,
5080 int frame_size)
5082 target_ulong oldsp;
5084 oldsp = env->gpr[1];
5086 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
5087 (sas_ss_flags(oldsp) == 0)) {
5088 oldsp = (target_sigaltstack_used.ss_sp
5089 + target_sigaltstack_used.ss_size);
5092 return (oldsp - frame_size) & ~0xFUL;
5095 #if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
5096 (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
5097 #define PPC_VEC_HI 0
5098 #define PPC_VEC_LO 1
5099 #else
5100 #define PPC_VEC_HI 1
5101 #define PPC_VEC_LO 0
5102 #endif
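/* PPC_VEC_HI/PPC_VEC_LO select which 64-bit half of a ppc_avr_t holds
   the architecturally high and low parts of an Altivec register; the
   choice depends on whether host and target endianness match. */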
5105 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
5107 target_ulong msr = env->msr;
5108 int i;
5109 target_ulong ccr = 0;
5111 /* In general, the kernel attempts to be intelligent about what it
5112 needs to save for Altivec/FP/SPE registers. We don't care that
5113 much, so we just go ahead and save everything. */
5115 /* Save general registers. */
5116 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5117 __put_user(env->gpr[i], &frame->mc_gregs[i]);
5119 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5120 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5121 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5122 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5124 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5125 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
5127 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5129 /* Save Altivec registers if necessary. */
5130 if (env->insns_flags & PPC_ALTIVEC) {
5131 uint32_t *vrsave;
5132 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5133 ppc_avr_t *avr = &env->avr[i];
5134 ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
5136 __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5137 __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5139 /* Set MSR_VR in the saved MSR value to indicate that
5140 frame->mc_vregs contains valid data. */
5141 msr |= MSR_VR;
5142 #if defined(TARGET_PPC64)
5143 vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
5144 /* 64-bit needs to put a pointer to the vectors in the frame */
5145 __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
5146 #else
5147 vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
5148 #endif
5149 __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
5152 /* Save VSX second halves */
5153 if (env->insns_flags2 & PPC2_VSX) {
5154 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5155 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5156 __put_user(env->vsr[i], &vsregs[i]);
5160 /* Save floating point registers. */
5161 if (env->insns_flags & PPC_FLOAT) {
5162 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5163 __put_user(env->fpr[i], &frame->mc_fregs[i]);
5165 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
5168 /* Save SPE registers. The kernel only saves the high half. */
5169 if (env->insns_flags & PPC_SPE) {
5170 #if defined(TARGET_PPC64)
5171 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5172 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
5174 #else
5175 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5176 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5178 #endif
5179 /* Set MSR_SPE in the saved MSR value to indicate that
5180 frame->mc_vregs contains valid data. */
5181 msr |= MSR_SPE;
5182 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5185 /* Store MSR. */
5186 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5189 static void encode_trampoline(int sigret, uint32_t *tramp)
5191 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
5192 if (sigret) {
5193 __put_user(0x38000000 | sigret, &tramp[0]);
5194 __put_user(0x44000002, &tramp[1]);
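/* restore_user_regs() preserves r2 across the restore when called with
   sig == 0, and only folds the saved MSR's little-endian bit back in
   when sig != 0 (signal return). */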
5198 static void restore_user_regs(CPUPPCState *env,
5199 struct target_mcontext *frame, int sig)
5201 target_ulong save_r2 = 0;
5202 target_ulong msr;
5203 target_ulong ccr;
5205 int i;
5207 if (!sig) {
5208 save_r2 = env->gpr[2];
5211 /* Restore general registers. */
5212 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5213 __get_user(env->gpr[i], &frame->mc_gregs[i]);
5215 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
5216 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
5217 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
5218 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
5219 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
5221 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
5222 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
5225 if (!sig) {
5226 env->gpr[2] = save_r2;
5228 /* Restore MSR. */
5229 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
5231 /* If doing signal return, restore the previous little-endian mode. */
5232 if (sig)
5233 env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
5235 /* Restore Altivec registers if necessary. */
5236 if (env->insns_flags & PPC_ALTIVEC) {
5237 ppc_avr_t *v_regs;
5238 uint32_t *vrsave;
5239 #if defined(TARGET_PPC64)
5240 uint64_t v_addr;
5241 /* 64-bit needs to recover the pointer to the vectors from the frame */
5242 __get_user(v_addr, &frame->v_regs);
5243 v_regs = g2h(v_addr);
5244 #else
5245 v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
5246 #endif
5247 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
5248 ppc_avr_t *avr = &env->avr[i];
5249 ppc_avr_t *vreg = &v_regs[i];
5251 __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
5252 __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
5254 /* The saved VRSAVE word sits after the vector registers: at index
5255 33 of the altivec array on ppc64, index 32 on ppc32. */
5256 #if defined(TARGET_PPC64)
5257 vrsave = (uint32_t *)&v_regs[33];
5258 #else
5259 vrsave = (uint32_t *)&v_regs[32];
5260 #endif
5261 __get_user(env->spr[SPR_VRSAVE], vrsave);
5264 /* Restore VSX second halves */
5265 if (env->insns_flags2 & PPC2_VSX) {
5266 uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
5267 for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
5268 __get_user(env->vsr[i], &vsregs[i]);
5272 /* Restore floating point registers. */
5273 if (env->insns_flags & PPC_FLOAT) {
5274 uint64_t fpscr;
5275 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
5276 __get_user(env->fpr[i], &frame->mc_fregs[i]);
5278 __get_user(fpscr, &frame->mc_fregs[32]);
5279 env->fpscr = (uint32_t) fpscr;
5282 /* Restore SPE registers. The kernel only saved the high half. */
5283 if (env->insns_flags & PPC_SPE) {
5284 #if defined(TARGET_PPC64)
5285 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
5286 uint32_t hi;
5288 __get_user(hi, &frame->mc_vregs.spe[i]);
5289 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
5291 #else
5292 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
5293 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
5295 #endif
5296 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
5300 #if !defined(TARGET_PPC64)
5301 static void setup_frame(int sig, struct target_sigaction *ka,
5302 target_sigset_t *set, CPUPPCState *env)
5304 struct target_sigframe *frame;
5305 struct target_sigcontext *sc;
5306 target_ulong frame_addr, newsp;
5307 int err = 0;
5309 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5310 trace_user_setup_frame(env, frame_addr);
5311 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
5312 goto sigsegv;
5313 sc = &frame->sctx;
5315 __put_user(ka->_sa_handler, &sc->handler);
5316 __put_user(set->sig[0], &sc->oldmask);
5317 __put_user(set->sig[1], &sc->_unused[3]);
5318 __put_user(h2g(&frame->mctx), &sc->regs);
5319 __put_user(sig, &sc->signal);
5321 /* Save user regs. */
5322 save_user_regs(env, &frame->mctx);
5324 /* Construct the trampoline code on the stack. */
5325 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
5327 /* The kernel checks for the presence of a VDSO here. We don't
5328 emulate a vdso, so use a sigreturn system call. */
5329 env->lr = (target_ulong) h2g(frame->mctx.tramp);
5331 /* Turn off all fp exceptions. */
5332 env->fpscr = 0;
5334 /* Create a stack frame for the caller of the handler. */
5335 newsp = frame_addr - SIGNAL_FRAMESIZE;
5336 err |= put_user(env->gpr[1], newsp, target_ulong);
5338 if (err)
5339 goto sigsegv;
5341 /* Set up registers for signal handler. */
5342 env->gpr[1] = newsp;
5343 env->gpr[3] = sig;
5344 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
5346 env->nip = (target_ulong) ka->_sa_handler;
5348 /* Signal handlers are entered in big-endian mode. */
5349 env->msr &= ~(1ull << MSR_LE);
5351 unlock_user_struct(frame, frame_addr, 1);
5352 return;
5354 sigsegv:
5355 unlock_user_struct(frame, frame_addr, 1);
5356 force_sigsegv(sig);
5358 #endif /* !defined(TARGET_PPC64) */
5360 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5361 target_siginfo_t *info,
5362 target_sigset_t *set, CPUPPCState *env)
5364 struct target_rt_sigframe *rt_sf;
5365 uint32_t *trampptr = 0;
5366 struct target_mcontext *mctx = 0;
5367 target_ulong rt_sf_addr, newsp = 0;
5368 int i, err = 0;
5369 #if defined(TARGET_PPC64)
5370 struct target_sigcontext *sc = 0;
5371 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
5372 #endif
5374 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
5375 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
5376 goto sigsegv;
5378 tswap_siginfo(&rt_sf->info, info);
5380 __put_user(0, &rt_sf->uc.tuc_flags);
5381 __put_user(0, &rt_sf->uc.tuc_link);
5382 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
5383 &rt_sf->uc.tuc_stack.ss_sp);
5384 __put_user(sas_ss_flags(env->gpr[1]),
5385 &rt_sf->uc.tuc_stack.ss_flags);
5386 __put_user(target_sigaltstack_used.ss_size,
5387 &rt_sf->uc.tuc_stack.ss_size);
5388 #if !defined(TARGET_PPC64)
5389 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
5390 &rt_sf->uc.tuc_regs);
5391 #endif
5392 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5393 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
5396 #if defined(TARGET_PPC64)
5397 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
5398 trampptr = &rt_sf->trampoline[0];
5400 sc = &rt_sf->uc.tuc_sigcontext;
5401 __put_user(h2g(mctx), &sc->regs);
5402 __put_user(sig, &sc->signal);
5403 #else
5404 mctx = &rt_sf->uc.tuc_mcontext;
5405 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
5406 #endif
5408 save_user_regs(env, mctx);
5409 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
5411 /* The kernel checks for the presence of a VDSO here. We don't
5412 emulate a vdso, so use a sigreturn system call. */
5413 env->lr = (target_ulong) h2g(trampptr);
5415 /* Turn off all fp exceptions. */
5416 env->fpscr = 0;
5418 /* Create a stack frame for the caller of the handler. */
5419 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
5420 err |= put_user(env->gpr[1], newsp, target_ulong);
5422 if (err)
5423 goto sigsegv;
5425 /* Set up registers for signal handler. */
5426 env->gpr[1] = newsp;
5427 env->gpr[3] = (target_ulong) sig;
5428 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
5429 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
5430 env->gpr[6] = (target_ulong) h2g(rt_sf);
5432 #if defined(TARGET_PPC64)
5433 if (get_ppc64_abi(image) < 2) {
5434 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
5435 struct target_func_ptr *handler =
5436 (struct target_func_ptr *)g2h(ka->_sa_handler);
5437 env->nip = tswapl(handler->entry);
5438 env->gpr[2] = tswapl(handler->toc);
5439 } else {
5440 /* ELFv2 PPC64 function pointers are entry points, but R12
5441 * must also be set */
5442 env->nip = tswapl((target_ulong) ka->_sa_handler);
5443 env->gpr[12] = env->nip;
5445 #else
5446 env->nip = (target_ulong) ka->_sa_handler;
5447 #endif
5449 /* Signal handlers are entered in big-endian mode. */
5450 env->msr &= ~(1ull << MSR_LE);
5452 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5453 return;
5455 sigsegv:
5456 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5457 force_sigsegv(sig);
5461 #if !defined(TARGET_PPC64)
5462 long do_sigreturn(CPUPPCState *env)
5464 struct target_sigcontext *sc = NULL;
5465 struct target_mcontext *sr = NULL;
5466 target_ulong sr_addr = 0, sc_addr;
5467 sigset_t blocked;
5468 target_sigset_t set;
5470 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
5471 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
5472 goto sigsegv;
5474 #if defined(TARGET_PPC64)
5475 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
5476 #else
5477 __get_user(set.sig[0], &sc->oldmask);
5478 __get_user(set.sig[1], &sc->_unused[3]);
5479 #endif
5480 target_to_host_sigset_internal(&blocked, &set);
5481 set_sigmask(&blocked);
5483 __get_user(sr_addr, &sc->regs);
5484 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
5485 goto sigsegv;
5486 restore_user_regs(env, sr, 1);
5488 unlock_user_struct(sr, sr_addr, 1);
5489 unlock_user_struct(sc, sc_addr, 1);
5490 return -TARGET_QEMU_ESIGRETURN;
5492 sigsegv:
5493 unlock_user_struct(sr, sr_addr, 1);
5494 unlock_user_struct(sc, sc_addr, 1);
5495 force_sig(TARGET_SIGSEGV);
5496 return -TARGET_QEMU_ESIGRETURN;
5498 #endif /* !defined(TARGET_PPC64) */
5500 /* See arch/powerpc/kernel/signal_32.c. */
5501 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
5503 struct target_mcontext *mcp;
5504 target_ulong mcp_addr;
5505 sigset_t blocked;
5506 target_sigset_t set;
5508 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
5509 sizeof (set)))
5510 return 1;
5512 #if defined(TARGET_PPC64)
5513 mcp_addr = h2g(ucp) +
5514 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
5515 #else
5516 __get_user(mcp_addr, &ucp->tuc_regs);
5517 #endif
5519 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
5520 return 1;
5522 target_to_host_sigset_internal(&blocked, &set);
5523 set_sigmask(&blocked);
5524 restore_user_regs(env, mcp, sig);
5526 unlock_user_struct(mcp, mcp_addr, 1);
5527 return 0;
5530 long do_rt_sigreturn(CPUPPCState *env)
5532 struct target_rt_sigframe *rt_sf = NULL;
5533 target_ulong rt_sf_addr;
5535 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
5536 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
5537 goto sigsegv;
5539 if (do_setcontext(&rt_sf->uc, env, 1))
5540 goto sigsegv;
5542 do_sigaltstack(rt_sf_addr
5543 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
5544 0, env->gpr[1]);
5546 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5547 return -TARGET_QEMU_ESIGRETURN;
5549 sigsegv:
5550 unlock_user_struct(rt_sf, rt_sf_addr, 1);
5551 force_sig(TARGET_SIGSEGV);
5552 return -TARGET_QEMU_ESIGRETURN;
5555 #elif defined(TARGET_M68K)
5557 struct target_sigcontext {
5558 abi_ulong sc_mask;
5559 abi_ulong sc_usp;
5560 abi_ulong sc_d0;
5561 abi_ulong sc_d1;
5562 abi_ulong sc_a0;
5563 abi_ulong sc_a1;
5564 unsigned short sc_sr;
5565 abi_ulong sc_pc;
5568 struct target_sigframe
5570 abi_ulong pretcode;
5571 int sig;
5572 int code;
5573 abi_ulong psc;
5574 char retcode[8];
5575 abi_ulong extramask[TARGET_NSIG_WORDS-1];
5576 struct target_sigcontext sc;
5579 typedef int target_greg_t;
5580 #define TARGET_NGREG 18
5581 typedef target_greg_t target_gregset_t[TARGET_NGREG];
5583 typedef struct target_fpregset {
5584 int f_fpcntl[3];
5585 int f_fpregs[8*3];
5586 } target_fpregset_t;
5588 struct target_mcontext {
5589 int version;
5590 target_gregset_t gregs;
5591 target_fpregset_t fpregs;
5594 #define TARGET_MCONTEXT_VERSION 2
5596 struct target_ucontext {
5597 abi_ulong tuc_flags;
5598 abi_ulong tuc_link;
5599 target_stack_t tuc_stack;
5600 struct target_mcontext tuc_mcontext;
5601 abi_long tuc_filler[80];
5602 target_sigset_t tuc_sigmask;
5605 struct target_rt_sigframe
5607 abi_ulong pretcode;
5608 int sig;
5609 abi_ulong pinfo;
5610 abi_ulong puc;
5611 char retcode[8];
5612 struct target_siginfo info;
5613 struct target_ucontext uc;
5616 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
5617 abi_ulong mask)
5619 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5620 __put_user(mask, &sc->sc_mask);
5621 __put_user(env->aregs[7], &sc->sc_usp);
5622 __put_user(env->dregs[0], &sc->sc_d0);
5623 __put_user(env->dregs[1], &sc->sc_d1);
5624 __put_user(env->aregs[0], &sc->sc_a0);
5625 __put_user(env->aregs[1], &sc->sc_a1);
5626 __put_user(sr, &sc->sc_sr);
5627 __put_user(env->pc, &sc->sc_pc);
5630 static void
5631 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc)
5633 int temp;
5635 __get_user(env->aregs[7], &sc->sc_usp);
5636 __get_user(env->dregs[0], &sc->sc_d0);
5637 __get_user(env->dregs[1], &sc->sc_d1);
5638 __get_user(env->aregs[0], &sc->sc_a0);
5639 __get_user(env->aregs[1], &sc->sc_a1);
5640 __get_user(env->pc, &sc->sc_pc);
5641 __get_user(temp, &sc->sc_sr);
5642 cpu_m68k_set_ccr(env, temp);
5646 * Determine which stack to use.
5648 static inline abi_ulong
5649 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5650 size_t frame_size)
5652 unsigned long sp;
5654 sp = regs->aregs[7];
5656 /* This is the X/Open sanctioned signal stack switching. */
5657 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5658 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5661 return ((sp - frame_size) & -8UL);
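/* Both m68k frames carry their return stub in frame->retcode and point
   frame->pretcode at it: the non-RT frame uses "moveq #,d0; trap #0"
   with the sigreturn number folded into the moveq, the RT frame the
   "moveq #,d0; notb d0; trap #0" sequence shown further down. */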
5664 static void setup_frame(int sig, struct target_sigaction *ka,
5665 target_sigset_t *set, CPUM68KState *env)
5667 struct target_sigframe *frame;
5668 abi_ulong frame_addr;
5669 abi_ulong retcode_addr;
5670 abi_ulong sc_addr;
5671 int i;
5673 frame_addr = get_sigframe(ka, env, sizeof *frame);
5674 trace_user_setup_frame(env, frame_addr);
5675 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5676 goto give_sigsegv;
5679 __put_user(sig, &frame->sig);
5681 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5682 __put_user(sc_addr, &frame->psc);
5684 setup_sigcontext(&frame->sc, env, set->sig[0]);
5686 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5687 __put_user(set->sig[i], &frame->extramask[i - 1]);
5690 /* Set up to return from userspace. */
5692 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5693 __put_user(retcode_addr, &frame->pretcode);
5695 /* moveq #,d0; trap #0 */
5697 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5698 (uint32_t *)(frame->retcode));
5700 /* Set up to return from userspace */
5702 env->aregs[7] = frame_addr;
5703 env->pc = ka->_sa_handler;
5705 unlock_user_struct(frame, frame_addr, 1);
5706 return;
5708 give_sigsegv:
5709 force_sigsegv(sig);
5712 static inline void target_rt_save_fpu_state(struct target_ucontext *uc,
5713 CPUM68KState *env)
5715 int i;
5716 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5718 __put_user(env->fpcr, &fpregs->f_fpcntl[0]);
5719 __put_user(env->fpsr, &fpregs->f_fpcntl[1]);
5720 /* fpiar is not emulated */
5722 for (i = 0; i < 8; i++) {
5723 uint32_t high = env->fregs[i].d.high << 16;
5724 __put_user(high, &fpregs->f_fpregs[i * 3]);
5725 __put_user(env->fregs[i].d.low,
5726 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5730 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5731 CPUM68KState *env)
5733 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5734 uint32_t sr = (env->sr & 0xff00) | cpu_m68k_get_ccr(env);
5736 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5737 __put_user(env->dregs[0], &gregs[0]);
5738 __put_user(env->dregs[1], &gregs[1]);
5739 __put_user(env->dregs[2], &gregs[2]);
5740 __put_user(env->dregs[3], &gregs[3]);
5741 __put_user(env->dregs[4], &gregs[4]);
5742 __put_user(env->dregs[5], &gregs[5]);
5743 __put_user(env->dregs[6], &gregs[6]);
5744 __put_user(env->dregs[7], &gregs[7]);
5745 __put_user(env->aregs[0], &gregs[8]);
5746 __put_user(env->aregs[1], &gregs[9]);
5747 __put_user(env->aregs[2], &gregs[10]);
5748 __put_user(env->aregs[3], &gregs[11]);
5749 __put_user(env->aregs[4], &gregs[12]);
5750 __put_user(env->aregs[5], &gregs[13]);
5751 __put_user(env->aregs[6], &gregs[14]);
5752 __put_user(env->aregs[7], &gregs[15]);
5753 __put_user(env->pc, &gregs[16]);
5754 __put_user(sr, &gregs[17]);
5756 target_rt_save_fpu_state(uc, env);
5758 return 0;
5761 static inline void target_rt_restore_fpu_state(CPUM68KState *env,
5762 struct target_ucontext *uc)
5764 int i;
5765 target_fpregset_t *fpregs = &uc->tuc_mcontext.fpregs;
5766 uint32_t fpcr;
5768 __get_user(fpcr, &fpregs->f_fpcntl[0]);
5769 cpu_m68k_set_fpcr(env, fpcr);
5770 __get_user(env->fpsr, &fpregs->f_fpcntl[1]);
5771 /* fpiar is not emulated */
5773 for (i = 0; i < 8; i++) {
5774 uint32_t high;
5775 __get_user(high, &fpregs->f_fpregs[i * 3]);
5776 env->fregs[i].d.high = high >> 16;
5777 __get_user(env->fregs[i].d.low,
5778 (uint64_t *)&fpregs->f_fpregs[i * 3 + 1]);
5782 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5783 struct target_ucontext *uc)
5785 int temp;
5786 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5788 __get_user(temp, &uc->tuc_mcontext.version);
5789 if (temp != TARGET_MCONTEXT_VERSION)
5790 goto badframe;
5792 /* restore passed registers */
5793 __get_user(env->dregs[0], &gregs[0]);
5794 __get_user(env->dregs[1], &gregs[1]);
5795 __get_user(env->dregs[2], &gregs[2]);
5796 __get_user(env->dregs[3], &gregs[3]);
5797 __get_user(env->dregs[4], &gregs[4]);
5798 __get_user(env->dregs[5], &gregs[5]);
5799 __get_user(env->dregs[6], &gregs[6]);
5800 __get_user(env->dregs[7], &gregs[7]);
5801 __get_user(env->aregs[0], &gregs[8]);
5802 __get_user(env->aregs[1], &gregs[9]);
5803 __get_user(env->aregs[2], &gregs[10]);
5804 __get_user(env->aregs[3], &gregs[11]);
5805 __get_user(env->aregs[4], &gregs[12]);
5806 __get_user(env->aregs[5], &gregs[13]);
5807 __get_user(env->aregs[6], &gregs[14]);
5808 __get_user(env->aregs[7], &gregs[15]);
5809 __get_user(env->pc, &gregs[16]);
5810 __get_user(temp, &gregs[17]);
5811 cpu_m68k_set_ccr(env, temp);
5813 target_rt_restore_fpu_state(env, uc);
5815 return 0;
5817 badframe:
5818 return 1;
5821 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5822 target_siginfo_t *info,
5823 target_sigset_t *set, CPUM68KState *env)
5825 struct target_rt_sigframe *frame;
5826 abi_ulong frame_addr;
5827 abi_ulong retcode_addr;
5828 abi_ulong info_addr;
5829 abi_ulong uc_addr;
5830 int err = 0;
5831 int i;
5833 frame_addr = get_sigframe(ka, env, sizeof *frame);
5834 trace_user_setup_rt_frame(env, frame_addr);
5835 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5836 goto give_sigsegv;
5839 __put_user(sig, &frame->sig);
5841 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5842 __put_user(info_addr, &frame->pinfo);
5844 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5845 __put_user(uc_addr, &frame->puc);
5847 tswap_siginfo(&frame->info, info);
5849 /* Create the ucontext */
5851 __put_user(0, &frame->uc.tuc_flags);
5852 __put_user(0, &frame->uc.tuc_link);
5853 __put_user(target_sigaltstack_used.ss_sp,
5854 &frame->uc.tuc_stack.ss_sp);
5855 __put_user(sas_ss_flags(env->aregs[7]),
5856 &frame->uc.tuc_stack.ss_flags);
5857 __put_user(target_sigaltstack_used.ss_size,
5858 &frame->uc.tuc_stack.ss_size);
5859 err |= target_rt_setup_ucontext(&frame->uc, env);
5861 if (err)
5862 goto give_sigsegv;
5864 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
5865 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5868 /* Set up to return from userspace. */
5870 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5871 __put_user(retcode_addr, &frame->pretcode);
5873 /* moveq #,d0; notb d0; trap #0 */
5875 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5876 (uint32_t *)(frame->retcode + 0));
5877 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5879 if (err)
5880 goto give_sigsegv;
5882 /* Set the stack pointer and PC to enter the signal handler */
5884 env->aregs[7] = frame_addr;
5885 env->pc = ka->_sa_handler;
5887 unlock_user_struct(frame, frame_addr, 1);
5888 return;
5890 give_sigsegv:
5891 unlock_user_struct(frame, frame_addr, 1);
5892 force_sigsegv(sig);
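/*
 * Sketch (hypothetical helper, not used above): the first trampoline word
 * written by setup_rt_frame() packs "moveq #imm,%d0" (0x70xx) together with
 * "not.b %d0" (0x4600).  moveq sign-extends its 8-bit immediate, so the
 * syscall number is stored complemented and recovered by the not.b before
 * the following "trap #0" (0x4e40) enters the kernel.
 */
static inline uint32_t m68k_rt_retcode_word_sketch(unsigned int nr)
{
    uint32_t moveq = 0x7000 | ((nr ^ 0xff) & 0xff); /* moveq #(nr ^ 0xff),%d0 */
    uint32_t notb = 0x4600;                         /* not.b %d0 */

    /* Equals 0x70004600 + ((nr ^ 0xff) << 16) for any nr below 256. */
    return (moveq << 16) | notb;
}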
5895 long do_sigreturn(CPUM68KState *env)
5897 struct target_sigframe *frame;
5898 abi_ulong frame_addr = env->aregs[7] - 4;
5899 target_sigset_t target_set;
5900 sigset_t set;
5901 int i;
5903 trace_user_do_sigreturn(env, frame_addr);
5904 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5905 goto badframe;
5907 /* set blocked signals */
5909 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5911 for (i = 1; i < TARGET_NSIG_WORDS; i++) {
5912 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5915 target_to_host_sigset_internal(&set, &target_set);
5916 set_sigmask(&set);
5918 /* restore registers */
5920 restore_sigcontext(env, &frame->sc);
5922 unlock_user_struct(frame, frame_addr, 0);
5923 return -TARGET_QEMU_ESIGRETURN;
5925 badframe:
5926 force_sig(TARGET_SIGSEGV);
5927 return -TARGET_QEMU_ESIGRETURN;
5930 long do_rt_sigreturn(CPUM68KState *env)
5932 struct target_rt_sigframe *frame;
5933 abi_ulong frame_addr = env->aregs[7] - 4;
5934 sigset_t set;
5936 trace_user_do_rt_sigreturn(env, frame_addr);
5937 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5938 goto badframe;
5940 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5941 set_sigmask(&set);
5943 /* restore registers */
5945 if (target_rt_restore_ucontext(env, &frame->uc))
5946 goto badframe;
5948 if (do_sigaltstack(frame_addr +
5949 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5950 0, get_sp_from_cpustate(env)) == -EFAULT)
5951 goto badframe;
5953 unlock_user_struct(frame, frame_addr, 0);
5954 return -TARGET_QEMU_ESIGRETURN;
5956 badframe:
5957 unlock_user_struct(frame, frame_addr, 0);
5958 force_sig(TARGET_SIGSEGV);
5959 return -TARGET_QEMU_ESIGRETURN;
5962 #elif defined(TARGET_ALPHA)
5964 struct target_sigcontext {
5965 abi_long sc_onstack;
5966 abi_long sc_mask;
5967 abi_long sc_pc;
5968 abi_long sc_ps;
5969 abi_long sc_regs[32];
5970 abi_long sc_ownedfp;
5971 abi_long sc_fpregs[32];
5972 abi_ulong sc_fpcr;
5973 abi_ulong sc_fp_control;
5974 abi_ulong sc_reserved1;
5975 abi_ulong sc_reserved2;
5976 abi_ulong sc_ssize;
5977 abi_ulong sc_sbase;
5978 abi_ulong sc_traparg_a0;
5979 abi_ulong sc_traparg_a1;
5980 abi_ulong sc_traparg_a2;
5981 abi_ulong sc_fp_trap_pc;
5982 abi_ulong sc_fp_trigger_sum;
5983 abi_ulong sc_fp_trigger_inst;
5986 struct target_ucontext {
5987 abi_ulong tuc_flags;
5988 abi_ulong tuc_link;
5989 abi_ulong tuc_osf_sigmask;
5990 target_stack_t tuc_stack;
5991 struct target_sigcontext tuc_mcontext;
5992 target_sigset_t tuc_sigmask;
5995 struct target_sigframe {
5996 struct target_sigcontext sc;
5997 unsigned int retcode[3];
6000 struct target_rt_sigframe {
6001 target_siginfo_t info;
6002 struct target_ucontext uc;
6003 unsigned int retcode[3];
6006 #define INSN_MOV_R30_R16 0x47fe0410
6007 #define INSN_LDI_R0 0x201f0000
6008 #define INSN_CALLSYS 0x00000083
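/*
 * For reference (sketch only): the three words above make up the sigreturn
 * trampoline emitted below when no sa_restorer is supplied:
 *     mov     $sp, $a0         # INSN_MOV_R30_R16
 *     lda     $v0, NR($zero)   # INSN_LDI_R0 + syscall number
 *     callsys                  # INSN_CALLSYS
 * i.e. "pass the frame address in $a0 and invoke (rt_)sigreturn".  A
 * hypothetical helper filling such a buffer would be:
 */
static inline void alpha_fill_retcode_sketch(uint32_t code[3], unsigned int nr)
{
    code[0] = INSN_MOV_R30_R16;
    code[1] = INSN_LDI_R0 + nr;
    code[2] = INSN_CALLSYS;
}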
6010 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
6011 abi_ulong frame_addr, target_sigset_t *set)
6013 int i;
6015 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
6016 __put_user(set->sig[0], &sc->sc_mask);
6017 __put_user(env->pc, &sc->sc_pc);
6018 __put_user(8, &sc->sc_ps);
6020 for (i = 0; i < 31; ++i) {
6021 __put_user(env->ir[i], &sc->sc_regs[i]);
6023 __put_user(0, &sc->sc_regs[31]);
6025 for (i = 0; i < 31; ++i) {
6026 __put_user(env->fir[i], &sc->sc_fpregs[i]);
6028 __put_user(0, &sc->sc_fpregs[31]);
6029 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
6031 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
6032 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
6033 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
6036 static void restore_sigcontext(CPUAlphaState *env,
6037 struct target_sigcontext *sc)
6039 uint64_t fpcr;
6040 int i;
6042 __get_user(env->pc, &sc->sc_pc);
6044 for (i = 0; i < 31; ++i) {
6045 __get_user(env->ir[i], &sc->sc_regs[i]);
6047 for (i = 0; i < 31; ++i) {
6048 __get_user(env->fir[i], &sc->sc_fpregs[i]);
6051 __get_user(fpcr, &sc->sc_fpcr);
6052 cpu_alpha_store_fpcr(env, fpcr);
6055 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
6056 CPUAlphaState *env,
6057 unsigned long framesize)
6059 abi_ulong sp = env->ir[IR_SP];
6061 /* This is the X/Open sanctioned signal stack switching. */
6062 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
6063 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6065 return (sp - framesize) & -32;
6068 static void setup_frame(int sig, struct target_sigaction *ka,
6069 target_sigset_t *set, CPUAlphaState *env)
6071 abi_ulong frame_addr, r26;
6072 struct target_sigframe *frame;
6073 int err = 0;
6075 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6076 trace_user_setup_frame(env, frame_addr);
6077 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6078 goto give_sigsegv;
6081 setup_sigcontext(&frame->sc, env, frame_addr, set);
6083 if (ka->sa_restorer) {
6084 r26 = ka->sa_restorer;
6085 } else {
6086 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6087 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
6088 &frame->retcode[1]);
6089 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6090 /* imb() */
6091 r26 = frame_addr;
6094 unlock_user_struct(frame, frame_addr, 1);
6096 if (err) {
6097 give_sigsegv:
6098 force_sigsegv(sig);
6099 return;
6102 env->ir[IR_RA] = r26;
6103 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6104 env->ir[IR_A0] = sig;
6105 env->ir[IR_A1] = 0;
6106 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
6107 env->ir[IR_SP] = frame_addr;
6110 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6111 target_siginfo_t *info,
6112 target_sigset_t *set, CPUAlphaState *env)
6114 abi_ulong frame_addr, r26;
6115 struct target_rt_sigframe *frame;
6116 int i, err = 0;
6118 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6119 trace_user_setup_rt_frame(env, frame_addr);
6120 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6121 goto give_sigsegv;
6124 tswap_siginfo(&frame->info, info);
6126 __put_user(0, &frame->uc.tuc_flags);
6127 __put_user(0, &frame->uc.tuc_link);
6128 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
6129 __put_user(target_sigaltstack_used.ss_sp,
6130 &frame->uc.tuc_stack.ss_sp);
6131 __put_user(sas_ss_flags(env->ir[IR_SP]),
6132 &frame->uc.tuc_stack.ss_flags);
6133 __put_user(target_sigaltstack_used.ss_size,
6134 &frame->uc.tuc_stack.ss_size);
6135 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
6136 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
6137 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6140 if (ka->sa_restorer) {
6141 r26 = ka->sa_restorer;
6142 } else {
6143 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
6144 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
6145 &frame->retcode[1]);
6146 __put_user(INSN_CALLSYS, &frame->retcode[2]);
6147 /* imb(); */
6148 r26 = frame_addr;
6151 if (err) {
6152 give_sigsegv:
6153 force_sigsegv(sig);
6154 return;
6157 env->ir[IR_RA] = r26;
6158 env->ir[IR_PV] = env->pc = ka->_sa_handler;
6159 env->ir[IR_A0] = sig;
6160 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6161 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6162 env->ir[IR_SP] = frame_addr;
6165 long do_sigreturn(CPUAlphaState *env)
6167 struct target_sigcontext *sc;
6168 abi_ulong sc_addr = env->ir[IR_A0];
6169 target_sigset_t target_set;
6170 sigset_t set;
6172 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
6173 goto badframe;
6176 target_sigemptyset(&target_set);
6177 __get_user(target_set.sig[0], &sc->sc_mask);
6179 target_to_host_sigset_internal(&set, &target_set);
6180 set_sigmask(&set);
6182 restore_sigcontext(env, sc);
6183 unlock_user_struct(sc, sc_addr, 0);
6184 return -TARGET_QEMU_ESIGRETURN;
6186 badframe:
6187 force_sig(TARGET_SIGSEGV);
6188 return -TARGET_QEMU_ESIGRETURN;
6191 long do_rt_sigreturn(CPUAlphaState *env)
6193 abi_ulong frame_addr = env->ir[IR_A0];
6194 struct target_rt_sigframe *frame;
6195 sigset_t set;
6197 trace_user_do_rt_sigreturn(env, frame_addr);
6198 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6199 goto badframe;
6201 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6202 set_sigmask(&set);
6204 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6205 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6206 uc.tuc_stack),
6207 0, env->ir[IR_SP]) == -EFAULT) {
6208 goto badframe;
6211 unlock_user_struct(frame, frame_addr, 0);
6212 return -TARGET_QEMU_ESIGRETURN;
6215 badframe:
6216 unlock_user_struct(frame, frame_addr, 0);
6217 force_sig(TARGET_SIGSEGV);
6218 return -TARGET_QEMU_ESIGRETURN;
6221 #elif defined(TARGET_TILEGX)
6223 struct target_sigcontext {
6224 union {
6225 /* General-purpose registers. */
6226 abi_ulong gregs[56];
6227 struct {
6228 abi_ulong __gregs[53];
6229 abi_ulong tp; /* Aliases gregs[TREG_TP]. */
6230 abi_ulong sp; /* Aliases gregs[TREG_SP]. */
6231 abi_ulong lr; /* Aliases gregs[TREG_LR]. */
6234 abi_ulong pc; /* Program counter. */
6235 abi_ulong ics; /* In Interrupt Critical Section? */
6236 abi_ulong faultnum; /* Fault number. */
6237 abi_ulong pad[5];
6240 struct target_ucontext {
6241 abi_ulong tuc_flags;
6242 abi_ulong tuc_link;
6243 target_stack_t tuc_stack;
6244 struct target_sigcontext tuc_mcontext;
6245 target_sigset_t tuc_sigmask; /* mask last for extensibility */
6248 struct target_rt_sigframe {
6249 unsigned char save_area[16]; /* caller save area */
6250 struct target_siginfo info;
6251 struct target_ucontext uc;
6252 abi_ulong retcode[2];
6255 #define INSN_MOVELI_R10_139 0x00045fe551483000ULL /* { moveli r10, 139 } */
6256 #define INSN_SWINT1 0x286b180051485000ULL /* { swint1 } */
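/*
 * Sketch (hypothetical helper): with no sa_restorer, setup_rt_frame() below
 * stores these two bundles as the return trampoline -- load the
 * rt_sigreturn syscall number (139 in the asm-generic table) into r10, then
 * trap with swint1.
 */
static inline void tilegx_fill_retcode_sketch(abi_ulong code[2])
{
    code[0] = INSN_MOVELI_R10_139;  /* { moveli r10, 139 } */
    code[1] = INSN_SWINT1;          /* { swint1 } */
}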
6259 static void setup_sigcontext(struct target_sigcontext *sc,
6260 CPUArchState *env, int signo)
6262 int i;
6264 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6265 __put_user(env->regs[i], &sc->gregs[i]);
6268 __put_user(env->pc, &sc->pc);
6269 __put_user(0, &sc->ics);
6270 __put_user(signo, &sc->faultnum);
6273 static void restore_sigcontext(CPUTLGState *env, struct target_sigcontext *sc)
6275 int i;
6277 for (i = 0; i < TILEGX_R_COUNT; ++i) {
6278 __get_user(env->regs[i], &sc->gregs[i]);
6281 __get_user(env->pc, &sc->pc);
6284 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUArchState *env,
6285 size_t frame_size)
6287 unsigned long sp = env->regs[TILEGX_R_SP];
6289 if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) {
6290 return -1UL;
6293 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
6294 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
6297 sp -= frame_size;
6298 sp &= -16UL;
6299 return sp;
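/*
 * Sketch of the placement rule above, with made-up numbers: the frame is
 * carved out below sp and aligned down to 16 bytes; if sp is already on the
 * alternate stack but sp - frame_size would fall off its bottom, -1UL is
 * returned so that the later lock_user_struct() fails and the signal is
 * turned into a SIGSEGV.
 */
static inline unsigned long tilegx_place_frame_sketch(unsigned long sp,
                                                      size_t frame_size)
{
    /* e.g. sp = 0x10010, frame_size = 0x28  ->  0xffe0 */
    return (sp - frame_size) & -16UL;
}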
6302 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6303 target_siginfo_t *info,
6304 target_sigset_t *set, CPUArchState *env)
6306 abi_ulong frame_addr;
6307 struct target_rt_sigframe *frame;
6308 unsigned long restorer;
6310 frame_addr = get_sigframe(ka, env, sizeof(*frame));
6311 trace_user_setup_rt_frame(env, frame_addr);
6312 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6313 goto give_sigsegv;
6316 /* Always write at least the signal number for the stack backtracer. */
6317 if (ka->sa_flags & TARGET_SA_SIGINFO) {
6318 /* At sigreturn time, restore the callee-save registers too. */
6319 tswap_siginfo(&frame->info, info);
6320 /* regs->flags |= PT_FLAGS_RESTORE_REGS; FIXME: can we skip this? */
6321 } else {
6322 __put_user(info->si_signo, &frame->info.si_signo);
6325 /* Create the ucontext. */
6326 __put_user(0, &frame->uc.tuc_flags);
6327 __put_user(0, &frame->uc.tuc_link);
6328 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6329 __put_user(sas_ss_flags(env->regs[TILEGX_R_SP]),
6330 &frame->uc.tuc_stack.ss_flags);
6331 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
6332 setup_sigcontext(&frame->uc.tuc_mcontext, env, info->si_signo);
6334 if (ka->sa_flags & TARGET_SA_RESTORER) {
6335 restorer = (unsigned long) ka->sa_restorer;
6336 } else {
6337 __put_user(INSN_MOVELI_R10_139, &frame->retcode[0]);
6338 __put_user(INSN_SWINT1, &frame->retcode[1]);
6339 restorer = frame_addr + offsetof(struct target_rt_sigframe, retcode);
6341 env->pc = (unsigned long) ka->_sa_handler;
6342 env->regs[TILEGX_R_SP] = frame_addr;
6343 env->regs[TILEGX_R_LR] = restorer;
6344 env->regs[0] = (unsigned long) sig;
6345 env->regs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
6346 env->regs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
6347 /* regs->flags |= PT_FLAGS_CALLER_SAVES; FIXME: can we skip this? */
6349 unlock_user_struct(frame, frame_addr, 1);
6350 return;
6352 give_sigsegv:
6353 force_sigsegv(sig);
6356 long do_rt_sigreturn(CPUTLGState *env)
6358 abi_ulong frame_addr = env->regs[TILEGX_R_SP];
6359 struct target_rt_sigframe *frame;
6360 sigset_t set;
6362 trace_user_do_rt_sigreturn(env, frame_addr);
6363 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6364 goto badframe;
6366 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6367 set_sigmask(&set);
6369 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6370 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6371 uc.tuc_stack),
6372 0, env->regs[TILEGX_R_SP]) == -EFAULT) {
6373 goto badframe;
6376 unlock_user_struct(frame, frame_addr, 0);
6377 return -TARGET_QEMU_ESIGRETURN;
6380 badframe:
6381 unlock_user_struct(frame, frame_addr, 0);
6382 force_sig(TARGET_SIGSEGV);
6383 return -TARGET_QEMU_ESIGRETURN;
6386 #elif defined(TARGET_HPPA)
6388 struct target_sigcontext {
6389 abi_ulong sc_flags;
6390 abi_ulong sc_gr[32];
6391 uint64_t sc_fr[32];
6392 abi_ulong sc_iasq[2];
6393 abi_ulong sc_iaoq[2];
6394 abi_ulong sc_sar;
6397 struct target_ucontext {
6398 abi_uint tuc_flags;
6399 abi_ulong tuc_link;
6400 target_stack_t tuc_stack;
6401 abi_uint pad[1];
6402 struct target_sigcontext tuc_mcontext;
6403 target_sigset_t tuc_sigmask;
6406 struct target_rt_sigframe {
6407 abi_uint tramp[9];
6408 target_siginfo_t info;
6409 struct target_ucontext uc;
6410 /* hidden location of upper halves of pa2.0 64-bit gregs */
6413 static void setup_sigcontext(struct target_sigcontext *sc, CPUArchState *env)
6415 int flags = 0;
6416 int i;
6418 /* ??? if on_sig_stack, flags |= 1 (PARISC_SC_FLAG_ONSTACK). */
6420 if (env->iaoq_f < TARGET_PAGE_SIZE) {
6421 /* In the gateway page, executing a syscall. */
6422 flags |= 2; /* PARISC_SC_FLAG_IN_SYSCALL */
6423 __put_user(env->gr[31], &sc->sc_iaoq[0]);
6424 __put_user(env->gr[31] + 4, &sc->sc_iaoq[1]);
6425 } else {
6426 __put_user(env->iaoq_f, &sc->sc_iaoq[0]);
6427 __put_user(env->iaoq_b, &sc->sc_iaoq[1]);
6429 __put_user(0, &sc->sc_iasq[0]);
6430 __put_user(0, &sc->sc_iasq[1]);
6431 __put_user(flags, &sc->sc_flags);
6433 __put_user(cpu_hppa_get_psw(env), &sc->sc_gr[0]);
6434 for (i = 1; i < 32; ++i) {
6435 __put_user(env->gr[i], &sc->sc_gr[i]);
6438 __put_user((uint64_t)env->fr0_shadow << 32, &sc->sc_fr[0]);
6439 for (i = 1; i < 32; ++i) {
6440 __put_user(env->fr[i], &sc->sc_fr[i]);
6443 __put_user(env->sar, &sc->sc_sar);
6446 static void restore_sigcontext(CPUArchState *env, struct target_sigcontext *sc)
6448 target_ulong psw;
6449 int i;
6451 __get_user(psw, &sc->sc_gr[0]);
6452 cpu_hppa_put_psw(env, psw);
6454 for (i = 1; i < 32; ++i) {
6455 __get_user(env->gr[i], &sc->sc_gr[i]);
6457 for (i = 0; i < 32; ++i) {
6458 __get_user(env->fr[i], &sc->sc_fr[i]);
6460 cpu_hppa_loaded_fr0(env);
6462 __get_user(env->iaoq_f, &sc->sc_iaoq[0]);
6463 __get_user(env->iaoq_b, &sc->sc_iaoq[1]);
6464 __get_user(env->sar, &sc->sc_sar);
6467 /* No, this doesn't look right, but it's copied straight from the kernel. */
6468 #define PARISC_RT_SIGFRAME_SIZE32 \
6469 ((sizeof(struct target_rt_sigframe) + 48 + 64) & -64)
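/*
 * Sketch: the macro above takes sizeof(struct target_rt_sigframe), adds 48
 * bytes of scratch space (presumably the PA calling-convention frame
 * marker/argument area), and rounds up to the next 64-byte multiple, always
 * advancing by at least 64 -- the oddity the comment above apologises for.
 */
static inline size_t parisc_rt_sigframe_size32_sketch(size_t frame_size)
{
    /* e.g. frame_size = 1000  ->  (1000 + 48 + 64) & -64  ==  1088 */
    return (frame_size + 48 + 64) & -64;
}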
6471 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6472 target_siginfo_t *info,
6473 target_sigset_t *set, CPUArchState *env)
6475 abi_ulong frame_addr, sp, haddr;
6476 struct target_rt_sigframe *frame;
6477 int i;
6479 sp = env->gr[30];
6480 if (ka->sa_flags & TARGET_SA_ONSTACK) {
6481 if (sas_ss_flags(sp) == 0) {
6482 sp = (target_sigaltstack_used.ss_sp + 0x7f) & ~0x3f;
6485 frame_addr = QEMU_ALIGN_UP(sp, 64);
6486 sp = frame_addr + PARISC_RT_SIGFRAME_SIZE32;
6488 trace_user_setup_rt_frame(env, frame_addr);
6490 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
6491 goto give_sigsegv;
6494 tswap_siginfo(&frame->info, info);
6495 frame->uc.tuc_flags = 0;
6496 frame->uc.tuc_link = 0;
6498 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
6499 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
6500 &frame->uc.tuc_stack.ss_flags);
6501 __put_user(target_sigaltstack_used.ss_size,
6502 &frame->uc.tuc_stack.ss_size);
6504 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
6505 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
6508 setup_sigcontext(&frame->uc.tuc_mcontext, env);
6510 __put_user(0x34190000, frame->tramp + 0); /* ldi 0,%r25 */
6511 __put_user(0x3414015a, frame->tramp + 1); /* ldi __NR_rt_sigreturn,%r20 */
6512 __put_user(0xe4008200, frame->tramp + 2); /* be,l 0x100(%sr2,%r0) */
6513 __put_user(0x08000240, frame->tramp + 3); /* nop */
6515 unlock_user_struct(frame, frame_addr, 1);
6517 env->gr[2] = h2g(frame->tramp);
6518 env->gr[30] = sp;
6519 env->gr[26] = sig;
6520 env->gr[25] = h2g(&frame->info);
6521 env->gr[24] = h2g(&frame->uc);
6523 haddr = ka->_sa_handler;
6524 if (haddr & 2) {
6525 /* Function descriptor. */
6526 target_ulong *fdesc, dest;
6528 haddr &= -4;
6529 if (!lock_user_struct(VERIFY_READ, fdesc, haddr, 1)) {
6530 goto give_sigsegv;
6532 __get_user(dest, fdesc);
6533 __get_user(env->gr[19], fdesc + 1);
6534 unlock_user_struct(fdesc, haddr, 1);
6535 haddr = dest;
6537 env->iaoq_f = haddr;
6538 env->iaoq_b = haddr + 4;
6539 return;
6541 give_sigsegv:
6542 force_sigsegv(sig);
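/*
 * Sketch (an assumption about the PA-RISC encoding, not taken from this
 * file): the "ldi __NR_rt_sigreturn,%r20" word stored by setup_rt_frame()
 * above is an ldo with %r0 as base and %r20 as target; PA "low sign
 * extension" keeps the immediate's sign bit in bit 0, so a small positive
 * immediate is encoded shifted left by one -- 173 (rt_sigreturn) becomes
 * 0x15a.  The "ldi 0,%r25" word 0x34190000 differs only in the
 * target-register field.
 */
static inline uint32_t hppa_ldi_r20_sketch(unsigned int imm)
{
    /* Valid for small positive immediates only; hppa_ldi_r20_sketch(173)
     * reproduces the 0x3414015a word used above. */
    return 0x34140000 | (imm << 1);
}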
6545 long do_rt_sigreturn(CPUArchState *env)
6547 abi_ulong frame_addr = env->gr[30] - PARISC_RT_SIGFRAME_SIZE32;
6548 struct target_rt_sigframe *frame;
6549 sigset_t set;
6551 trace_user_do_rt_sigreturn(env, frame_addr);
6552 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
6553 goto badframe;
6555 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
6556 set_sigmask(&set);
6558 restore_sigcontext(env, &frame->uc.tuc_mcontext);
6559 unlock_user_struct(frame, frame_addr, 0);
6561 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
6562 uc.tuc_stack),
6563 0, env->gr[30]) == -EFAULT) {
6564 goto badframe;
6568 return -TARGET_QEMU_ESIGRETURN;
6570 badframe:
6571 force_sig(TARGET_SIGSEGV);
6572 return -TARGET_QEMU_ESIGRETURN;
6575 #else
6577 static void setup_frame(int sig, struct target_sigaction *ka,
6578 target_sigset_t *set, CPUArchState *env)
6580 fprintf(stderr, "setup_frame: not implemented\n");
6583 static void setup_rt_frame(int sig, struct target_sigaction *ka,
6584 target_siginfo_t *info,
6585 target_sigset_t *set, CPUArchState *env)
6587 fprintf(stderr, "setup_rt_frame: not implemented\n");
6590 long do_sigreturn(CPUArchState *env)
6592 fprintf(stderr, "do_sigreturn: not implemented\n");
6593 return -TARGET_ENOSYS;
6596 long do_rt_sigreturn(CPUArchState *env)
6598 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
6599 return -TARGET_ENOSYS;
6602 #endif
6604 static void handle_pending_signal(CPUArchState *cpu_env, int sig,
6605 struct emulated_sigtable *k)
6607 CPUState *cpu = ENV_GET_CPU(cpu_env);
6608 abi_ulong handler;
6609 sigset_t set;
6610 target_sigset_t target_old_set;
6611 struct target_sigaction *sa;
6612 TaskState *ts = cpu->opaque;
6614 trace_user_handle_signal(cpu_env, sig);
6615 /* dequeue signal */
6616 k->pending = 0;
6618 sig = gdb_handlesig(cpu, sig);
6619 if (!sig) {
6620 sa = NULL;
6621 handler = TARGET_SIG_IGN;
6622 } else {
6623 sa = &sigact_table[sig - 1];
6624 handler = sa->_sa_handler;
6627 if (do_strace) {
6628 print_taken_signal(sig, &k->info);
6631 if (handler == TARGET_SIG_DFL) {
6632 /* default handler: ignore some signals; the others are job control or fatal */
6633 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
6634 kill(getpid(), SIGSTOP);
6635 } else if (sig != TARGET_SIGCHLD &&
6636 sig != TARGET_SIGURG &&
6637 sig != TARGET_SIGWINCH &&
6638 sig != TARGET_SIGCONT) {
6639 dump_core_and_abort(sig);
6641 } else if (handler == TARGET_SIG_IGN) {
6642 /* ignore sig */
6643 } else if (handler == TARGET_SIG_ERR) {
6644 dump_core_and_abort(sig);
6645 } else {
6646 /* compute the blocked signals during the handler execution */
6647 sigset_t *blocked_set;
6649 target_to_host_sigset(&set, &sa->sa_mask);
6650 /* SA_NODEFER indicates that the current signal should not be
6651 blocked during the handler */
6652 if (!(sa->sa_flags & TARGET_SA_NODEFER))
6653 sigaddset(&set, target_to_host_signal(sig));
6655 /* save the previous blocked signal state to restore it at the
6656 end of the signal execution (see do_sigreturn) */
6657 host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);
6659 /* block signals in the handler */
6660 blocked_set = ts->in_sigsuspend ?
6661 &ts->sigsuspend_mask : &ts->signal_mask;
6662 sigorset(&ts->signal_mask, blocked_set, &set);
6663 ts->in_sigsuspend = 0;
6665 /* if the CPU is in VM86 mode, we restore the 32 bit values */
6666 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
6668 CPUX86State *env = cpu_env;
6669 if (env->eflags & VM_MASK)
6670 save_v86_state(env);
6672 #endif
6673 /* prepare the stack frame of the virtual CPU */
6674 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
6675 || defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
6676 || defined(TARGET_PPC64) || defined(TARGET_HPPA) \
6677 || defined(TARGET_NIOS2) || defined(TARGET_X86_64)
6678 /* These targets do not have traditional signals. */
6679 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6680 #else
6681 if (sa->sa_flags & TARGET_SA_SIGINFO)
6682 setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
6683 else
6684 setup_frame(sig, sa, &target_old_set, cpu_env);
6685 #endif
6686 if (sa->sa_flags & TARGET_SA_RESETHAND) {
6687 sa->_sa_handler = TARGET_SIG_DFL;
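/*
 * Sketch (hypothetical helper) of the mask computation performed in
 * handle_pending_signal() above: while the handler runs, the blocked set is
 * the previously blocked signals plus sa_mask, plus the signal itself
 * unless SA_NODEFER was requested.
 */
static inline void handler_blocked_mask_sketch(sigset_t *out,
                                               const sigset_t *blocked,
                                               const sigset_t *sa_mask,
                                               int host_sig, bool nodefer)
{
    sigset_t add = *sa_mask;

    if (!nodefer) {
        sigaddset(&add, host_sig);
    }
    sigorset(out, blocked, &add);
}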
6692 void process_pending_signals(CPUArchState *cpu_env)
6694 CPUState *cpu = ENV_GET_CPU(cpu_env);
6695 int sig;
6696 TaskState *ts = cpu->opaque;
6697 sigset_t set;
6698 sigset_t *blocked_set;
6700 while (atomic_read(&ts->signal_pending)) {
6701 /* FIXME: This is not threadsafe. */
6702 sigfillset(&set);
6703 sigprocmask(SIG_SETMASK, &set, 0);
6705 restart_scan:
6706 sig = ts->sync_signal.pending;
6707 if (sig) {
6708 /* Synchronous signals are forced,
6709 * see force_sig_info() and callers in Linux.
6710 * Note that not all of our queue_signal() calls in QEMU correspond
6711 * to force_sig_info() calls in Linux (some are send_sig_info()).
6712 * However, it seems like a kernel bug to me to allow the process
6713 * to block a synchronous signal, since it could then just end up
6714 * looping round and round indefinitely.
6716 if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
6717 || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
6718 sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
6719 sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
6722 handle_pending_signal(cpu_env, sig, &ts->sync_signal);
6725 for (sig = 1; sig <= TARGET_NSIG; sig++) {
6726 blocked_set = ts->in_sigsuspend ?
6727 &ts->sigsuspend_mask : &ts->signal_mask;
6729 if (ts->sigtab[sig - 1].pending &&
6730 (!sigismember(blocked_set,
6731 target_to_host_signal_table[sig]))) {
6732 handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
6733 /* Restart scan from the beginning, as handle_pending_signal
6734 * might have resulted in a new synchronous signal (eg SIGSEGV).
6736 goto restart_scan;
6740 /* if no signal is pending, unblock signals and recheck (the act
6741 * of unblocking might cause us to take another host signal which
6742 * will set signal_pending again).
6744 atomic_set(&ts->signal_pending, 0);
6745 ts->in_sigsuspend = 0;
6746 set = ts->signal_mask;
6747 sigdelset(&set, SIGSEGV);
6748 sigdelset(&set, SIGBUS);
6749 sigprocmask(SIG_SETMASK, &set, 0);
6751 ts->in_sigsuspend = 0;
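/*
 * Sketch of the host-side masking pattern used above: every host signal is
 * blocked while the guest's pending set is scanned, and once nothing is
 * left the guest's own mask is reinstalled with SIGSEGV and SIGBUS forced
 * open, so that faults taken by QEMU itself can never be blocked.
 */
static inline void reinstall_guest_mask_sketch(const sigset_t *guest_mask)
{
    sigset_t set = *guest_mask;

    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGBUS);
    sigprocmask(SIG_SETMASK, &set, NULL);
}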