qemu/ar7.git: linux-user/ppc/signal.c
/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "linux-user/trace.h"

/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.  */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif

/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];

#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
    /*
     * On ppc64, this mcontext structure is naturally *unaligned*,
     * or rather it is aligned on an 8 byte boundary but not on
     * a 16 byte boundary.  This pad fixes it up.  This is why we
     * cannot use ppc_avr_t, which would force alignment.  This is
     * also why the vector regs are referenced in the ABI by the
     * v_regs pointer above so any amount of padding can be added here.
     */
    target_ulong pad;
    /* VSCR and VRSAVE are saved separately.  Also reserve space for VSX. */
    struct {
        uint64_t altivec[34 + 16][2];
    } mc_vregs;
#else
    target_ulong mc_pad[2];

    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /*
         * Altivec vector registers.  One extra for VRSAVE.
         * On ppc32, we are already aligned to 16 bytes.  We could
         * use ppc_avr_t, but choose to share the same type as ppc64.
         */
        uint64_t altivec[33][2];
    } mc_vregs;
#endif
};

/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc;   /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad

/* See arch/powerpc/kernel/signal.c.  */
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = target_sigsp(get_sp_from_cpustate(env), ka);

    return (oldsp - frame_size) & ~0xFUL;
}
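
/*
 * Editor's note: the u64[] halves of ppc_avr_t are laid out in host
 * memory order, so the index of the doubleword that should land in the
 * first (HI) or second (LO) slot of the saved vector depends on whether
 * host and target byte order agree.
 */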
#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif

static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    target_ulong ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

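    /*
     * QEMU keeps the condition register as eight 4-bit fields in
     * env->crf[], crf[0] being the most significant nibble; pack them
     * back into the single 32-bit CR image the frame expects.
     */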
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

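    /*
     * Editor's note: on ppc64, each of VSR0-31 shares its most significant
     * doubleword with the corresponding FPR, so only the low ("second")
     * halves need separate space in the frame below.
     */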
#if defined(TARGET_PPC64)
    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __put_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __put_user(*fpr, &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

#if !defined(TARGET_PPC64)
    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}

static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
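    /*
     * 0x38000000 is the base encoding of "addi r0, 0, imm" (i.e. li r0,
     * imm), so OR-ing in the syscall number yields "li r0, sigret";
     * 0x44000002 is the "sc" instruction.
     */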
    if (sigret) {
        __put_user(0x38000000 | sigret, &tramp[0]);
        __put_user(0x44000002, &tramp[1]);
    }
}

static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

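    /*
     * When called for swapcontext rather than signal return (sig == 0),
     * r2, the TOC/small-data pointer, is preserved across the context
     * switch instead of being taken from the saved frame.
     */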
    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig) {
        ppc_store_msr(env, ((env->msr & ~(1ull << MSR_LE)) |
                            (msr & (1ull << MSR_LE))));
    }

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(env_cpu(env), v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __get_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __get_user(*fpr, &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

#if !defined(TARGET_PPC64)
    /* Restore SPE registers.  The kernel only restores the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif
}

#if !defined(TARGET_PPC64)
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack. */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
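    /* Storing the old r1 at *newsp forms the standard PowerPC back chain. */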
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
#if !defined(TARGET_ABI32)
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    __put_user(h2g(&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
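    /*
     * r3..r6 mirror what the kernel passes to an SA_SIGINFO handler:
     * the signal number, pointers to the siginfo and ucontext, and the
     * frame itself.
     */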
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points.  R12 must also
           be set.  */
        env->gpr[12] = env->nip = ka->_sa_handler;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));
#else
    /* Signal handlers are entered in little-endian mode.  */
    ppc_store_msr(env, env->msr | (1ull << MSR_LE));
#endif

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);

}

#if !defined(TARGET_PPC64) || defined(TARGET_ABI32)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

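    /*
     * setup_frame() placed the sigframe just above the SIGNAL_FRAMESIZE
     * dummy frame it pushed, so the sigcontext sits at sp + SIGNAL_FRAMESIZE.
     */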
    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

#if defined(TARGET_PPC64)
    set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
#else
    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);
#endif
    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */

/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof(set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}

long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

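    /*
     * The rt frame was placed at sp + SIGNAL_FRAMESIZE + 16, matching the
     * newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16) computation in
     * setup_rt_frame().
     */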
    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    target_restore_altstack(&rt_sf->uc.tuc_stack, env);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

/* This syscall implements {get,set,swap}context for userland.  */
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = (TaskState *)thread_cpu->opaque;

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
        return -TARGET_QEMU_ESIGRETURN;
    }

    return 0;
}