/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "linux-user/trace.h"

/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.  */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif

/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    target_ulong mc_pad[2];
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /* Altivec vector registers.  The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64
        */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
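        /* 34 quadword slots cover the 32 Altivec registers plus VSCR and
         * VRSAVE; the extra 16 quadwords hold the second doublewords of
         * the 32 VSX registers that save_user_regs() stores starting at
         * mc_vregs.altivec[34].
         */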
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one. This pad fixes it up. This is also why the
         * vector regs are referenced by the v_regs pointer above so
         * any amount of padding can be added here
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-bytes alignment that would result from it. This would have
         * the effect of making the whole struct target_mcontext aligned
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};

/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15];  /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc;   /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif

#if defined(TARGET_PPC64)

struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad

/* See arch/powerpc/kernel/signal.c.  */
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = target_sigsp(get_sp_from_cpustate(env), ka);

    return (oldsp - frame_size) & ~0xFUL;
}

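/* Indices used when copying a vector register between QEMU's host-order
 * ppc_avr_t.u64[] pair and the two target-order doublewords stored in the
 * signal frame; they compensate for any host/target endianness mismatch.
 */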
#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif

static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    target_ulong ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

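    /* Pack the eight 4-bit condition register fields into a single CCR
     * word, with crf[0] in the most significant nibble, mirroring the
     * architectural CR layout.
     */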
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Set MSR_VR in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= MSR_VR;
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __put_user(*vsrl, &vsregs[i]);
        }
    }

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __put_user(*fpr, &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        /* Set MSR_SPE in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= MSR_SPE;
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}

static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
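    /* 0x38000000 is the addi opcode with rD = r0 and rA = 0 (the li
     * extended mnemonic), so OR-ing in sigret places the syscall number
     * in the 16-bit immediate field; 0x44000002 encodes the sc instruction.
     */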
    if (sigret) {
        __put_user(0x38000000 | sigret, &tramp[0]);
        __put_user(0x44000002, &tramp[1]);
    }
}

static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;

    int i;

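    /* On the set/swapcontext path (sig == 0), the value of r2 on entry
     * (the TOC pointer on ppc64) is preserved across the restore below.
     */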
    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

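    /* Scatter the saved CCR word back into the eight 4-bit crf[] fields. */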
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig)
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Recover VRSAVE, which is stored just past the vector registers;
           its slot differs between ppc64 and ppc32.  */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __get_user(*vsrl, &vsregs[i]);
        }
    }

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __get_user(*fpr, &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers.  The kernel only restores the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}

#if !defined(TARGET_PPC64)
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack.  */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
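    /* Storing the old r1 at the new stack pointer provides the back-chain
     * word of the dummy frame required by the PowerPC ABI.
     */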
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
#if !defined(TARGET_ABI32)
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    __put_user(h2g(&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);
}

#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

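    /* setup_frame() placed the sigcontext at the start of the signal frame,
     * one SIGNAL_FRAMESIZE above the stack pointer it handed to the handler.
     */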
    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

#if defined(TARGET_PPC64)
    set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
#else
    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);
#endif
    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */

/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof(set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}

long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

/* This syscall implements {get,set,swap}context for userland.  */
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = (TaskState *)thread_cpu->opaque;

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
        return -TARGET_QEMU_ESIGRETURN;
    }

    return 0;
}