/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
#include "user/tswap-target.h"
#include "vdso-asmoffset.h"

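/*
 * Added note: vdso-asmoffset.h provides the offsetof_* constants checked
 * by the QEMU_BUILD_BUG_ON assertions below, keeping these structure
 * layouts in sync with the generated vDSO.
 */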
/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];

#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
    /*
     * On ppc64, this mcontext structure is naturally *unaligned*,
     * or rather it is aligned on a 8 bytes boundary but not on
     * a 16 byte boundary.  This pad fixes it up.  This is why we
     * cannot use ppc_avr_t, which would force alignment.  This is
     * also why the vector regs are referenced in the ABI by the
     * v_regs pointer above so any amount of padding can be added here.
     */
    target_ulong pad;
    /* VSCR and VRSAVE are saved separately.  Also reserve space for VSX. */
    struct {
        uint64_t altivec[34 + 16][2];
    } mc_vregs;
#else
    target_ulong mc_pad[2];

    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /*
         * Altivec vector registers.  One extra for VRSAVE.
         * On ppc32, we are already aligned to 16 bytes.  We could
         * use ppc_avr_t, but choose to share the same type as ppc64.
         */
        uint64_t altivec[33][2];
    } mc_vregs;
#endif
};

QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_fregs)
                  != offsetof_mcontext_fregs);
#if defined(TARGET_PPC64)
QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, v_regs)
                  != offsetof_mcontext_vregs_ptr);
#else
QEMU_BUILD_BUG_ON(offsetof(struct target_mcontext, mc_vregs)
                  != offsetof_mcontext_vregs);
#endif

/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};

/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    TARGET_PT_R0 = 0,
    TARGET_PT_R1 = 1,
    TARGET_PT_R2 = 2,
    TARGET_PT_R3 = 3,
    TARGET_PT_R4 = 4,
    TARGET_PT_R5 = 5,
    TARGET_PT_R6 = 6,
    TARGET_PT_R7 = 7,
    TARGET_PT_R8 = 8,
    TARGET_PT_R9 = 9,
    TARGET_PT_R10 = 10,
    TARGET_PT_R11 = 11,
    TARGET_PT_R12 = 12,
    TARGET_PT_R13 = 13,
    TARGET_PT_R14 = 14,
    TARGET_PT_R15 = 15,
    TARGET_PT_R16 = 16,
    TARGET_PT_R17 = 17,
    TARGET_PT_R18 = 18,
    TARGET_PT_R19 = 19,
    TARGET_PT_R20 = 20,
    TARGET_PT_R21 = 21,
    TARGET_PT_R22 = 22,
    TARGET_PT_R23 = 23,
    TARGET_PT_R24 = 24,
    TARGET_PT_R25 = 25,
    TARGET_PT_R26 = 26,
    TARGET_PT_R27 = 27,
    TARGET_PT_R28 = 28,
    TARGET_PT_R29 = 29,
    TARGET_PT_R30 = 30,
    TARGET_PT_R31 = 31,
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};

struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};

#if !defined(TARGET_PPC64)
/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};

QEMU_BUILD_BUG_ON(offsetof(struct target_sigframe, mctx)
                  != offsetof_sigframe_mcontext);
#endif

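/*
 * Added note: the layout of the rt (siginfo) frame differs between the
 * two ABIs.  On ppc64, sys_rt_sigreturn expects the ucontext to come
 * first; the 32-bit frame starts with the siginfo instead.
 */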
#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc; /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe,
                           uc.tuc_sigcontext.mcontext)
                  != offsetof_rt_sigframe_mcontext);

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, uc.tuc_mcontext)
                  != offsetof_rt_sigframe_mcontext);

#endif

#if defined(TARGET_PPC64)

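/*
 * Added note: on ppc64 ELFv1 a function pointer refers to a function
 * descriptor (an OPD entry) holding the entry address and TOC pointer;
 * see the handler dispatch in setup_rt_frame() below.
 */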
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};

#endif

/* See arch/powerpc/kernel/signal.c.  */
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = target_sigsp(get_sp_from_cpustate(env), ka);

    return (oldsp - frame_size) & ~0xFUL;
}

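/*
 * Added note: the mcontext holds each Altivec register as a 16-byte image
 * in target byte order.  __put_user()/__get_user() swap each 64-bit half
 * on its own, so when host and target endianness differ the two halves
 * must also be exchanged; PPC_VEC_HI/PPC_VEC_LO select the host-side u64
 * half that belongs in the first/second doubleword of the frame.
 */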
#if TARGET_BIG_ENDIAN == HOST_BIG_ENDIAN
#define PPC_VEC_HI 0
#define PPC_VEC_LO 1
#else
#define PPC_VEC_HI 1
#define PPC_VEC_LO 0
#endif

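/*
 * Added note: write the complete user-visible register state into a
 * target_mcontext.  Shared by setup_frame(), setup_rt_frame() and
 * do_swapcontext().
 */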
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    int i;
    uint32_t ccr = 0;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(cpu_read_xer(env), &frame->mc_gregs[TARGET_PT_XER]);

    ccr = ppc_get_cr(env);
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __put_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __put_user(*fpr, &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

#if !defined(TARGET_PPC64)
    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}

static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
    __put_user(0x38000000 | sigret, &tramp[0]);
    __put_user(0x44000002, &tramp[1]);
}

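/*
 * Added note: inverse of save_user_regs().  'sig' is non-zero when
 * returning from a signal handler and zero on the swapcontext path;
 * that choice controls whether MSR[LE] is taken from the frame and
 * whether r2 is preserved.
 */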
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong xer;
    target_ulong ccr;

    int i;

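    /*
     * Added note: on the swapcontext path the caller's r2 must survive
     * the switch, so remember it before the GPR loop below overwrites it.
     */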
    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);

    __get_user(xer, &frame->mc_gregs[TARGET_PT_XER]);
    cpu_write_xer(env, xer);

    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
    ppc_set_cr(env, ccr);
    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR. */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig) {
        ppc_store_msr(env, ((env->msr & ~(1ull << MSR_LE)) |
                            (msr & (1ull << MSR_LE))));
    }

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(env_cpu(env), v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < 32; i++) {
            ppc_avr_t *avr = cpu_avr_ptr(env, i);
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
        for (i = 0; i < 32; i++) {
            uint64_t *vsrl = cpu_vsrl_ptr(env, i);
            __get_user(*vsrl, &vsregs[i]);
        }
    }
#endif

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;
        for (i = 0; i < 32; i++) {
            uint64_t *fpr = cpu_fpr_ptr(env, i);
            __get_user(*fpr, &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

#if !defined(TARGET_PPC64)
    /* Restore SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
#endif
}

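/* Added note: the classic (non-RT) signal frame is only part of the 32-bit ABI. */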
#if !defined(TARGET_PPC64)
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    env->lr = default_sigreturn;

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */

void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = get_task_state(thread_cpu)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    rt_sf->info = *info;

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    __put_user(h2g (&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
#endif

    save_user_regs(env, mctx);

    env->lr = default_rt_sigreturn;

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(env_cpu(env), ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points. R12 must also be set. */
        env->gpr[12] = env->nip = ka->_sa_handler;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

#if TARGET_BIG_ENDIAN
    /* Signal handlers are entered in big-endian mode.  */
    ppc_store_msr(env, env->msr & ~(1ull << MSR_LE));
#else
    /* Signal handlers are entered in little-endian mode.  */
    ppc_store_msr(env, env->msr | (1ull << MSR_LE));
#endif

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sigsegv(sig);
}

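/*
 * Added note: non-RT sigreturn.  setup_frame() left gpr[1] pointing
 * SIGNAL_FRAMESIZE bytes below the frame, so the sigcontext sits just
 * above it.
 */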
#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */

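/*
 * Added note: shared by do_rt_sigreturn() and do_swapcontext(); installs
 * the signal mask and register state described by a target ucontext.
 */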
/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof (set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}

long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    target_restore_altstack(&rt_sf->uc.tuc_stack, env);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}

/* This syscall implements {get,set,swap}context for userland.  */
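/*
 * Added note: either argument may be zero.  A non-zero uold_ctx alone
 * behaves like getcontext(), a non-zero unew_ctx alone like setcontext(),
 * and both together like swapcontext().
 */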
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = get_task_state(thread_cpu);

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
        return -QEMU_ESIGRETURN;
    }

    return 0;
}

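/*
 * Added note: the trampoline page holds two 8-byte (two-instruction)
 * trampolines, sigreturn at offset 0 (32-bit only) and rt_sigreturn at
 * offset 8.
 */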
void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0);
    assert(tramp != NULL);

#ifdef TARGET_ARCH_HAS_SETUP_FRAME
    default_sigreturn = sigtramp_page;
    encode_trampoline(TARGET_NR_sigreturn, tramp + 0);
#endif

    default_rt_sigreturn = sigtramp_page + 8;
    encode_trampoline(TARGET_NR_rt_sigreturn, tramp + 2);

    unlock_user(tramp, sigtramp_page, 2 * 8);
}