/*
 *  Emulation of Linux signals
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "signal-common.h"
#include "linux-user/trace.h"
/* Size of dummy stack frame allocated when calling signal handler.
   See arch/powerpc/include/asm/ptrace.h.  */
#if defined(TARGET_PPC64)
#define SIGNAL_FRAMESIZE 128
#else
#define SIGNAL_FRAMESIZE 64
#endif
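/*
 * Illustration (editorial, based on setup_frame() below): the dummy frame
 * is the gap left between the new stack pointer and the signal frame
 * proper; the old r1 is stored at the new stack pointer so the handler
 * sees a valid back chain:
 *
 *   frame_addr                    -> start of the signal frame
 *   frame_addr - SIGNAL_FRAMESIZE -> new r1, holding the saved old r1
 *
 * setup_rt_frame() reserves 16 additional bytes for the gap.
 */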
/* See arch/powerpc/include/asm/ucontext.h.  Only used for 32-bit PPC;
   on 64-bit PPC, sigcontext and mcontext are one and the same.  */
struct target_mcontext {
    target_ulong mc_gregs[48];
    /* Includes fpscr.  */
    uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
    /* Pointer to the vector regs */
    target_ulong v_regs;
#else
    target_ulong mc_pad[2];
#endif
    /* We need to handle Altivec and SPE at the same time, which no
       kernel needs to do.  Fortunately, the kernel defines this bit to
       be Altivec-register-large all the time, rather than trying to
       twiddle it based on the specific platform.  */
    union {
        /* SPE vector registers.  One extra for SPEFSCR.  */
        uint32_t spe[33];
        /* Altivec vector registers.  The packing of VSCR and VRSAVE
           varies depending on whether we're PPC64 or not: PPC64 splits
           them apart; PPC32 stuffs them together.
           We also need to account for the VSX registers on PPC64.  */
#if defined(TARGET_PPC64)
#define QEMU_NVRREG (34 + 16)
        /* On ppc64, this mcontext structure is naturally *unaligned*,
         * or rather it is aligned on a 8 bytes boundary but not on
         * a 16 bytes one. This pad fixes it up. This is also why the
         * vector regs are referenced by the v_regs pointer above so
         * any amount of padding can be added here.
         */
        target_ulong pad;
#else
        /* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
        /* We cannot use ppc_avr_t here as we do *not* want the implied
         * 16-bytes alignment that would result from it. This would have
         * the effect of making the whole struct target_mcontext aligned
         * which breaks the layout of struct target_ucontext on ppc64.
         */
        uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
    } mc_vregs;
};
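/*
 * Layout of mc_vregs.altivec as used by the code below (an editorial
 * sketch, not copied from kernel headers): slots 0-31 hold VR0-VR31 as
 * two 64-bit halves each.  On ppc64, VRSAVE is stored at the start of
 * slot 33 and the 32 VSX second halves fill the 16 extra slots from 34
 * upwards; on ppc32, VRSAVE goes at the start of slot 32, which, per the
 * comment above, it shares with VSCR.
 */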
/* See arch/powerpc/include/asm/sigcontext.h.  */
struct target_sigcontext {
    target_ulong _unused[4];
    int32_t signal;
#if defined(TARGET_PPC64)
    int32_t pad0;
#endif
    target_ulong handler;
    target_ulong oldmask;
    target_ulong regs;      /* struct pt_regs __user * */
#if defined(TARGET_PPC64)
    struct target_mcontext mcontext;
#endif
};
/* Indices for target_mcontext.mc_gregs, below.
   See arch/powerpc/include/asm/ptrace.h for details.  */
enum {
    /* TARGET_PT_R0 through TARGET_PT_R31 occupy indices 0..31.  */
    TARGET_PT_NIP = 32,
    TARGET_PT_MSR = 33,
    TARGET_PT_ORIG_R3 = 34,
    TARGET_PT_CTR = 35,
    TARGET_PT_LNK = 36,
    TARGET_PT_XER = 37,
    TARGET_PT_CCR = 38,
    /* Yes, there are two registers with #39.  One is 64-bit only.  */
    TARGET_PT_MQ = 39,
    TARGET_PT_SOFTE = 39,
    TARGET_PT_TRAP = 40,
    TARGET_PT_DAR = 41,
    TARGET_PT_DSISR = 42,
    TARGET_PT_RESULT = 43,
    TARGET_PT_REGS_COUNT = 44
};
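/*
 * Example (illustration only): mc_gregs[1] holds the guest's r1 (stack
 * pointer) and mc_gregs[TARGET_PT_NIP] the resume address.  The CR loops
 * in save_user_regs()/restore_user_regs() pack the eight 4-bit CR fields
 * into mc_gregs[TARGET_PT_CCR], with crf[0] occupying bits 28-31 of the
 * 32-bit value.
 */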
struct target_ucontext {
    target_ulong tuc_flags;
    target_ulong tuc_link;    /* ucontext_t __user * */
    struct target_sigaltstack tuc_stack;
#if !defined(TARGET_PPC64)
    int32_t tuc_pad[7];
    target_ulong tuc_regs;    /* struct mcontext __user *
                                 points to uc_mcontext field */
#endif
    target_sigset_t tuc_sigmask;
#if defined(TARGET_PPC64)
    target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
    struct target_sigcontext tuc_sigcontext;
#else
    int32_t tuc_maskext[30];
    int32_t tuc_pad2[3];
    struct target_mcontext tuc_mcontext;
#endif
};
/* See arch/powerpc/kernel/signal_32.c.  */
struct target_sigframe {
    struct target_sigcontext sctx;
    struct target_mcontext mctx;
    int32_t abigap[56];
};
#if defined(TARGET_PPC64)

#define TARGET_TRAMP_SIZE 6

struct target_rt_sigframe {
    /* sys_rt_sigreturn requires the ucontext be the first field */
    struct target_ucontext uc;
    target_ulong _unused[2];
    uint32_t trampoline[TARGET_TRAMP_SIZE];
    target_ulong pinfo; /* struct siginfo __user * */
    target_ulong puc;   /* void __user * */
    struct target_siginfo info;
    /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
    char abigap[288];
} __attribute__((aligned(16)));

#else

struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
    int32_t abigap[56];
};

#endif
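/*
 * Note (editorial, based on setup_rt_frame() below): on 64-bit the
 * trampoline lives in rt_sf->trampoline and the mcontext sits inside
 * uc.tuc_sigcontext, while on 32-bit the trampoline is written into the
 * mc_pad words of uc.tuc_mcontext (see the "tramp" alias below).
 */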
#if defined(TARGET_PPC64)
struct target_func_ptr {
    target_ulong entry;
    target_ulong toc;
};
#endif

/* We use the mc_pad field for the signal return trampoline.  */
#define tramp mc_pad
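/*
 * Sketch of what this alias gives us on ppc32 (deduced from
 * encode_trampoline() below, not from kernel sources): mc_pad[0] ends up
 * holding the "li r0,NR_sigreturn" instruction and mc_pad[1] the "sc"
 * instruction, and the handler's LR is pointed at mc_pad[0].
 */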
/* See arch/powerpc/kernel/signal.c.  */
static target_ulong get_sigframe(struct target_sigaction *ka,
                                 CPUPPCState *env,
                                 int frame_size)
{
    target_ulong oldsp;

    oldsp = target_sigsp(get_sp_from_cpustate(env), ka);

    return (oldsp - frame_size) & ~0xFUL;
}
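/*
 * Worked example with made-up numbers: if target_sigsp() returns
 * 0x40001234 and frame_size is 0x2c0, the frame is placed at
 * (0x40001234 - 0x2c0) & ~0xf = 0x40000f70, i.e. 16-byte aligned and
 * entirely below the original stack pointer.
 */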
#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
     (!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI      0
#define PPC_VEC_LO      1
#else
#define PPC_VEC_HI      1
#define PPC_VEC_LO      0
#endif
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
    target_ulong msr = env->msr;
    target_ulong ccr = 0;
    int i;

    /* In general, the kernel attempts to be intelligent about what it
       needs to save for Altivec/FP/SPE registers.  We don't care that
       much, so we just go ahead and save everything.  */

    /* Save general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __put_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
    }
    __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    /* Save Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t *vrsave;

        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];

            __put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Set MSR_VR in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= (1ull << MSR_VR);
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
        /* 64-bit needs to put a pointer to the vectors in the frame */
        __put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
        vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
        __put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Save VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];

        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __put_user(env->vsr[i], &vsregs[i]);
        }
    }
#endif

    /* Save floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __put_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
    }

    /* Save SPE registers.  The kernel only saves the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        /* Set MSR_SPE in the saved MSR value to indicate that
           frame->mc_vregs contains valid data.  */
        msr |= (1ull << MSR_SPE);
        __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }

    /* Store MSR.  */
    __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
}
static void encode_trampoline(int sigret, uint32_t *tramp)
{
    /* Set up the sigreturn trampoline: li r0,sigret; sc.  */
    __put_user(0x38000000 | sigret, &tramp[0]);
    __put_user(0x44000002, &tramp[1]);
}
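/*
 * Example encoding (the syscall number is assumed for illustration only):
 * with sigret == 119, "li r0,119" encodes as 0x38000000 | 119 =
 * 0x38000077, so the guest sees the word pair 0x38000077, 0x44000002;
 * jumping to it loads r0 with the syscall number and traps into the
 * kernel (here, QEMU's syscall emulation).
 */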
static void restore_user_regs(CPUPPCState *env,
                              struct target_mcontext *frame, int sig)
{
    target_ulong save_r2 = 0;
    target_ulong msr;
    target_ulong ccr;
    int i;

    if (!sig) {
        save_r2 = env->gpr[2];
    }

    /* Restore general registers.  */
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        __get_user(env->gpr[i], &frame->mc_gregs[i]);
    }
    __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
    __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
    __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
    __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
    __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);

    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
        env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
    }

    if (!sig) {
        env->gpr[2] = save_r2;
    }
    /* Restore MSR.  */
    __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);

    /* If doing signal return, restore the previous little-endian mode.  */
    if (sig) {
        env->msr = (env->msr & ~(1ull << MSR_LE)) | (msr & (1ull << MSR_LE));
    }

    /* Restore Altivec registers if necessary.  */
    if (env->insns_flags & PPC_ALTIVEC) {
        ppc_avr_t *v_regs;
        uint32_t *vrsave;
#if defined(TARGET_PPC64)
        uint64_t v_addr;
        /* 64-bit needs to recover the pointer to the vectors from the frame */
        __get_user(v_addr, &frame->v_regs);
        v_regs = g2h(v_addr);
#else
        v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
        for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
            ppc_avr_t *avr = &env->avr[i];
            ppc_avr_t *vreg = &v_regs[i];

            __get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
            __get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
        }
        /* Recover VRSAVE from the slot used by save_user_regs() above.  */
#if defined(TARGET_PPC64)
        vrsave = (uint32_t *)&v_regs[33];
#else
        vrsave = (uint32_t *)&v_regs[32];
#endif
        __get_user(env->spr[SPR_VRSAVE], vrsave);
    }

#if defined(TARGET_PPC64)
    /* Restore VSX second halves */
    if (env->insns_flags2 & PPC2_VSX) {
        uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];

        for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
            __get_user(env->vsr[i], &vsregs[i]);
        }
    }
#endif

    /* Restore floating point registers.  */
    if (env->insns_flags & PPC_FLOAT) {
        uint64_t fpscr;

        for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
            __get_user(env->fpr[i], &frame->mc_fregs[i]);
        }
        __get_user(fpscr, &frame->mc_fregs[32]);
        env->fpscr = (uint32_t) fpscr;
    }

    /* Restore SPE registers.  The kernel only restores the high half.  */
    if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
        for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
            uint32_t hi;

            __get_user(hi, &frame->mc_vregs.spe[i]);
            env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
        }
#else
        for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
            __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
        }
#endif
        __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
    }
}
#if !defined(TARGET_PPC64)
void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUPPCState *env)
{
    struct target_sigframe *frame;
    struct target_sigcontext *sc;
    target_ulong frame_addr, newsp;
    int err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));
    trace_user_setup_frame(env, frame_addr);
    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
        goto sigsegv;
    sc = &frame->sctx;

    __put_user(ka->_sa_handler, &sc->handler);
    __put_user(set->sig[0], &sc->oldmask);
    __put_user(set->sig[1], &sc->_unused[3]);
    __put_user(h2g(&frame->mctx), &sc->regs);
    __put_user(sig, &sc->signal);

    /* Save user regs.  */
    save_user_regs(env, &frame->mctx);

    /* Construct the trampoline code on the stack.  */
    encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(frame->mctx.tramp);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = frame_addr - SIGNAL_FRAMESIZE;
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = sig;
    env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);

    env->nip = (target_ulong) ka->_sa_handler;

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(frame, frame_addr, 1);
    return;

sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
#endif /* !defined(TARGET_PPC64) */
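/*
 * Editorial summary of the handler entry state that setup_frame() creates:
 * r1 = frame_addr - SIGNAL_FRAMESIZE (with the old r1 saved there),
 * r3 = sig, r4 = &frame->sctx, NIP = the handler, and LR = the li/sc
 * trampoline written into frame->mctx.mc_pad, so returning from the
 * handler issues the sigreturn syscall handled by do_sigreturn() below.
 */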
void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf;
    uint32_t *trampptr = 0;
    struct target_mcontext *mctx = 0;
    target_ulong rt_sf_addr, newsp = 0;
    int i, err = 0;
#if defined(TARGET_PPC64)
    struct target_sigcontext *sc = 0;
    struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif

    rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
    if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    tswap_siginfo(&rt_sf->info, info);

    __put_user(0, &rt_sf->uc.tuc_flags);
    __put_user(0, &rt_sf->uc.tuc_link);
    target_save_altstack(&rt_sf->uc.tuc_stack, env);
#if !defined(TARGET_PPC64)
    __put_user(h2g(&rt_sf->uc.tuc_mcontext),
               &rt_sf->uc.tuc_regs);
#endif
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
    }

#if defined(TARGET_PPC64)
    mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
    trampptr = &rt_sf->trampoline[0];

    sc = &rt_sf->uc.tuc_sigcontext;
    __put_user(h2g(mctx), &sc->regs);
    __put_user(sig, &sc->signal);
#else
    mctx = &rt_sf->uc.tuc_mcontext;
    trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
#endif

    save_user_regs(env, mctx);
    encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);

    /* The kernel checks for the presence of a VDSO here.  We don't
       emulate a vdso, so use a sigreturn system call.  */
    env->lr = (target_ulong) h2g(trampptr);

    /* Turn off all fp exceptions.  */
    env->fpscr = 0;

    /* Create a stack frame for the caller of the handler.  */
    newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
    err |= put_user(env->gpr[1], newsp, target_ulong);

    if (err)
        goto sigsegv;

    /* Set up registers for signal handler.  */
    env->gpr[1] = newsp;
    env->gpr[3] = (target_ulong) sig;
    env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
    env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
    env->gpr[6] = (target_ulong) h2g(rt_sf);

#if defined(TARGET_PPC64)
    if (get_ppc64_abi(image) < 2) {
        /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
        struct target_func_ptr *handler =
            (struct target_func_ptr *)g2h(ka->_sa_handler);
        env->nip = tswapl(handler->entry);
        env->gpr[2] = tswapl(handler->toc);
    } else {
        /* ELFv2 PPC64 function pointers are entry points, but R12
         * must also be set.  */
        env->nip = tswapl((target_ulong) ka->_sa_handler);
        env->gpr[12] = env->nip;
    }
#else
    env->nip = (target_ulong) ka->_sa_handler;
#endif

    /* Signal handlers are entered in big-endian mode.  */
    env->msr &= ~(1ull << MSR_LE);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
}
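/*
 * Editorial note: the handler set up above receives the usual rt_sigaction
 * arguments, r3 = signal number, r4 = &rt_sf->info and r5 = &rt_sf->uc,
 * with r6 additionally pointing at the whole frame.  The extra 16 bytes
 * subtracted from newsp are added back by do_rt_sigreturn() below when it
 * recovers the frame from r1.
 */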
#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
    struct target_sigcontext *sc = NULL;
    struct target_mcontext *sr = NULL;
    target_ulong sr_addr = 0, sc_addr;
    sigset_t blocked;
    target_sigset_t set;

    sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
    if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
        goto sigsegv;

#if defined(TARGET_PPC64)
    set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
#else
    __get_user(set.sig[0], &sc->oldmask);
    __get_user(set.sig[1], &sc->_unused[3]);
#endif
    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);

    __get_user(sr_addr, &sc->regs);
    if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
        goto sigsegv;
    restore_user_regs(env, sr, 1);

    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(sr, sr_addr, 1);
    unlock_user_struct(sc, sc_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */
/* See arch/powerpc/kernel/signal_32.c.  */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
{
    struct target_mcontext *mcp;
    target_ulong mcp_addr;
    sigset_t blocked;
    target_sigset_t set;

    if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
                       sizeof(set)))
        return 1;

#if defined(TARGET_PPC64)
    mcp_addr = h2g(ucp) +
        offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
#else
    __get_user(mcp_addr, &ucp->tuc_regs);
#endif

    if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
        return 1;

    target_to_host_sigset_internal(&blocked, &set);
    set_sigmask(&blocked);
    restore_user_regs(env, mcp, sig);

    unlock_user_struct(mcp, mcp_addr, 1);
    return 0;
}
long do_rt_sigreturn(CPUPPCState *env)
{
    struct target_rt_sigframe *rt_sf = NULL;
    target_ulong rt_sf_addr;

    rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
    if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
        goto sigsegv;

    if (do_setcontext(&rt_sf->uc, env, 1))
        goto sigsegv;

    do_sigaltstack(rt_sf_addr
                   + offsetof(struct target_rt_sigframe, uc.tuc_stack),
                   0, env->gpr[1]);

    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    return -TARGET_QEMU_ESIGRETURN;

sigsegv:
    unlock_user_struct(rt_sf, rt_sf_addr, 1);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}
/* This syscall implements {get,set,swap}context for userland.  */
abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx,
                        abi_ulong unew_ctx, abi_long ctx_size)
{
    struct target_ucontext *uctx;
    struct target_mcontext *mctx;

    /* For ppc32, ctx_size is "reserved for future use".
     * For ppc64, we do not yet support the VSX extension.
     */
    if (ctx_size < sizeof(struct target_ucontext)) {
        return -TARGET_EINVAL;
    }

    if (uold_ctx) {
        TaskState *ts = (TaskState *)thread_cpu->opaque;

        if (!lock_user_struct(VERIFY_WRITE, uctx, uold_ctx, 1)) {
            return -TARGET_EFAULT;
        }

#ifdef TARGET_PPC64
        mctx = &uctx->tuc_sigcontext.mcontext;
#else
        /* ??? The kernel aligns the pointer down here into padding, but
         * in setup_rt_frame we don't.  Be self-compatible for now.
         */
        mctx = &uctx->tuc_mcontext;
        __put_user(h2g(mctx), &uctx->tuc_regs);
#endif

        save_user_regs(env, mctx);
        host_to_target_sigset(&uctx->tuc_sigmask, &ts->signal_mask);

        unlock_user_struct(uctx, uold_ctx, 1);
    }

    if (unew_ctx) {
        int err;

        if (!lock_user_struct(VERIFY_READ, uctx, unew_ctx, 1)) {
            return -TARGET_EFAULT;
        }
        err = do_setcontext(uctx, env, 0);
        unlock_user_struct(uctx, unew_ctx, 1);

        if (err) {
            /* We cannot return to a partially updated context.  */
            force_sig(TARGET_SIGSEGV);
        }
    }

    return -TARGET_QEMU_ESIGRETURN;
}
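/*
 * Usage sketch (an assumption about the guest side, not taken from this
 * file): a guest libc can implement getcontext() by passing only
 * uold_ctx, setcontext() by passing only unew_ctx, and swapcontext() by
 * passing both, so the single syscall above covers all three.
 */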