linux-user/loongarch64/signal.c (qemu/armbru.git)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch emulation of Linux signals
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/loongarch/internals.h"
#include "target/loongarch/vec.h"
#include "vdso-asmoffset.h"

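/*
 * Guest-visible signal frame layout.  The structures below mirror the
 * LoongArch kernel's uapi sigcontext definitions; the QEMU_BUILD_BUG_ON
 * checks verify their sizes and offsets against vdso-asmoffset.h.
 */
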
/* FP context was used */
#define SC_USED_FP              (1 << 0)

struct target_sigcontext {
    abi_ulong sc_pc;
    abi_ulong sc_regs[32];
    abi_uint  sc_flags;
    abi_ulong sc_extcontext[0] QEMU_ALIGNED(16);
};

QEMU_BUILD_BUG_ON(sizeof(struct target_sigcontext) != sizeof_sigcontext);
QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_pc)
                  != offsetof_sigcontext_pc);
QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_regs)
                  != offsetof_sigcontext_gr);

#define FPU_CTX_MAGIC           0x46505501
#define FPU_CTX_ALIGN           8
struct target_fpu_context {
    abi_ulong regs[32];
    abi_ulong fcc;
    abi_uint  fcsr;
} QEMU_ALIGNED(FPU_CTX_ALIGN);

QEMU_BUILD_BUG_ON(offsetof(struct target_fpu_context, regs)
                  != offsetof_fpucontext_fr);

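/*
 * The LSX and LASX records below widen the FPU record: regs[] holds
 * 2 (LSX, 128-bit) or 4 (LASX, 256-bit) abi_ulong slots per vector
 * register, stored lowest 64-bit element first, and fcc packs the
 * eight condition-code flags into one abi_ulong (see read_fcc()).
 */
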
#define LSX_CTX_MAGIC           0x53580001
#define LSX_CTX_ALIGN           16
struct target_lsx_context {
    abi_ulong regs[2 * 32];
    abi_ulong fcc;
    abi_uint  fcsr;
} QEMU_ALIGNED(LSX_CTX_ALIGN);

#define LASX_CTX_MAGIC          0x41535801
#define LASX_CTX_ALIGN          32
struct target_lasx_context {
    abi_ulong regs[4 * 32];
    abi_ulong fcc;
    abi_uint  fcsr;
} QEMU_ALIGNED(LASX_CTX_ALIGN);

#define CONTEXT_INFO_ALIGN      16
struct target_sctx_info {
    abi_uint  magic;
    abi_uint  size;
    abi_ulong padding;
} QEMU_ALIGNED(CONTEXT_INFO_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct target_sctx_info) != sizeof_sctx_info);

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ptr tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
    struct target_sigcontext tuc_mcontext;
};

struct target_rt_sigframe {
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};

QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe)
                  != sizeof_rt_sigframe);
QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, rs_uc.tuc_mcontext)
                  != offsetof_sigcontext);

/*
 * These two structures are not present in guest memory, are private
 * to the signal implementation, but are largely copied from the
 * kernel's signal implementation.
 */
struct ctx_layout {
    void *haddr;
    abi_ptr gaddr;
    unsigned int size;
};

struct extctx_layout {
    unsigned long size;
    unsigned int flags;
    struct ctx_layout fpu;
    struct ctx_layout lsx;
    struct ctx_layout lasx;
    struct ctx_layout end;
};

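/*
 * Extension records live on the guest stack immediately after the fixed
 * rt_sigframe (i.e. at sc_extcontext), each preceded by a
 * target_sctx_info header giving its magic and size, and terminated by
 * an "end" record with magic 0.  extframe_alloc() carves one such
 * record out of the stack, growing downwards, and remembers its guest
 * address and size in the extctx_layout bookkeeping.
 */
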
static abi_ptr extframe_alloc(struct extctx_layout *extctx,
                              struct ctx_layout *sctx, unsigned size,
                              unsigned align, abi_ptr orig_sp)
{
    abi_ptr sp = orig_sp;

    sp -= sizeof(struct target_sctx_info) + size;
    align = MAX(align, CONTEXT_INFO_ALIGN);
    sp = ROUND_DOWN(sp, align);
    sctx->gaddr = sp;

    size = orig_sp - sp;
    sctx->size = size;
    extctx->size += size;

    return sp;
}

static abi_ptr setup_extcontext(CPULoongArchState *env,
                                struct extctx_layout *extctx, abi_ptr sp)
{
    memset(extctx, 0, sizeof(struct extctx_layout));

    /* Grow down, alloc "end" context info first. */
    sp = extframe_alloc(extctx, &extctx->end, 0, CONTEXT_INFO_ALIGN, sp);

    /* For qemu, there is no lazy fp context switch, so fp always present. */
    extctx->flags = SC_USED_FP;

    if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
        sp = extframe_alloc(extctx, &extctx->lasx,
                            sizeof(struct target_lasx_context),
                            LASX_CTX_ALIGN, sp);
    } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
        sp = extframe_alloc(extctx, &extctx->lsx,
                            sizeof(struct target_lsx_context),
                            LSX_CTX_ALIGN, sp);
    } else {
        sp = extframe_alloc(extctx, &extctx->fpu,
                            sizeof(struct target_fpu_context),
                            FPU_CTX_ALIGN, sp);
    }

    return sp;
}

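/*
 * Fill in the sigcontext and whichever extension record was reserved by
 * setup_extcontext(): pc and the general registers go into the
 * sigcontext proper, FP/vector state into the fpu, lsx or lasx record,
 * and the trailing "end" record gets magic 0 / size 0.
 */
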
static void setup_sigframe(CPULoongArchState *env,
                           struct target_sigcontext *sc,
                           struct extctx_layout *extctx)
{
    struct target_sctx_info *info;
    int i;

    __put_user(extctx->flags, &sc->sc_flags);
    __put_user(env->pc, &sc->sc_pc);
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->gpr[i], &sc->sc_regs[i]);
    }

    /*
     * Set extension context
     */
    if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
        struct target_lasx_context *lasx_ctx;
        info = extctx->lasx.haddr;

        __put_user(LASX_CTX_MAGIC, &info->magic);
        __put_user(extctx->lasx.size, &info->size);

        lasx_ctx = (struct target_lasx_context *)(info + 1);

        for (i = 0; i < 32; ++i) {
            __put_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
            __put_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
            __put_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
            __put_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
        }
        __put_user(read_fcc(env), &lasx_ctx->fcc);
        __put_user(env->fcsr0, &lasx_ctx->fcsr);
    } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
        struct target_lsx_context *lsx_ctx;
        info = extctx->lsx.haddr;

        __put_user(LSX_CTX_MAGIC, &info->magic);
        __put_user(extctx->lsx.size, &info->size);

        lsx_ctx = (struct target_lsx_context *)(info + 1);

        for (i = 0; i < 32; ++i) {
            __put_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
            __put_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
        }
        __put_user(read_fcc(env), &lsx_ctx->fcc);
        __put_user(env->fcsr0, &lsx_ctx->fcsr);
    } else {
        struct target_fpu_context *fpu_ctx;
        info = extctx->fpu.haddr;

        __put_user(FPU_CTX_MAGIC, &info->magic);
        __put_user(extctx->fpu.size, &info->size);

        fpu_ctx = (struct target_fpu_context *)(info + 1);

        for (i = 0; i < 32; ++i) {
            __put_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
        }
        __put_user(read_fcc(env), &fpu_ctx->fcc);
        __put_user(env->fcsr0, &fpu_ctx->fcsr);
    }

    /*
     * Set end context
     */
    info = extctx->end.haddr;
    __put_user(0, &info->magic);
    __put_user(0, &info->size);
}

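/*
 * Walk the extension records of an existing frame (for sigreturn),
 * starting right after the fixed rt_sigframe: read each sctx_info
 * header, note the guest address and size of any FPU/LSX/LASX record,
 * and stop at the END record (magic 0).  Returns false on a failed
 * read, an unknown magic, or an undersized record.
 */
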
static bool parse_extcontext(struct extctx_layout *extctx, abi_ptr frame)
{
    memset(extctx, 0, sizeof(*extctx));

    while (1) {
        abi_uint magic, size;

        if (get_user_u32(magic, frame) || get_user_u32(size, frame + 4)) {
            return false;
        }

        switch (magic) {
        case 0: /* END */
            extctx->end.gaddr = frame;
            extctx->end.size = size;
            extctx->size += size;
            return true;

        case FPU_CTX_MAGIC:
            if (size < (sizeof(struct target_sctx_info) +
                        sizeof(struct target_fpu_context))) {
                return false;
            }
            extctx->fpu.gaddr = frame;
            extctx->fpu.size = size;
            extctx->size += size;
            break;
        case LSX_CTX_MAGIC:
            if (size < (sizeof(struct target_sctx_info) +
                        sizeof(struct target_lsx_context))) {
                return false;
            }
            extctx->lsx.gaddr = frame;
            extctx->lsx.size = size;
            extctx->size += size;
            break;
        case LASX_CTX_MAGIC:
            if (size < (sizeof(struct target_sctx_info) +
                        sizeof(struct target_lasx_context))) {
                return false;
            }
            extctx->lasx.gaddr = frame;
            extctx->lasx.size = size;
            extctx->size += size;
            break;
        default:
            return false;
        }

        frame += size;
    }
}

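/*
 * Restore CPU state from the sigcontext plus the widest extension
 * record found by parse_extcontext(): LASX is preferred over LSX,
 * which is preferred over plain FPU state.
 */
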
static void restore_sigframe(CPULoongArchState *env,
                             struct target_sigcontext *sc,
                             struct extctx_layout *extctx)
{
    int i;
    abi_ulong fcc;

    __get_user(env->pc, &sc->sc_pc);
    for (i = 1; i < 32; ++i) {
        __get_user(env->gpr[i], &sc->sc_regs[i]);
    }

    if (extctx->lasx.haddr) {
        struct target_lasx_context *lasx_ctx =
            extctx->lasx.haddr + sizeof(struct target_sctx_info);

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
            __get_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
            __get_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
            __get_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
        }
        __get_user(fcc, &lasx_ctx->fcc);
        write_fcc(env, fcc);
        __get_user(env->fcsr0, &lasx_ctx->fcsr);
        restore_fp_status(env);
    } else if (extctx->lsx.haddr) {
        struct target_lsx_context *lsx_ctx =
            extctx->lsx.haddr + sizeof(struct target_sctx_info);

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
            __get_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
        }
        __get_user(fcc, &lsx_ctx->fcc);
        write_fcc(env, fcc);
        __get_user(env->fcsr0, &lsx_ctx->fcsr);
        restore_fp_status(env);
    } else if (extctx->fpu.haddr) {
        struct target_fpu_context *fpu_ctx =
            extctx->fpu.haddr + sizeof(struct target_sctx_info);

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
        }
        __get_user(fcc, &fpu_ctx->fcc);
        write_fcc(env, fcc);
        __get_user(env->fcsr0, &fpu_ctx->fcsr);
        restore_fp_status(env);
    }
}

/*
 * Determine which stack to use.
 */
static abi_ptr get_sigframe(struct target_sigaction *ka,
                            CPULoongArchState *env,
                            struct extctx_layout *extctx)
{
    abi_ulong sp;

    sp = target_sigsp(get_sp_from_cpustate(env), ka);
    sp = ROUND_DOWN(sp, 16);
    sp = setup_extcontext(env, extctx, sp);
    sp -= sizeof(struct target_rt_sigframe);

    assert(QEMU_IS_ALIGNED(sp, 16));

    return sp;
}

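/*
 * Build the complete rt signal frame on the guest stack and point the
 * CPU at the handler: a0/a1/a2 (gpr 4/5/6) carry the handler arguments
 * (signo, &siginfo, &ucontext), sp is left at the frame, and ra is set
 * to the sigreturn trampoline so that returning from the handler
 * re-enters via rt_sigreturn (handled here by do_rt_sigreturn).
 */
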
void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ptr frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, &extctx);
    trace_user_setup_rt_frame(env, frame_addr);

    frame = lock_user(VERIFY_WRITE, frame_addr,
                      sizeof(*frame) + extctx.size, 0);
    if (!frame) {
        force_sigsegv(sig);
        return;
    }

    if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
        extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
        extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
    } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
        extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
        extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
    } else {
        extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
        extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
    }

    tswap_siginfo(&frame->rs_info, info);

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    target_save_altstack(&frame->rs_uc.tuc_stack, env);

    setup_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    env->gpr[4] = sig;
    env->gpr[5] = frame_addr + offsetof(struct target_rt_sigframe, rs_info);
    env->gpr[6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc);
    env->gpr[3] = frame_addr;
    env->gpr[1] = default_rt_sigreturn;

    env->pc = ka->_sa_handler;
    unlock_user(frame, frame_addr, sizeof(*frame) + extctx.size);
}

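/*
 * rt_sigreturn: sp (gpr[3]) still points at the frame built by
 * setup_rt_frame().  Re-parse the extension records, restore the saved
 * signal mask, registers and altstack, and report -QEMU_ESIGRETURN so
 * the syscall path does not overwrite the restored return register.
 */
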
long do_rt_sigreturn(CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->gpr[3];
    trace_user_do_rt_sigreturn(env, frame_addr);

    if (!parse_extcontext(&extctx, frame_addr + sizeof(*frame))) {
        goto badframe;
    }

    frame = lock_user(VERIFY_READ, frame_addr,
                      sizeof(*frame) + extctx.size, 1);
    if (!frame) {
        goto badframe;
    }

    if (extctx.lasx.gaddr) {
        extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
    } else if (extctx.lsx.gaddr) {
        extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
    } else if (extctx.fpu.gaddr) {
        extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    target_restore_altstack(&frame->rs_uc.tuc_stack, env);

    unlock_user(frame, frame_addr, 0);
    return -QEMU_ESIGRETURN;

 badframe:
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}

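/*
 * The sigreturn trampoline is two instructions on a dedicated page:
 * load the rt_sigreturn syscall number (0x8b, i.e. 139 in the generic
 * syscall table) into a7, then "syscall 0".  Handlers return here via
 * the ra value installed by setup_rt_frame().
 */
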
void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);
    assert(tramp != NULL);

    __put_user(0x03822c0b, tramp + 0);  /* ori     a7, zero, 0x8b */
    __put_user(0x002b0000, tramp + 1);  /* syscall 0 */

    default_rt_sigreturn = sigtramp_page;
    unlock_user(tramp, sigtramp_page, 8);
}