linux-user/aarch64/signal.c
/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu.h"
#include "target_signal.h"
#include "signal-common.h"
#include "linux-user/trace.h"

struct target_sigcontext {
    uint64_t fault_address;
    /* AArch64 registers */
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
    /* 4K reserved for FP/SIMD state and future expansion */
    char __reserved[4096] __attribute__((__aligned__(16)));
};

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ulong tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* glibc uses a 1024-bit sigset_t */
    char __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* last for future expansion */
    struct target_sigcontext tuc_mcontext;
};
/*
 * Header to be used at the beginning of structures extending the user
 * context. Such structures must be placed after the rt_sigframe on the stack
 * and be 16-byte aligned. The last structure must be a dummy one with the
 * magic and size set to 0.
 */
struct target_aarch64_ctx {
    uint32_t magic;
    uint32_t size;
};

#define TARGET_FPSIMD_MAGIC 0x46508001

struct target_fpsimd_context {
    struct target_aarch64_ctx head;
    uint32_t fpsr;
    uint32_t fpcr;
    uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};

#define TARGET_EXTRA_MAGIC  0x45585401

struct target_extra_context {
    struct target_aarch64_ctx head;
    uint64_t datap; /* 16-byte aligned pointer to extra space cast to __u64 */
    uint32_t size; /* size in bytes of the extra space */
    uint32_t reserved[3];
};
#define TARGET_SVE_MAGIC    0x53564501

struct target_sve_context {
    struct target_aarch64_ctx head;
    uint16_t vl;
    uint16_t reserved[3];
    /* The actual SVE data immediately follows.  It is laid out
     * according to TARGET_SVE_SIG_{Z,P}REG_OFFSET, based off of
     * the original struct pointer.
     */
};

#define TARGET_SVE_VQ_BYTES  16
#define TARGET_SVE_SIG_ZREG_SIZE(VQ)  ((VQ) * TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_PREG_SIZE(VQ)  ((VQ) * (TARGET_SVE_VQ_BYTES / 8))

#define TARGET_SVE_SIG_REGS_OFFSET \
    QEMU_ALIGN_UP(sizeof(struct target_sve_context), TARGET_SVE_VQ_BYTES)
#define TARGET_SVE_SIG_ZREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_REGS_OFFSET + TARGET_SVE_SIG_ZREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_PREG_OFFSET(VQ, N) \
    (TARGET_SVE_SIG_ZREG_OFFSET(VQ, 32) + TARGET_SVE_SIG_PREG_SIZE(VQ) * (N))
#define TARGET_SVE_SIG_FFR_OFFSET(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 16))
#define TARGET_SVE_SIG_CONTEXT_SIZE(VQ) \
    (TARGET_SVE_SIG_PREG_OFFSET(VQ, 17))
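/* Worked check of the macros above (a sketch, assuming vq == 4, i.e.
 * 512-bit vectors; VQ counts the vector length in 128-bit/16-byte units,
 * and struct target_sve_context is 16 bytes with no padding):
 *   TARGET_SVE_SIG_REGS_OFFSET     = 16
 *   TARGET_SVE_SIG_ZREG_SIZE(4)    = 64  -> Z0..Z31 occupy bytes 16..2063
 *   TARGET_SVE_SIG_PREG_SIZE(4)    = 8   -> P0..P15 occupy bytes 2064..2191
 *   TARGET_SVE_SIG_FFR_OFFSET(4)   = 2192
 *   TARGET_SVE_SIG_CONTEXT_SIZE(4) = 2200 (callers round this up to 2208)
 */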
struct target_rt_sigframe {
    struct target_siginfo info;
    struct target_ucontext uc;
};

struct target_rt_frame_record {
    uint64_t fp;
    uint64_t lr;
    uint32_t tramp[2];
};

static void target_setup_general_frame(struct target_rt_sigframe *sf,
                                       CPUARMState *env, target_sigset_t *set)
{
    int i;

    __put_user(0, &sf->uc.tuc_flags);
    __put_user(0, &sf->uc.tuc_link);

    target_save_altstack(&sf->uc.tuc_stack, env);

    for (i = 0; i < 31; i++) {
        __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }
    __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);

    __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
    }
}

static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
                                       CPUARMState *env)
{
    int i;

    __put_user(TARGET_FPSIMD_MAGIC, &fpsimd->head.magic);
    __put_user(sizeof(struct target_fpsimd_context), &fpsimd->head.size);
    __put_user(vfp_get_fpsr(env), &fpsimd->fpsr);
    __put_user(vfp_get_fpcr(env), &fpsimd->fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __put_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __put_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __put_user(q[0], &fpsimd->vregs[i * 2]);
        __put_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
static void target_setup_extra_record(struct target_extra_context *extra,
                                      uint64_t datap, uint32_t extra_size)
{
    __put_user(TARGET_EXTRA_MAGIC, &extra->head.magic);
    __put_user(sizeof(struct target_extra_context), &extra->head.size);
    __put_user(datap, &extra->datap);
    __put_user(extra_size, &extra->size);
}

static void target_setup_end_record(struct target_aarch64_ctx *end)
{
    __put_user(0, &end->magic);
    __put_user(0, &end->size);
}

static void target_setup_sve_record(struct target_sve_context *sve,
                                    CPUARMState *env, int vq, int size)
{
    int i, j;

    __put_user(TARGET_SVE_MAGIC, &sve->head.magic);
    __put_user(size, &sve->head.size);
    __put_user(vq * TARGET_SVE_VQ_BYTES, &sve->vl);

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian store
     * of our 64-bit hunks.
     */
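    /* Summary of the layout produced by the loops below: each Z register
     * contributes vq 16-byte chunks, written 64 bits at a time little-endian;
     * each predicate register holds one bit per vector byte (vq * 16 bits),
     * emitted as 16-bit little-endian pieces pulled out of the 64-bit words
     * in env->vfp.pregs[i].p[].
     */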
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __put_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint64_t r = env->vfp.pregs[i].p[j >> 2];
            __put_user_e(r >> ((j & 3) * 16), p + j, le);
        }
    }
}
static void target_restore_general_frame(CPUARMState *env,
                                         struct target_rt_sigframe *sf)
{
    sigset_t set;
    uint64_t pstate;
    int i;

    target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
    set_sigmask(&set);

    for (i = 0; i < 31; i++) {
        __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
    }

    __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
    __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
    __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
    pstate_write(env, pstate);
}

static void target_restore_fpsimd_record(CPUARMState *env,
                                         struct target_fpsimd_context *fpsimd)
{
    uint32_t fpsr, fpcr;
    int i;

    __get_user(fpsr, &fpsimd->fpsr);
    vfp_set_fpsr(env, fpsr);
    __get_user(fpcr, &fpsimd->fpcr);
    vfp_set_fpcr(env, fpcr);

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#ifdef TARGET_WORDS_BIGENDIAN
        __get_user(q[0], &fpsimd->vregs[i * 2 + 1]);
        __get_user(q[1], &fpsimd->vregs[i * 2]);
#else
        __get_user(q[0], &fpsimd->vregs[i * 2]);
        __get_user(q[1], &fpsimd->vregs[i * 2 + 1]);
#endif
    }
}
static void target_restore_sve_record(CPUARMState *env,
                                      struct target_sve_context *sve, int vq)
{
    int i, j;

    /* Note that SVE regs are stored as a byte stream, with each byte element
     * at a subsequent address.  This corresponds to a little-endian load
     * of our 64-bit hunks.
     */
    for (i = 0; i < 32; ++i) {
        uint64_t *z = (void *)sve + TARGET_SVE_SIG_ZREG_OFFSET(vq, i);
        for (j = 0; j < vq * 2; ++j) {
            __get_user_e(env->vfp.zregs[i].d[j], z + j, le);
        }
    }
    for (i = 0; i <= 16; ++i) {
        uint16_t *p = (void *)sve + TARGET_SVE_SIG_PREG_OFFSET(vq, i);
        for (j = 0; j < vq; ++j) {
            uint16_t r;
            __get_user_e(r, p + j, le);
            if (j & 3) {
                env->vfp.pregs[i].p[j >> 2] |= (uint64_t)r << ((j & 3) * 16);
            } else {
                env->vfp.pregs[i].p[j >> 2] = r;
            }
        }
    }
}
static int target_restore_sigframe(CPUARMState *env,
                                   struct target_rt_sigframe *sf)
{
    struct target_aarch64_ctx *ctx, *extra = NULL;
    struct target_fpsimd_context *fpsimd = NULL;
    struct target_sve_context *sve = NULL;
    uint64_t extra_datap = 0;
    bool used_extra = false;
    bool err = false;
    int vq = 0, sve_size = 0;

    target_restore_general_frame(env, sf);

    ctx = (struct target_aarch64_ctx *)sf->uc.tuc_mcontext.__reserved;
    while (ctx) {
        uint32_t magic, size, extra_size;

        __get_user(magic, &ctx->magic);
        __get_user(size, &ctx->size);
        switch (magic) {
        case 0:
            if (size != 0) {
                err = true;
                goto exit;
            }
            if (used_extra) {
                ctx = NULL;
            } else {
                ctx = extra;
                used_extra = true;
            }
            continue;

        case TARGET_FPSIMD_MAGIC:
            if (fpsimd || size != sizeof(struct target_fpsimd_context)) {
                err = true;
                goto exit;
            }
            fpsimd = (struct target_fpsimd_context *)ctx;
            break;

        case TARGET_SVE_MAGIC:
            if (arm_feature(env, ARM_FEATURE_SVE)) {
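                /* ZCR_EL1.LEN holds (quadwords - 1), so vq below is the
                 * current vector length in 128-bit units.
                 */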
                vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
                if (!sve && size == sve_size) {
                    sve = (struct target_sve_context *)ctx;
                    break;
                }
            }
            err = true;
            goto exit;

        case TARGET_EXTRA_MAGIC:
            if (extra || size != sizeof(struct target_extra_context)) {
                err = true;
                goto exit;
            }
            __get_user(extra_datap,
                       &((struct target_extra_context *)ctx)->datap);
            __get_user(extra_size,
                       &((struct target_extra_context *)ctx)->size);
            extra = lock_user(VERIFY_READ, extra_datap, extra_size, 0);
            break;

        default:
            /* Unknown record -- we certainly didn't generate it.
             * Did we in fact get out of sync?
             */
            err = true;
            goto exit;
        }
        ctx = (void *)ctx + size;
    }

    /* Require FPSIMD always.  */
    if (fpsimd) {
        target_restore_fpsimd_record(env, fpsimd);
    } else {
        err = true;
    }

    /* SVE data, if present, overwrites FPSIMD data.  */
    if (sve) {
        target_restore_sve_record(env, sve, vq);
    }

 exit:
    unlock_user(extra, extra_datap, 0);
    return err;
}
static abi_ulong get_sigframe(struct target_sigaction *ka,
                              CPUARMState *env, int size)
{
    abi_ulong sp;

    sp = target_sigsp(get_sp_from_cpustate(env), ka);

    sp = (sp - size) & ~15;

    return sp;
}

typedef struct {
    int total_size;
    int extra_base;
    int extra_size;
    int std_end_ofs;
    int extra_ofs;
    int extra_end_ofs;
} target_sigframe_layout;
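/*
 * Rough picture of the layout the helpers below compute: records that fit in
 * the 4K __reserved area of the ucontext stay in that "standard" space; once
 * a record does not fit, a TARGET_EXTRA_MAGIC record is emitted in the
 * standard space pointing at "extra" space reserved past the end of the
 * rt_sigframe, and all subsequent records (plus a second end marker) are
 * placed there.
 */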
static int alloc_sigframe_space(int this_size, target_sigframe_layout *l)
{
    /* Make sure there will always be space for the end marker.  */
    const int std_size = sizeof(struct target_rt_sigframe)
                         - sizeof(struct target_aarch64_ctx);
    int this_loc = l->total_size;

    if (l->extra_base) {
        /* Once we have begun an extra space, all allocations go there.  */
        l->extra_size += this_size;
    } else if (this_size + this_loc > std_size) {
        /* This allocation does not fit in the standard space.  */
        /* Allocate the extra record.  */
        l->extra_ofs = this_loc;
        l->total_size += sizeof(struct target_extra_context);

        /* Allocate the standard end record.  */
        l->std_end_ofs = l->total_size;
        l->total_size += sizeof(struct target_aarch64_ctx);

        /* Allocate the requested record.  */
        l->extra_base = this_loc = l->total_size;
        l->extra_size = this_size;
    }
    l->total_size += this_size;

    return this_loc;
}
static void target_setup_frame(int usig, struct target_sigaction *ka,
                               target_siginfo_t *info, target_sigset_t *set,
                               CPUARMState *env)
{
    target_sigframe_layout layout = {
        /* Begin with the size pointing to the reserved space.  */
        .total_size = offsetof(struct target_rt_sigframe,
                               uc.tuc_mcontext.__reserved),
    };
    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
    struct target_rt_sigframe *frame;
    struct target_rt_frame_record *fr;
    abi_ulong frame_addr, return_addr;

    /* FPSIMD record is always in the standard space.  */
    fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
                                      &layout);

    /* SVE state needs saving only if it exists.  */
    if (arm_feature(env, ARM_FEATURE_SVE)) {
        vq = (env->vfp.zcr_el[1] & 0xf) + 1;
        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
        sve_ofs = alloc_sigframe_space(sve_size, &layout);
    }

    if (layout.extra_ofs) {
        /* Reserve space for the extra end marker.  The standard end marker
         * will have been allocated when we allocated the extra record.
         */
        layout.extra_end_ofs
            = alloc_sigframe_space(sizeof(struct target_aarch64_ctx), &layout);
    } else {
        /* Reserve space for the standard end marker.
         * Do not use alloc_sigframe_space because we cheat
         * std_size therein to reserve space for this.
         */
        layout.std_end_ofs = layout.total_size;
        layout.total_size += sizeof(struct target_aarch64_ctx);
    }

    /* We must always provide at least the standard 4K reserved space,
     * even if we don't use all of it (this is part of the ABI)
     */
    layout.total_size = MAX(layout.total_size,
                            sizeof(struct target_rt_sigframe));

    /* Reserve space for the return code.  On a real system this would
     * be within the VDSO.  So, despite the name this is not a "real"
     * record within the frame.
     */
    fr_ofs = layout.total_size;
    layout.total_size += sizeof(struct target_rt_frame_record);
    frame_addr = get_sigframe(ka, env, layout.total_size);
    trace_user_setup_frame(env, frame_addr);
    frame = lock_user(VERIFY_WRITE, frame_addr, layout.total_size, 0);
    if (!frame) {
        goto give_sigsegv;
    }

    target_setup_general_frame(frame, env, set);
    target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
    target_setup_end_record((void *)frame + layout.std_end_ofs);
    if (layout.extra_ofs) {
        target_setup_extra_record((void *)frame + layout.extra_ofs,
                                  frame_addr + layout.extra_base,
                                  layout.extra_size);
        target_setup_end_record((void *)frame + layout.extra_end_ofs);
    }
    if (sve_ofs) {
        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
    }

    /* Set up the stack frame for unwinding.  */
    fr = (void *)frame + fr_ofs;
    __put_user(env->xregs[29], &fr->fp);
    __put_user(env->xregs[30], &fr->lr);

    if (ka->sa_flags & TARGET_SA_RESTORER) {
        return_addr = ka->sa_restorer;
    } else {
        /*
         * mov x8,#__NR_rt_sigreturn; svc #0
         * Since these are instructions they need to be put as little-endian
         * regardless of target default or current CPU endianness.
         */
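        /* For reference: 0xd2801168 encodes "movz x8, #0x8b" (139, the
         * AArch64 __NR_rt_sigreturn) and 0xd4000001 encodes "svc #0".
         */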
        __put_user_e(0xd2801168, &fr->tramp[0], le);
        __put_user_e(0xd4000001, &fr->tramp[1], le);
        return_addr = frame_addr + fr_ofs
            + offsetof(struct target_rt_frame_record, tramp);
    }
    env->xregs[0] = usig;
    env->xregs[31] = frame_addr;
    env->xregs[29] = frame_addr + fr_ofs;
    env->pc = ka->_sa_handler;
    env->xregs[30] = return_addr;
    if (info) {
        tswap_siginfo(&frame->info, info);
        env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
        env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
    }

    unlock_user(frame, frame_addr, layout.total_size);
    return;

 give_sigsegv:
    unlock_user(frame, frame_addr, layout.total_size);
    force_sigsegv(usig);
}
void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info, target_sigset_t *set,
                    CPUARMState *env)
{
    target_setup_frame(sig, ka, info, set, env);
}

void setup_frame(int sig, struct target_sigaction *ka,
                 target_sigset_t *set, CPUARMState *env)
{
    target_setup_frame(sig, ka, 0, set, env);
}

long do_rt_sigreturn(CPUARMState *env)
{
    struct target_rt_sigframe *frame = NULL;
    abi_ulong frame_addr = env->xregs[31];

    trace_user_do_rt_sigreturn(env, frame_addr);
    if (frame_addr & 15) {
        goto badframe;
    }

    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
        goto badframe;
    }

    if (target_restore_sigframe(env, frame)) {
        goto badframe;
    }

    if (do_sigaltstack(frame_addr +
                       offsetof(struct target_rt_sigframe, uc.tuc_stack),
                       0, get_sp_from_cpustate(env)) == -EFAULT) {
        goto badframe;
    }

    unlock_user_struct(frame, frame_addr, 0);
    return -TARGET_QEMU_ESIGRETURN;

 badframe:
    unlock_user_struct(frame, frame_addr, 0);
    force_sig(TARGET_SIGSEGV);
    return -TARGET_QEMU_ESIGRETURN;
}

long do_sigreturn(CPUARMState *env)
{
    return do_rt_sigreturn(env);
}