/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
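
/* For example, an exception taken from EL0 normally targets EL1; only when
 * the CPU is secure and EL3 is AArch32 (so there is no Secure EL1) does the
 * helper above redirect it to EL3.
 */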

uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
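
/* For example, with maxindex == 8 (one 64-bit table register) and
 * ireg == 0x00000907: byte 0 of the result is byte 7 of table[0]
 * (index 7 < 8), while byte 1 has index 9 >= maxindex and so is taken
 * from byte 1 of def instead.
 */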

#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;
        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (is_write == 2) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn);
            if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}
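
/* Note the is_write encoding used above: 0 for data reads, 1 for data
 * writes and 2 for instruction fetches, which is why is_write == 2 selects
 * the prefetch-abort path and everything else the data-abort path.
 */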

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
                                 int is_user, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        env->exception.fsr = 0x21;
    } else {
        env->exception.fsr = 0x1;
    }

    if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
        env->exception.fsr |= (1 << 11);
    }

    raise_exception(env, EXCP_DATA_ABORT,
                    syn_data_abort(same_el, 0, 0, 0, is_write == 1, 0x21),
                    target_el);
}

#endif /* !defined(CONFIG_USER_ONLY) */

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
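
/* The overflow test above is standard two's-complement reasoning: signed
 * overflow occurred iff the operands have the same sign (!((a ^ b) & SIGNBIT))
 * and the result's sign differs from a's ((res ^ a) & SIGNBIT). On overflow
 * the saturated value is derived from a's sign: for example,
 * 0x7fffffff + 1 sets QF and yields 0x7fffffff (INT32_MAX), while
 * 0x80000000 + (-1) sets QF and yields 0x80000000 (INT32_MIN).
 */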

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
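
/* In both helpers "shift" is the saturation position, not a shift of the
 * result: do_ssat clamps to the signed range [-2^shift, 2^shift - 1]
 * (e.g. shift == 7 gives [-128, 127]) and do_usat clamps to the unsigned
 * range [0, 2^shift - 1] (e.g. shift == 8 gives [0, 255]).
 */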

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
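
/* The three checks above implement the architectural priority order for WFx
 * traps: the SCTLR-controlled trap to EL1 (or to EL3 when that is where
 * Secure EL1 lives) wins first, then HCR_EL2.TWE/TWI, then SCR_EL3.TWE/TWI.
 */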

void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}
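
/* For example, when called from FIQ mode with regno == 10, the read returns
 * env->usr_regs[2] (the banked user-mode r10) rather than env->regs[10],
 * which currently holds the FIQ-mode copy.
 */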

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
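
/* The 4-bit immediate lines up with PSTATE.DAIF (bits [9:6]) after the shift
 * by 6: imm bit 0 is F, bit 1 is I, bit 2 is A and bit 3 is D, so e.g.
 * "msr daifset, #3" masks both IRQ and FIQ.
 */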

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
     * the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
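
/* For example, an AArch64 SPSR of 0x5 (EL1h) returns 1, while 0x1
 * (M[3:2] == 0 but M[0] set) is an illegal return to EL0 and yields -1;
 * an AArch32 SPSR with M == ARM_CPU_MODE_HYP returns 2.
 */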

void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        env->uncached_cpsr = spsr & CPSR_M;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}
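
/* Example of the illegal-return path above: an ERET at EL1 whose SPSR_EL1
 * requests EL2 fails the "new_el > cur_el" check, so execution stays at EL1
 * with PSTATE.IL set and the PC loaded from ELR_EL1.
 */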

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
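
/* For example, a debug register with SSC == 0, HMC == 0 and PAC == 0b10
 * passes the checks above only for EL0 accesses: EL1 requires PAC bit 0,
 * and EL2/EL3 require HMC to be set.
 */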

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_el(env);

                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT,
                                syn_watchpoint(same_el, 0, wnr),
                                arm_debug_target_el(env));
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
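
/* For example, shl_cc(env, 1, 32) returns 0 with CF = 1 (bit 0 is the last
 * bit shifted out), and any shift amount above 32 clears CF; the amount is
 * taken modulo 256 first, matching the ARM semantics for register-specified
 * shifts.
 */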

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
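
/* For example, ror_cc(env, 0x80000001, 1) returns 0xc0000000 with CF = 1
 * (the bit rotated out of position 0), while a rotate amount of 32
 * (shift1 == 32, shift == 0) leaves x unchanged but still sets CF to
 * bit 31.
 */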