/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}

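/* Neon table lookup (VTBL/VTBX): for each byte of ireg, use it as an index
 * into the table if it is below maxindex, otherwise take the corresponding
 * byte from the default value def.
 */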
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}

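/* Add two 32-bit values and set the Q flag if the signed addition overflows;
 * unlike the saturating helpers below, the result itself is not clamped.
 */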
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation. */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

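/* WFI: if the instruction is trapped, raise the trap to the configured EL;
 * otherwise halt the CPU until an interrupt arrives (unless work is already
 * pending, in which case this is a nop).
 */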
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}

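/* Return the CPSR with the execution state and reserved bits masked out. */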
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes. */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

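/* Coprocessor/system register accesses go through the register's writefn and
 * readfn hooks; registers marked ARM_CP_IO touch device state, so those
 * accesses are made with the iothread (BQL) lock held.
 */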
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries. For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}