/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "cpu.h"
21 #include "exec/helper-proto.h"
22 #include "internals.h"
23 #include "exec/cpu_ldst.h"
25 #define SIGNBIT (uint32_t)0x80000000
26 #define SIGNBIT64 ((uint64_t)1 << 63)
28 static void QEMU_NORETURN
29 raise_exception(CPUARMState *env, uint32_t excp,
30 uint32_t syndrome, uint32_t target_el)
32 CPUState *cs = CPU(arm_env_get_cpu(env));
34 assert(!excp_is_internal(excp));
35 cs->exception_index = excp;
36 env->exception.syndrome = syndrome;
37 env->exception.target_el = target_el;
38 cpu_loop_exit(cs);
41 static int exception_target_el(CPUARMState *env)
43 int target_el = MAX(1, arm_current_el(env));
45 /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
46 * to EL3 in this case.
48 if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
49 target_el = 3;
52 return target_el;
55 uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
56 uint32_t rn, uint32_t maxindex)
58 uint32_t val;
59 uint32_t tmp;
60 int index;
61 int shift;
62 uint64_t *table;
63 table = (uint64_t *)&env->vfp.regs[rn];
64 val = 0;
65 for (shift = 0; shift < 32; shift += 8) {
66 index = (ireg >> shift) & 0xff;
67 if (index < maxindex) {
68 tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
69 val |= tmp << shift;
70 } else {
71 val |= def & (0xff << shift);
74 return val;
77 #if !defined(CONFIG_USER_ONLY)
79 /* try to fill the TLB and return an exception if error. If retaddr is
80 * NULL, it means that the function was called in C code (i.e. not
81 * from generated code or from helper.c)
83 void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
84 uintptr_t retaddr)
86 bool ret;
87 uint32_t fsr = 0;
88 ARMMMUFaultInfo fi = {};
90 ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr, &fi);
91 if (unlikely(ret)) {
92 ARMCPU *cpu = ARM_CPU(cs);
93 CPUARMState *env = &cpu->env;
94 uint32_t syn, exc;
95 unsigned int target_el;
96 bool same_el;
98 if (retaddr) {
99 /* now we have a real cpu fault */
100 cpu_restore_state(cs, retaddr);
103 target_el = exception_target_el(env);
104 if (fi.stage2) {
105 target_el = 2;
106 env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
108 same_el = arm_current_el(env) == target_el;
109 /* AArch64 syndrome does not have an LPAE bit */
110 syn = fsr & ~(1 << 9);
112 /* For insn and data aborts we assume there is no instruction syndrome
113 * information; this is always true for exceptions reported to EL1.
115 if (is_write == 2) {
116 syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
117 exc = EXCP_PREFETCH_ABORT;
118 } else {
119 syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn);
120 if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
121 fsr |= (1 << 11);
123 exc = EXCP_DATA_ABORT;
126 env->exception.vaddress = addr;
127 env->exception.fsr = fsr;
128 raise_exception(env, exc, syn, target_el);
131 #endif
133 uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
135 uint32_t res = a + b;
136 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
137 env->QF = 1;
138 return res;
141 uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
143 uint32_t res = a + b;
144 if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
145 env->QF = 1;
146 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
148 return res;
151 uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
153 uint32_t res = a - b;
154 if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
155 env->QF = 1;
156 res = ~(((int32_t)a >> 31) ^ SIGNBIT);
158 return res;
161 uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
163 uint32_t res;
164 if (val >= 0x40000000) {
165 res = ~SIGNBIT;
166 env->QF = 1;
167 } else if (val <= (int32_t)0xc0000000) {
168 res = SIGNBIT;
169 env->QF = 1;
170 } else {
171 res = val << 1;
173 return res;
176 uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
178 uint32_t res = a + b;
179 if (res < a) {
180 env->QF = 1;
181 res = ~0;
183 return res;
186 uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
188 uint32_t res = a - b;
189 if (res > a) {
190 env->QF = 1;
191 res = 0;
193 return res;
196 /* Signed saturation. */
197 static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
199 int32_t top;
200 uint32_t mask;
202 top = val >> shift;
203 mask = (1u << shift) - 1;
204 if (top > 0) {
205 env->QF = 1;
206 return mask;
207 } else if (top < -1) {
208 env->QF = 1;
209 return ~mask;
211 return val;
214 /* Unsigned saturation. */
215 static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
217 uint32_t max;
219 max = (1u << shift) - 1;
220 if (val < 0) {
221 env->QF = 1;
222 return 0;
223 } else if (val > max) {
224 env->QF = 1;
225 return max;
227 return val;
230 /* Signed saturate. */
231 uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
233 return do_ssat(env, x, shift);
236 /* Dual halfword signed saturate. */
237 uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
239 uint32_t res;
241 res = (uint16_t)do_ssat(env, (int16_t)x, shift);
242 res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
243 return res;
246 /* Unsigned saturate. */
247 uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
249 return do_usat(env, x, shift);
252 /* Dual halfword unsigned saturate. */
253 uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
255 uint32_t res;
257 res = (uint16_t)do_usat(env, (int16_t)x, shift);
258 res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
259 return res;
262 /* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
263 * The function returns the target EL (1-3) if the instruction is to be trapped;
264 * otherwise it returns 0 indicating it is not trapped.
266 static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
268 int cur_el = arm_current_el(env);
269 uint64_t mask;
271 /* If we are currently in EL0 then we need to check if SCTLR is set up for
272 * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
274 if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
275 int target_el;
277 mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
278 if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
279 /* Secure EL0 and Secure PL1 is at EL3 */
280 target_el = 3;
281 } else {
282 target_el = 1;
285 if (!(env->cp15.sctlr_el[target_el] & mask)) {
286 return target_el;
290 /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
291 * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
292 * bits will be zero indicating no trap.
294 if (cur_el < 2 && !arm_is_secure(env)) {
295 mask = (is_wfe) ? HCR_TWE : HCR_TWI;
296 if (env->cp15.hcr_el2 & mask) {
297 return 2;
301 /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
302 if (cur_el < 3) {
303 mask = (is_wfe) ? SCR_TWE : SCR_TWI;
304 if (env->cp15.scr_el3 & mask) {
305 return 3;
309 return 0;
312 void HELPER(wfi)(CPUARMState *env)
314 CPUState *cs = CPU(arm_env_get_cpu(env));
315 int target_el = check_wfx_trap(env, false);
317 if (cpu_has_work(cs)) {
318 /* Don't bother to go into our "low power state" if
319 * we would just wake up immediately.
321 return;
324 if (target_el) {
325 env->pc -= 4;
326 raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
329 cs->exception_index = EXCP_HLT;
330 cs->halted = 1;
331 cpu_loop_exit(cs);
334 void QEMU_NORETURN HELPER(wfe)(CPUARMState *env)
336 /* This is a hint instruction that is semantically different
337 * from YIELD even though we currently implement it identically.
338 * Don't actually halt the CPU, just yield back to top
339 * level loop. This is not going into a "low power state"
340 * (ie halting until some event occurs), so we never take
341 * a configurable trap to a different exception level.
343 HELPER(yield)(env);
346 void QEMU_NORETURN HELPER(yield)(CPUARMState *env)
348 ARMCPU *cpu = arm_env_get_cpu(env);
349 CPUState *cs = CPU(cpu);
351 /* This is a non-trappable hint instruction that generally indicates
352 * that the guest is currently busy-looping. Yield control back to the
353 * top level loop so that a more deserving VCPU has a chance to run.
355 cs->exception_index = EXCP_YIELD;
356 cpu_loop_exit(cs);
359 /* Raise an internal-to-QEMU exception. This is limited to only
360 * those EXCP values which are special cases for QEMU to interrupt
361 * execution and not to be used for exceptions which are passed to
362 * the guest (those must all have syndrome information and thus should
363 * use exception_with_syndrome).
365 void QEMU_NORETURN HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
367 CPUState *cs = CPU(arm_env_get_cpu(env));
369 assert(excp_is_internal(excp));
370 cs->exception_index = excp;
371 cpu_loop_exit(cs);
374 /* Raise an exception with the specified syndrome register value */
375 void QEMU_NORETURN
376 HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
377 uint32_t syndrome, uint32_t target_el)
379 raise_exception(env, excp, syndrome, target_el);
382 uint32_t HELPER(cpsr_read)(CPUARMState *env)
384 return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
387 void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
389 cpsr_write(env, val, mask);
392 /* Access to user mode registers from privileged modes. */
393 uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
395 uint32_t val;
397 if (regno == 13) {
398 val = env->banked_r13[BANK_USRSYS];
399 } else if (regno == 14) {
400 val = env->banked_r14[BANK_USRSYS];
401 } else if (regno >= 8
402 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
403 val = env->usr_regs[regno - 8];
404 } else {
405 val = env->regs[regno];
407 return val;
410 void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
412 if (regno == 13) {
413 env->banked_r13[BANK_USRSYS] = val;
414 } else if (regno == 14) {
415 env->banked_r14[BANK_USRSYS] = val;
416 } else if (regno >= 8
417 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
418 env->usr_regs[regno - 8] = val;
419 } else {
420 env->regs[regno] = val;
424 void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
426 const ARMCPRegInfo *ri = rip;
427 int target_el;
429 if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
430 && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
431 raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
434 if (!ri->accessfn) {
435 return;
438 switch (ri->accessfn(env, ri)) {
439 case CP_ACCESS_OK:
440 return;
441 case CP_ACCESS_TRAP:
442 target_el = exception_target_el(env);
443 break;
444 case CP_ACCESS_TRAP_EL2:
445 /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
446 * a bug in the access function.
448 assert(!arm_is_secure(env) && arm_current_el(env) != 3);
449 target_el = 2;
450 break;
451 case CP_ACCESS_TRAP_EL3:
452 target_el = 3;
453 break;
454 case CP_ACCESS_TRAP_UNCATEGORIZED:
455 target_el = exception_target_el(env);
456 syndrome = syn_uncategorized();
457 break;
458 case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
459 target_el = 2;
460 syndrome = syn_uncategorized();
461 break;
462 case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
463 target_el = 3;
464 syndrome = syn_uncategorized();
465 break;
466 default:
467 g_assert_not_reached();
470 raise_exception(env, EXCP_UDEF, syndrome, target_el);
473 void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
475 const ARMCPRegInfo *ri = rip;
477 ri->writefn(env, ri, value);
480 uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
482 const ARMCPRegInfo *ri = rip;
484 return ri->readfn(env, ri);
487 void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
489 const ARMCPRegInfo *ri = rip;
491 ri->writefn(env, ri, value);
494 uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
496 const ARMCPRegInfo *ri = rip;
498 return ri->readfn(env, ri);
501 void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
503 /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
504 * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
505 * to catch that case at translate time.
507 if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
508 uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
509 extract32(op, 3, 3), 4,
510 imm, 0x1f, 0);
511 raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
514 switch (op) {
515 case 0x05: /* SPSel */
516 update_spsel(env, imm);
517 break;
518 case 0x1e: /* DAIFSet */
519 env->daif |= (imm << 6) & PSTATE_DAIF;
520 break;
521 case 0x1f: /* DAIFClear */
522 env->daif &= ~((imm << 6) & PSTATE_DAIF);
523 break;
524 default:
525 g_assert_not_reached();
529 void HELPER(clear_pstate_ss)(CPUARMState *env)
531 env->pstate &= ~PSTATE_SS;
534 void HELPER(pre_hvc)(CPUARMState *env)
536 ARMCPU *cpu = arm_env_get_cpu(env);
537 int cur_el = arm_current_el(env);
538 /* FIXME: Use actual secure state. */
539 bool secure = false;
540 bool undef;
542 if (arm_is_psci_call(cpu, EXCP_HVC)) {
543 /* If PSCI is enabled and this looks like a valid PSCI call then
544 * that overrides the architecturally mandated HVC behaviour.
546 return;
549 if (!arm_feature(env, ARM_FEATURE_EL2)) {
550 /* If EL2 doesn't exist, HVC always UNDEFs */
551 undef = true;
552 } else if (arm_feature(env, ARM_FEATURE_EL3)) {
553 /* EL3.HCE has priority over EL2.HCD. */
554 undef = !(env->cp15.scr_el3 & SCR_HCE);
555 } else {
556 undef = env->cp15.hcr_el2 & HCR_HCD;
559 /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
560 * For ARMv8/AArch64, HVC is allowed in EL3.
561 * Note that we've already trapped HVC from EL0 at translation
562 * time.
564 if (secure && (!is_a64(env) || cur_el == 1)) {
565 undef = true;
568 if (undef) {
569 raise_exception(env, EXCP_UDEF, syn_uncategorized(),
570 exception_target_el(env));
574 void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
576 ARMCPU *cpu = arm_env_get_cpu(env);
577 int cur_el = arm_current_el(env);
578 bool secure = arm_is_secure(env);
579 bool smd = env->cp15.scr_el3 & SCR_SMD;
580 /* On ARMv8 AArch32, SMD only applies to NS state.
581 * On ARMv7 SMD only applies to NS state and only if EL2 is available.
582 * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
583 * the EL2 condition here.
585 bool undef = is_a64(env) ? smd : (!secure && smd);
587 if (arm_is_psci_call(cpu, EXCP_SMC)) {
588 /* If PSCI is enabled and this looks like a valid PSCI call then
589 * that overrides the architecturally mandated SMC behaviour.
591 return;
594 if (!arm_feature(env, ARM_FEATURE_EL3)) {
595 /* If we have no EL3 then SMC always UNDEFs */
596 undef = true;
597 } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
598 /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
599 raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
602 if (undef) {
603 raise_exception(env, EXCP_UDEF, syn_uncategorized(),
604 exception_target_el(env));
608 void HELPER(exception_return)(CPUARMState *env)
610 int cur_el = arm_current_el(env);
611 unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
612 uint32_t spsr = env->banked_spsr[spsr_idx];
613 int new_el;
615 aarch64_save_sp(env, cur_el);
617 env->exclusive_addr = -1;
619 /* We must squash the PSTATE.SS bit to zero unless both of the
620 * following hold:
621 * 1. debug exceptions are currently disabled
622 * 2. singlestep will be active in the EL we return to
623 * We check 1 here and 2 after we've done the pstate/cpsr write() to
624 * transition to the EL we're going to.
626 if (arm_generate_debug_exceptions(env)) {
627 spsr &= ~PSTATE_SS;
630 if (spsr & PSTATE_nRW) {
631 /* TODO: We currently assume EL1/2/3 are running in AArch64. */
632 env->aarch64 = 0;
633 new_el = 0;
634 env->uncached_cpsr = 0x10;
635 cpsr_write(env, spsr, ~0);
636 if (!arm_singlestep_active(env)) {
637 env->uncached_cpsr &= ~PSTATE_SS;
639 aarch64_sync_64_to_32(env);
641 env->regs[15] = env->elr_el[1] & ~0x1;
642 } else {
643 new_el = extract32(spsr, 2, 2);
644 if (new_el > cur_el
645 || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
646 /* Disallow return to an EL which is unimplemented or higher
647 * than the current one.
649 goto illegal_return;
651 if (extract32(spsr, 1, 1)) {
652 /* Return with reserved M[1] bit set */
653 goto illegal_return;
655 if (new_el == 0 && (spsr & PSTATE_SP)) {
656 /* Return to EL0 with M[0] bit set */
657 goto illegal_return;
659 env->aarch64 = 1;
660 pstate_write(env, spsr);
661 if (!arm_singlestep_active(env)) {
662 env->pstate &= ~PSTATE_SS;
664 aarch64_restore_sp(env, new_el);
665 env->pc = env->elr_el[cur_el];
668 return;
670 illegal_return:
671 /* Illegal return events of various kinds have architecturally
672 * mandated behaviour:
673 * restore NZCV and DAIF from SPSR_ELx
674 * set PSTATE.IL
675 * restore PC from ELR_ELx
676 * no change to exception level, execution state or stack pointer
678 env->pstate |= PSTATE_IL;
679 env->pc = env->elr_el[cur_el];
680 spsr &= PSTATE_NZCV | PSTATE_DAIF;
681 spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
682 pstate_write(env, spsr);
683 if (!arm_singlestep_active(env)) {
684 env->pstate &= ~PSTATE_SS;
688 /* Return true if the linked breakpoint entry lbn passes its checks */
689 static bool linked_bp_matches(ARMCPU *cpu, int lbn)
691 CPUARMState *env = &cpu->env;
692 uint64_t bcr = env->cp15.dbgbcr[lbn];
693 int brps = extract32(cpu->dbgdidr, 24, 4);
694 int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
695 int bt;
696 uint32_t contextidr;
698 /* Links to unimplemented or non-context aware breakpoints are
699 * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
700 * as if linked to an UNKNOWN context-aware breakpoint (in which
701 * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
702 * We choose the former.
704 if (lbn > brps || lbn < (brps - ctx_cmps)) {
705 return false;
708 bcr = env->cp15.dbgbcr[lbn];
710 if (extract64(bcr, 0, 1) == 0) {
711 /* Linked breakpoint disabled : generate no events */
712 return false;
715 bt = extract64(bcr, 20, 4);
717 /* We match the whole register even if this is AArch32 using the
718 * short descriptor format (in which case it holds both PROCID and ASID),
719 * since we don't implement the optional v7 context ID masking.
721 contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
723 switch (bt) {
724 case 3: /* linked context ID match */
725 if (arm_current_el(env) > 1) {
726 /* Context matches never fire in EL2 or (AArch64) EL3 */
727 return false;
729 return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
730 case 5: /* linked address mismatch (reserved in AArch64) */
731 case 9: /* linked VMID match (reserved if no EL2) */
732 case 11: /* linked context ID and VMID match (reserved if no EL2) */
733 default:
734 /* Links to Unlinked context breakpoints must generate no
735 * events; we choose to do the same for reserved values too.
737 return false;
740 return false;
743 static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
745 CPUARMState *env = &cpu->env;
746 uint64_t cr;
747 int pac, hmc, ssc, wt, lbn;
748 /* Note that for watchpoints the check is against the CPU security
749 * state, not the S/NS attribute on the offending data access.
751 bool is_secure = arm_is_secure(env);
752 int access_el = arm_current_el(env);
754 if (is_wp) {
755 CPUWatchpoint *wp = env->cpu_watchpoint[n];
757 if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
758 return false;
760 cr = env->cp15.dbgwcr[n];
761 if (wp->hitattrs.user) {
762 /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
763 * match watchpoints as if they were accesses done at EL0, even if
764 * the CPU is at EL1 or higher.
766 access_el = 0;
768 } else {
769 uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
771 if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
772 return false;
774 cr = env->cp15.dbgbcr[n];
776 /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
777 * enabled and that the address and access type match; for breakpoints
778 * we know the address matched; check the remaining fields, including
779 * linked breakpoints. We rely on WCR and BCR having the same layout
780 * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
781 * Note that some combinations of {PAC, HMC, SSC} are reserved and
782 * must act either like some valid combination or as if the watchpoint
783 * were disabled. We choose the former, and use this together with
784 * the fact that EL3 must always be Secure and EL2 must always be
785 * Non-Secure to simplify the code slightly compared to the full
786 * table in the ARM ARM.
788 pac = extract64(cr, 1, 2);
789 hmc = extract64(cr, 13, 1);
790 ssc = extract64(cr, 14, 2);
792 switch (ssc) {
793 case 0:
794 break;
795 case 1:
796 case 3:
797 if (is_secure) {
798 return false;
800 break;
801 case 2:
802 if (!is_secure) {
803 return false;
805 break;
808 switch (access_el) {
809 case 3:
810 case 2:
811 if (!hmc) {
812 return false;
814 break;
815 case 1:
816 if (extract32(pac, 0, 1) == 0) {
817 return false;
819 break;
820 case 0:
821 if (extract32(pac, 1, 1) == 0) {
822 return false;
824 break;
825 default:
826 g_assert_not_reached();
829 wt = extract64(cr, 20, 1);
830 lbn = extract64(cr, 16, 4);
832 if (wt && !linked_bp_matches(cpu, lbn)) {
833 return false;
836 return true;
839 static bool check_watchpoints(ARMCPU *cpu)
841 CPUARMState *env = &cpu->env;
842 int n;
844 /* If watchpoints are disabled globally or we can't take debug
845 * exceptions here then watchpoint firings are ignored.
847 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
848 || !arm_generate_debug_exceptions(env)) {
849 return false;
852 for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
853 if (bp_wp_matches(cpu, n, true)) {
854 return true;
857 return false;
860 static bool check_breakpoints(ARMCPU *cpu)
862 CPUARMState *env = &cpu->env;
863 int n;
865 /* If breakpoints are disabled globally or we can't take debug
866 * exceptions here then breakpoint firings are ignored.
868 if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
869 || !arm_generate_debug_exceptions(env)) {
870 return false;
873 for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
874 if (bp_wp_matches(cpu, n, false)) {
875 return true;
878 return false;
881 void HELPER(check_breakpoints)(CPUARMState *env)
883 ARMCPU *cpu = arm_env_get_cpu(env);
885 if (check_breakpoints(cpu)) {
886 HELPER(exception_internal(env, EXCP_DEBUG));
890 void arm_debug_excp_handler(CPUState *cs)
892 /* Called by core code when a watchpoint or breakpoint fires;
893 * need to check which one and raise the appropriate exception.
895 ARMCPU *cpu = ARM_CPU(cs);
896 CPUARMState *env = &cpu->env;
897 CPUWatchpoint *wp_hit = cs->watchpoint_hit;
899 if (wp_hit) {
900 if (wp_hit->flags & BP_CPU) {
901 cs->watchpoint_hit = NULL;
902 if (check_watchpoints(cpu)) {
903 bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
904 bool same_el = arm_debug_target_el(env) == arm_current_el(env);
906 if (extended_addresses_enabled(env)) {
907 env->exception.fsr = (1 << 9) | 0x22;
908 } else {
909 env->exception.fsr = 0x2;
911 env->exception.vaddress = wp_hit->hitaddr;
912 raise_exception(env, EXCP_DATA_ABORT,
913 syn_watchpoint(same_el, 0, wnr),
914 arm_debug_target_el(env));
915 } else {
916 cpu_resume_from_signal(cs, NULL);
919 } else {
920 uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
921 bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
923 /* (1) GDB breakpoints should be handled first.
924 * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
925 * since singlestep is also done by generating a debug internal
926 * exception.
928 if (cpu_breakpoint_test(cs, pc, BP_GDB)
929 || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
930 return;
933 if (extended_addresses_enabled(env)) {
934 env->exception.fsr = (1 << 9) | 0x22;
935 } else {
936 env->exception.fsr = 0x2;
938 /* FAR is UNKNOWN, so doesn't need setting */
939 raise_exception(env, EXCP_PREFETCH_ABORT,
940 syn_breakpoint(same_el),
941 arm_debug_target_el(env));
945 /* ??? Flag setting arithmetic is awkward because we need to do comparisons.
946 The only way to do that in TCG is a conditional branch, which clobbers
947 all our temporaries. For now implement these as helper functions. */
949 /* Similarly for variable shift instructions. */
951 uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
953 int shift = i & 0xff;
954 if (shift >= 32) {
955 if (shift == 32)
956 env->CF = x & 1;
957 else
958 env->CF = 0;
959 return 0;
960 } else if (shift != 0) {
961 env->CF = (x >> (32 - shift)) & 1;
962 return x << shift;
964 return x;
967 uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
969 int shift = i & 0xff;
970 if (shift >= 32) {
971 if (shift == 32)
972 env->CF = (x >> 31) & 1;
973 else
974 env->CF = 0;
975 return 0;
976 } else if (shift != 0) {
977 env->CF = (x >> (shift - 1)) & 1;
978 return x >> shift;
980 return x;
983 uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
985 int shift = i & 0xff;
986 if (shift >= 32) {
987 env->CF = (x >> 31) & 1;
988 return (int32_t)x >> 31;
989 } else if (shift != 0) {
990 env->CF = (x >> (shift - 1)) & 1;
991 return (int32_t)x >> shift;
993 return x;
996 uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
998 int shift1, shift;
999 shift1 = i & 0xff;
1000 shift = shift1 & 0x1f;
1001 if (shift == 0) {
1002 if (shift1 != 0)
1003 env->CF = (x >> 31) & 1;
1004 return x;
1005 } else {
1006 env->CF = (x >> (shift - 1)) & 1;
1007 return ((uint32_t)x >> shift) | (x << (32 - shift));