target-arm: add emulation of PSCI calls for system emulation
[qemu.git] / target-arm / op_helper.c
blob 464a5ce56733406a1aea485d614730aff0cd4ae6

/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, int tt)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}
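
/* NEON VTBL/VTBX table lookup: each byte of ireg selects a byte from the
 * table registers starting at vfp.regs[rn]; indexes at or beyond maxindex
 * instead take the corresponding byte of def (the VTBX "leave destination
 * unchanged" behaviour; for VTBL the translator passes def as zero).
 */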
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, the function was called from C code (i.e. not from generated
 * code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception(env, cs->exception_index);
    }
}
#endif
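
/* Saturating arithmetic helpers. On overflow these set the sticky Q flag
 * (env->QF); add_setq returns the unsaturated result, while the others
 * clamp it to the nearest representable value.
 */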
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
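
/* Worked example: do_ssat(env, 0x1234, 8) computes top = 0x12 > 0, so it
 * sets QF and returns mask = 0xff, the largest non-negative value with
 * 8 value bits; do_usat(env, -1, 8) sets QF and returns 0.
 */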

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
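
/* WFI halts the CPU (cs->halted) until the next interrupt; WFE does not
 * halt, it only yields back to the top-level loop (see the comment in the
 * helper), so a guest spinning on WFE still makes forward progress.
 */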
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Don't actually halt the CPU, just yield back to top
     * level loop
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    cpu_loop_exit(cs);
}
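
/* CPSR access for the guest. Reads mask out the execution-state and
 * reserved bits; the T/IT/J bits are tracked separately by the translator
 * rather than in the value returned here.
 */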
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
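
/* Runtime access check for a coprocessor/system register. The XScale CPAR
 * check and any per-register accessfn run here rather than at translate
 * time because their outcome can depend on runtime CPU state.
 */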
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_UDEF);
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        env->exception.syndrome = syndrome;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        env->exception.syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF);
}
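
/* Read/write wrappers for registers whose readfn/writefn must be called
 * at execution time; 32-bit and 64-bit widths get separate helpers so the
 * TCG call interface types stay correct.
 */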
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
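
/* Pre-checks for HVC: a valid PSCI call is let through (the PSCI emulation
 * then handles it); otherwise decide whether HVC should UNDEF given the
 * EL2/EL3 configuration and security state.
 */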
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_pl(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}
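
/* Pre-checks for SMC: as with HVC, a valid PSCI call overrides the
 * architectural behaviour; otherwise SMC may UNDEF (no EL3, or SMD set)
 * or trap to EL2 when HCR_EL2.TSC routing applies.
 */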
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_pl(env);
    /* FIXME: Use real secure state.  */
    bool secure = false;
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
     * the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_HYP_TRAP);
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}
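
/* Exception return (ERET from AArch64): restore PSTATE or CPSR from the
 * banked SPSR and the PC from ELR_ELx; an illegal return sets PSTATE.IL
 * and stays at the current exception level.
 */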
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_pl(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el, i;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        for (i = 0; i < 15; i++) {
            env->regs[i] = env->xregs[i];
        }

        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el1, 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_pl(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}
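
/* Return true if breakpoint/watchpoint n should fire. For watchpoints the
 * core code has already matched the address and access type, so only the
 * security-state (SSC), privilege (HMC/PAC) and linked-breakpoint fields
 * remain to be checked here.
 */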
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* TODO: check against CPU security state when we implement TrustZone */
    bool is_secure = false;

    if (is_wp) {
        if (!env->cpu_watchpoint[n]
            || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }

    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT
     * "unprivileged access" instructions should match watchpoints as if
     * they were accesses done at EL0, even if the CPU is at EL1 or higher.
     * Implementing this would require reworking the core watchpoint code
     * to plumb the mmu_idx through to this point. Luckily Linux does not
     * rely on this behaviour currently.
     * For breakpoints we do want to use the current CPU state.
     */
    switch (arm_current_pl(env)) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_pl(env);

                env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_pl(env));
            env->exception.syndrome = syn_breakpoint(same_el);
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            raise_exception(env, EXCP_PREFETCH_ABORT);
        }
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
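
/* Example: shl_cc(env, 0x80000001, 1) sets CF to the bit shifted out
 * (bit 31, so CF = 1) and returns 0x00000002, matching the ARM
 * flag-setting LSL-by-register semantics.
 */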

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}