/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, int tt)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}

uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;

    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
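
/* Illustrative note (not part of the original source): each byte of ireg
 * is an index into the table of bytes held in the d registers starting at
 * rn. For example, with maxindex = 8, an index byte of 0x02 selects table
 * byte 2 for that lane, while an index byte of 0x09 is out of range, so
 * the corresponding byte of 'def' is used instead; this matches the
 * VTBL/VTBX out-of-range behaviour.
 */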

#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB; raise an exception on error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception(env, cs->exception_index);
    }
}
#endif

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
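
/* Illustrative note (not part of the original source): the condition reads
 * "operands had the same sign but the result's sign flipped", i.e. signed
 * overflow. The clamp value ~(((int32_t)a >> 31) ^ SIGNBIT) is 0x7fffffff
 * when a >= 0 and 0x80000000 when a < 0, so e.g. 0x7fffffff + 1 saturates
 * to INT32_MAX and sets the sticky Q flag.
 */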

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}
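
/* Illustrative example (not part of the original source): do_ssat(env, v, 7)
 * clamps v to the signed 8-bit range. top = v >> 7 is 0 or -1 exactly when
 * v is in [-128, 127]; outside that, the result is mask = 0x7f for positive
 * overflow or ~mask = 0xffffff80 (-128) for negative overflow, with QF set.
 */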

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
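
/* Illustrative example (not part of the original source): do_usat(env, v, 8)
 * clamps v to [0, 255]: negative inputs return 0, inputs above max = 0xff
 * return 0xff, and both out-of-range cases set the sticky Q flag.
 */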

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Don't actually halt the CPU, just yield back to top
     * level loop
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    cpu_loop_exit(cs);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
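
/* Illustrative note (not part of the original source): r13 and r14 are
 * banked in every mode, so the user-mode copies always live in
 * banked_r13[0]/banked_r14[0]. r8-r12 are banked only in FIQ mode, which
 * is why usr_regs[] is consulted just for that case; in all other modes
 * env->regs[8..12] already hold the user-mode values.
 */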

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;
    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        env->exception.syndrome = syndrome;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        env->exception.syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }
    raise_exception(env, EXCP_UDEF);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
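
/* Illustrative note (not part of the original source): the 4-bit MSR
 * immediate carries the D, A, I and F flags; shifting it left by 6 aligns
 * it with PSTATE.DAIF, which occupies bits [9:6] (F=6, I=7, A=8, D=9).
 * So e.g. "msr daifset, #3" sets PSTATE.I and PSTATE.F, masking both IRQs
 * and FIQs.
 */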

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_pl(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el, i;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        for (i = 0; i < 15; i++) {
            env->regs[i] = env->xregs[i];
        }

        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el1, 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_pl(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

static bool wp_matches(ARMCPU *cpu, int n)
{
    CPUARMState *env = &cpu->env;
    uint64_t wcr = env->cp15.dbgwcr[n];
    int pac, hmc, ssc, wt, lbn;
    /* TODO: check against CPU security state when we implement TrustZone */
    bool is_secure = false;

    if (!env->cpu_watchpoint[n]
        || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
        return false;
    }

    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; check the
     * remaining fields, including linked breakpoints.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(wcr, 1, 2);
    hmc = extract64(wcr, 13, 1);
    ssc = extract64(wcr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT
     * "unprivileged access" instructions should match watchpoints as if
     * they were accesses done at EL0, even if the CPU is at EL1 or higher.
     * Implementing this would require reworking the core watchpoint code
     * to plumb the mmu_idx through to this point. Luckily Linux does not
     * rely on this behaviour currently.
     */
    switch (arm_current_pl(env)) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(wcr, 20, 1);
    lbn = extract64(wcr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
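
/* Field layout reference (illustrative note, not part of the original
 * source), per the ARM ARM description of DBGWCR<n>_EL1: E is bit [0],
 * PAC bits [2:1], HMC bit [13], SSC bits [15:14], LBN bits [19:16] and
 * WT bit [20], matching the extract64() calls above.
 */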

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (wp_matches(cpu, n)) {
            return true;
        }
    }
    return false;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_pl(env);

                env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
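
/* Illustrative examples (not part of the original source): the carry out
 * of a shift is the last bit shifted past the end, so shl_cc(env,
 * 0x80000001, 1) sets CF = 1 and returns 0x00000002. For ror_cc, a rotate
 * amount that is a non-zero multiple of 32 (shift1 != 0 but shift == 0)
 * returns x unchanged and sets CF to bit 31, per the ARM ROR definition.
 */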