/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
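
/* Each byte of ireg is an 8-bit table index: indexes below maxindex pick
 * the corresponding byte out of the NEON table registers starting at rn,
 * while larger indexes take that byte of def instead. E.g. with
 * maxindex = 8 and ireg = 0x00010803, result bytes 0, 2 and 3 come from
 * table bytes 3, 1 and 0, while byte 1 (index 8) falls back to def.
 */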
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}
static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                          uint32_t fsr, uint32_t fsc, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (fsc == 0x3f) {
        /* Caller doesn't have a long-format fault status code. This
         * should only happen if this fault will never actually be reported
         * to an EL that uses a syndrome register. Check that here.
         * 0x3f is a (currently) reserved FSC code, in case the constructed
         * syndrome does leak into the guest somehow.
         */
        assert(target_el != 2 && !arm_el_is_aa64(env, target_el));
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}
/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        uint32_t fsc;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        if (fsr & (1 << 9)) {
            /* LPAE format fault status register : bottom 6 bits are
             * status code in the same form as needed for syndrome
             */
            fsc = extract32(fsr, 0, 6);
        } else {
            /* Short format FSR : this fault will never actually be reported
             * to an EL that uses a syndrome register. Use a (currently)
             * reserved FSR code in case the constructed syndrome does leak
             * into the guest somehow. deliver_fault will assert that
             * we don't target an EL using the syndrome.
             */
            fsc = 0x3f;
        }

        deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
    }
}
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t fsr, fsc;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        fsr = (1 << 9) | 0x21;
    } else {
        fsr = 0x1;
    }
    fsc = 0x21;

    deliver_fault(cpu, vaddr, access_type, fsr, fsc, &fi);
}
/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t fsr, fsc;
    ARMMMUFaultInfo fi = {};
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    fi.ea = (response != MEMTX_DECODE_ERROR);

    /* The fault status register format depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format.
     */
    if (arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* long descriptor form, STATUS 0b010000: synchronous ext abort */
        fsr = (fi.ea << 12) | (1 << 9) | 0x10;
    } else {
        /* short descriptor form, FSR 0b01000 : synchronous ext abort */
        fsr = (fi.ea << 12) | 0x8;
    }
    fsc = 0x10;

    deliver_fault(cpu, addr, access_type, fsr, fsc, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */
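
/* Signed overflow for a + b occurs exactly when the operands have the
 * same sign but the result's sign differs, e.g.
 * 0x7fffffff + 1 = 0x80000000. The saturating helpers below use that
 * idiom on the sign bits to decide whether to set the sticky Q flag.
 */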
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
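
/* The two helpers below clamp their input and set QF on saturation:
 * do_ssat(env, v, shift) clamps to the (shift + 1)-bit signed range and
 * do_usat(env, v, shift) to the shift-bit unsigned range, e.g.
 * do_ssat(env, v, 7) gives [-128, 127] and do_usat(env, v, 8) gives
 * [0, 255].
 */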
/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
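
/* SETEND flips the CPSR.E (data endianness) bit. A simple XOR suffices
 * here on the assumption that the translator only emits this helper call
 * when the requested endianness differs from the current one.
 */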
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
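
/* On a trapped WFI the PC is wound back by insn_len (2 for a 16-bit Thumb
 * encoding, 4 otherwise) before raising the exception, so the return
 * address and syndrome describe the WFI instruction itself.
 */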
void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}
void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}
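
/* r8-r12 are only banked for FIQ mode, so when the CPU is in FIQ the
 * user-mode copies live in usr_regs[]; r13 and r14 are banked for every
 * mode, with the user/system copies in the BANK_USRSYS slot.
 */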
/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}
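
/* On XScale, the CPAR register gates guest access to coprocessors 0-13:
 * if the bit for ri->cp is clear, the access UNDEFs before the register's
 * normal accessfn checks run.
 */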
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}
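
/* The 4-bit immediate from the MSR DAIFSet/DAIFClear forms is shifted left
 * by 6 to line up with the D, A, I and F bits (bits 9-6) of PSTATE before
 * being ORed in or masked out below.
 */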
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* If PSCI is enabled and this looks like a valid PSCI call then
     * suppress the UNDEF -- we'll catch the SMC exception and
     * implement the PSCI call behaviour there.
     */
    if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
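
/* In an AArch32-style SPSR the nRW bit is set and M[4:0] encodes the CPU
 * mode; in an AArch64-style SPSR nRW is clear, M[3:2] holds the target EL
 * and M[0] selects the stack pointer (so M[0] set is invalid for EL0).
 */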
static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}
void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal(env, EXCP_DEBUG));
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }

        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */
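
/* Each helper computes both the shifted result and the carry-out that the
 * flag-setting form of the instruction produces: e.g. a left shift by
 * exactly 32 yields 0 with CF = bit 0 of x, while shifts further beyond
 * the operand width clear CF entirely.
 */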
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}