 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
21 #include "qemu/osdep.h"
22 #include "qemu/qemu-print.h"
23 #include "qemu/timer.h"
25 #include "qemu-common.h"
26 #include "target/arm/idau.h"
27 #include "qemu/module.h"
28 #include "qapi/error.h"
29 #include "qapi/visitor.h"
32 #include "hw/core/tcg-cpu-ops.h"
33 #endif /* CONFIG_TCG */
34 #include "internals.h"
35 #include "exec/exec-all.h"
36 #include "hw/qdev-properties.h"
37 #if !defined(CONFIG_USER_ONLY)
38 #include "hw/loader.h"
39 #include "hw/boards.h"
41 #include "sysemu/tcg.h"
42 #include "sysemu/hw_accel.h"
44 #include "disas/capstone.h"
45 #include "fpu/softfloat.h"
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->regs[15] = value & ~1;
    env->thumb = value & 1;
}
#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs,
                                 const TranslationBlock *tb)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * It's OK to look at env for the current mode here, because it's
     * never possible for an AArch64 TB to chain to an AArch32 TB.
     */
    env->regs[15] = tb->pc;
}
#endif /* CONFIG_TCG */
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
         | CPU_INTERRUPT_EXITTB);
}
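/*
 * Pre/post exception-level-change hooks: callers (for example the PMU code
 * in realize) register callbacks on per-CPU lists, and they are invoked
 * just before and just after each exception level change.
 */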
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}
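/*
 * Usage sketch (hypothetical callback, for illustration only):
 *
 *     static void my_el_change_cb(ARMCPU *cpu, void *opaque) { ... }
 *     arm_register_el_change_hook(cpu, my_el_change_cb, NULL);
 *
 * The callback is then invoked after every exception level change on that
 * CPU, with the opaque pointer passed back unchanged.
 */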
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}
static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}
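/*
 * CPU reset: clear the register state up to end_reset_fields, rerun each
 * coprocessor register's reset hook (and assert the result is stable),
 * then apply the architecture- and profile-specific reset values below.
 */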
static void arm_cpu_reset(DeviceState *dev)
{
    CPUState *s = CPU(dev);
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    acc->parent_reset(dev);

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }
    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = 1;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys. */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
        /* and to the SVE instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
        /* with reasonable vector length */
        if (cpu_isar_feature(aa64_sve, cpu)) {
            env->vfp.zcr_el[1] =
                aarch64_sve_zcr_get_valid_len(cpu, cpu->sve_default_vq - 1);
        }
        /*
         * Enable 48-bit address space (TODO: take reserved_va into account).
         * Enable TBI0 but not TBI1.
         * Note that this must match useronly_clean_ptr.
         */
        env->cp15.tcr_el[1].raw_tcr = 5 | (1ULL << 37);

        /* Enable MTE */
        if (cpu_isar_feature(aa64_mte, cpu)) {
            /* Enable tag access, but leave TCF0 as No Effect (0). */
            env->cp15.sctlr_el[1] |= SCTLR_ATA0;
            /*
             * Exclude all tags, so that tag 0 is always used.
             * This corresponds to Linux current->thread.gcr_incl = 0.
             *
             * Set RRND, so that helper_irg() will generate a seed later.
             * Here in cpu_reset(), the crypto subsystem has not yet been
             * initialized.
             */
            env->cp15.gcr_el1 = 0x1ffff;
        }
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }

        /* Sample rvbar at reset. */
        env->cp15.rvbar = cpu->rvbar_prop;
        env->pc = env->cp15.rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
    if (arm_feature(env, ARM_FEATURE_M)) {
#ifndef CONFIG_USER_ONLY
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        const uint8_t *rom;
        uint32_t vecbase;
#endif

        if (cpu_isar_feature(aa32_lob, cpu)) {
            /*
             * LTPSIZE is constant 4 if MVE not implemented, and resets
             * to an UNKNOWN value if MVE is implemented. We choose to
             * always reset to 4.
             */
            env->v7m.ltpsize = 4;
            /* The LTPSIZE field in FPDSCR is constant and reads as 4. */
            env->v7m.fpdscr[M_REG_NS] = 4 << FPCR_LTPSIZE_SHIFT;
            env->v7m.fpdscr[M_REG_S] = 4 << FPCR_LTPSIZE_SHIFT;
        }

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
            /*
             * Set NSACR to indicate "NS access permitted to everything";
             * this avoids having to have all the tests of it being
             * conditional on ARM_FEATURE_M_SECURITY. Note also that from
             * v8.1M the guest-visible value of NSACR in a CPU without the
             * Security Extension is 0xcff.
             */
            env->v7m.nsacr = 0xcff;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            env->v7m.fpccr[M_REG_NS] = R_V7M_FPCCR_ASPEN_MASK;
            env->v7m.fpccr[M_REG_S] = R_V7M_FPCCR_ASPEN_MASK |
                R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK;
        }
#ifndef CONFIG_USER_ONLY
        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;
        env->v7m.vecbase[M_REG_NS] = cpu->init_nsvtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr_for_as(s->as, vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        qemu_log_mask(CPU_LOG_INT,
                      "Loaded reset SP 0x%x PC 0x%x from vector table\n",
                      initial_msp, initial_pc);

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
#else
        /*
         * For user mode we run non-secure and with access to the FPU.
         * The FPU context is active (ie does not need further setup)
         * and is owned by non-secure.
         */
        env->v7m.secure = false;
        env->v7m.nsacr = 0xcff;
        env->v7m.cpacr[M_REG_NS] = 0xf0ffff;
        env->v7m.fpccr[M_REG_S] &=
            ~(R_V7M_FPCCR_LSPEN_MASK | R_V7M_FPCCR_S_MASK);
        env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
#endif
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }
        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }
    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }
447 set_flush_to_zero(1, &env
->vfp
.standard_fp_status
);
448 set_flush_inputs_to_zero(1, &env
->vfp
.standard_fp_status
);
449 set_default_nan_mode(1, &env
->vfp
.standard_fp_status
);
450 set_default_nan_mode(1, &env
->vfp
.standard_fp_status_f16
);
451 set_float_detect_tininess(float_tininess_before_rounding
,
452 &env
->vfp
.fp_status
);
453 set_float_detect_tininess(float_tininess_before_rounding
,
454 &env
->vfp
.standard_fp_status
);
455 set_float_detect_tininess(float_tininess_before_rounding
,
456 &env
->vfp
.fp_status_f16
);
457 set_float_detect_tininess(float_tininess_before_rounding
,
458 &env
->vfp
.standard_fp_status_f16
);
459 #ifndef CONFIG_USER_ONLY
461 kvm_arm_reset_vcpu(cpu
);
465 hw_breakpoint_update_all(cpu
);
466 hw_watchpoint_update_all(cpu
);
467 arm_rebuild_hflags(env
);
#ifndef CONFIG_USER_ONLY

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el,
                                     unsigned int cur_el, bool secure,
                                     uint64_t hcr_el2)
{
    CPUARMState *env = cs->env_ptr;
    bool pstate_unmasked;
    bool unmasked = false;

    /*
     * Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;
    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;
    case EXCP_VFIQ:
        if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
            /* VFIQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
            /* VIRQs are only taken when hypervized. */
            return false;
        }
        return !(env->daif & PSTATE_I);
    default:
        g_assert_not_reached();
    }

    /*
     * Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /*
             * 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure || (env->cp15.scr_el3 & SCR_EEL2)) {
                unmasked = true;
            }
        } else {
            /*
             * The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /*
                 * If FIQs are routed to EL3 or EL2 then there are cases where
                 * we override the CPSR.F in determining if the exception is
                 * masked or not. If neither of these are set then we fall back
                 * to the CPSR.F setting otherwise we further assess the state
                 * below.
                 */
                hcr = hcr_el2 & HCR_FMO;
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /*
                 * When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /*
                 * When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure state.
                 * The SCR.IRQ setting has already been taken into consideration
                 * when setting the target EL, so it does not have a further
                 * effect here.
                 */
                hcr = hcr_el2 & HCR_IMO;
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = true;
            }
        }
    }

    /*
     * The PSTATE bits only mask the interrupt if we have not overridden the
     * ability above.
     */
    return unmasked || pstate_unmasked;
}
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cs->env_ptr;
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    uint32_t target_el;
    uint32_t excp_idx;

    /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el,
                              cur_el, secure, hcr_el2)) {
            goto found;
        }
    }
    return false;

 found:
    cs->exception_index = excp_idx;
    env->exception.target_el = target_el;
    cc->tcg_ops->do_interrupt(cs);
    return true;
}
#endif /* !CONFIG_USER_ONLY */
void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}
void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t linestate_bit;
    int irq_id;

    switch (irq) {
    case ARM_CPU_IRQ:
        irq_id = KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        irq_id = KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }
    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
}
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#endif
static int
print_insn_thumb1(bfd_vma pc, disassemble_info *info)
{
    return print_insn_arm(pc | 1, info);
}
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        /* We might not be compiled with the A64 disassembler
         * because it needs a C++ compiler. Leave print_insn
         * unset in this case to use the caller default behaviour.
         */
#if defined(CONFIG_ARM_A64_DIS)
        info->print_insn = print_insn_arm_a64;
#endif
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->print_insn = print_insn_thumb1;
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->print_insn = print_insn_arm;
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#ifdef TARGET_WORDS_BIGENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}
#ifdef TARGET_AARCH64

static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t psr = pstate_read(env);
    int i;
    int el = arm_current_el(env);
    const char *ns_status;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    for (i = 0; i < 32; i++) {
        if (i == 31) {
            qemu_fprintf(f, " SP=%016" PRIx64 "\n", env->xregs[i]);
        } else {
            qemu_fprintf(f, "X%02d=%016" PRIx64 "%s", i, env->xregs[i],
                         (i + 2) % 3 ? " " : "\n");
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
        ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
    } else {
        ns_status = "";
    }
    qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
                 psr,
                 psr & PSTATE_N ? 'N' : '-',
                 psr & PSTATE_Z ? 'Z' : '-',
                 psr & PSTATE_C ? 'C' : '-',
                 psr & PSTATE_V ? 'V' : '-',
                 ns_status,
                 el,
                 psr & PSTATE_SP ? 'h' : 't');

    if (cpu_isar_feature(aa64_bti, cpu)) {
        qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
    }
    if (!(flags & CPU_DUMP_FPU)) {
        qemu_fprintf(f, "\n");
        return;
    }
    if (fp_exception_el(env, el) != 0) {
        qemu_fprintf(f, " FPU disabled\n");
        return;
    }
    qemu_fprintf(f, " FPCR=%08x FPSR=%08x\n",
                 vfp_get_fpcr(env), vfp_get_fpsr(env));

    if (cpu_isar_feature(aa64_sve, cpu) && sve_exception_el(env, el) == 0) {
        int j, zcr_len = sve_zcr_len_for_el(env, el);

        for (i = 0; i <= FFR_PRED_NUM; i++) {
            bool eol;
            if (i == FFR_PRED_NUM) {
                qemu_fprintf(f, "FFR=");
                /* It's last, so end the line. */
                eol = true;
            } else {
                qemu_fprintf(f, "P%02d=", i);
                switch (zcr_len) {
                case 0:
                    eol = i % 8 == 7;
                    break;
                case 1:
                    eol = i % 6 == 5;
                    break;
                case 2:
                case 3:
                    eol = i % 3 == 2;
                    break;
                default:
                    /* More than one quadword per predicate. */
                    eol = true;
                    break;
                }
            }
            for (j = zcr_len / 4; j >= 0; j--) {
                int digits;
                if (j * 4 + 4 <= zcr_len + 1) {
                    digits = 16;
                } else {
                    digits = (zcr_len % 4 + 1) * 4;
                }
                qemu_fprintf(f, "%0*" PRIx64 "%s", digits,
                             env->vfp.pregs[i].p[j],
                             j ? ":" : eol ? "\n" : " ");
            }
        }

        for (i = 0; i < 32; i++) {
            if (zcr_len == 0) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                             i, env->vfp.zregs[i].d[1],
                             env->vfp.zregs[i].d[0], i & 1 ? "\n" : " ");
            } else if (zcr_len == 1) {
                qemu_fprintf(f, "Z%02d=%016" PRIx64 ":%016" PRIx64
                             ":%016" PRIx64 ":%016" PRIx64 "\n",
                             i, env->vfp.zregs[i].d[3], env->vfp.zregs[i].d[2],
                             env->vfp.zregs[i].d[1], env->vfp.zregs[i].d[0]);
            } else {
                for (j = zcr_len; j >= 0; j--) {
                    bool odd = (zcr_len - j) % 2 != 0;
                    if (j == zcr_len) {
                        qemu_fprintf(f, "Z%02d[%x-%x]=", i, j, j - 1);
                    } else if (!odd) {
                        if (j > 0) {
                            qemu_fprintf(f, " [%x-%x]=", j, j - 1);
                        } else {
                            qemu_fprintf(f, " [%x]=", j);
                        }
                    }
                    qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%s",
                                 env->vfp.zregs[i].d[j * 2 + 1],
                                 env->vfp.zregs[i].d[j * 2],
                                 odd || j == 0 ? "\n" : ":");
                }
            }
        }
    } else {
        for (i = 0; i < 32; i++) {
            uint64_t *q = aa64_vfp_qreg(env, i);
            qemu_fprintf(f, "Q%02d=%016" PRIx64 ":%016" PRIx64 "%s",
                         i, q[1], q[0], (i & 1 ? "\n" : " "));
        }
    }
}

#else

static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    g_assert_not_reached();
}

#endif
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int i;

    if (is_a64(env)) {
        aarch64_cpu_dump_state(cs, f, flags);
        return;
    }

    for (i = 0; i < 16; i++) {
        qemu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            qemu_fprintf(f, "\n");
        } else {
            qemu_fprintf(f, " ");
        }
    }

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t xpsr = xpsr_read(env);
        const char *mode;
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            ns_status = env->v7m.secure ? "S " : "NS ";
        }

        if (xpsr & XPSR_EXCP) {
            mode = "handler";
        } else {
            if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_NPRIV_MASK) {
                mode = "unpriv-thread";
            } else {
                mode = "priv-thread";
            }
        }

        qemu_fprintf(f, "XPSR=%08x %c%c%c%c %c %s%s\n",
                     xpsr,
                     xpsr & XPSR_N ? 'N' : '-',
                     xpsr & XPSR_Z ? 'Z' : '-',
                     xpsr & XPSR_C ? 'C' : '-',
                     xpsr & XPSR_V ? 'V' : '-',
                     xpsr & XPSR_T ? 'T' : 'A',
                     ns_status,
                     mode);
    } else {
        uint32_t psr = cpsr_read(env);
        const char *ns_status = "";

        if (arm_feature(env, ARM_FEATURE_EL3) &&
            (psr & CPSR_M) != ARM_CPU_MODE_MON) {
            ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
        }

        qemu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
                     psr,
                     psr & CPSR_N ? 'N' : '-',
                     psr & CPSR_Z ? 'Z' : '-',
                     psr & CPSR_C ? 'C' : '-',
                     psr & CPSR_V ? 'V' : '-',
                     psr & CPSR_T ? 'T' : 'A',
                     ns_status,
                     aarch32_mode_name(psr), (psr & 0x10) ? 32 : 26);
    }

    if (flags & CPU_DUMP_FPU) {
        int numvfpregs = 0;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            numvfpregs = 32;
        } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
            numvfpregs = 16;
        }
        for (i = 0; i < numvfpregs; i++) {
            uint64_t v = *aa32_vfp_dreg(env, i);
            qemu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
                         i * 2, (uint32_t)v,
                         i * 2 + 1, (uint32_t)(v >> 32),
                         i, v);
        }
        qemu_fprintf(f, "FPSCR: %08x\n", vfp_get_fpscr(env));
        if (cpu_isar_feature(aa32_mve, cpu)) {
            qemu_fprintf(f, "VPR: %08x\n", env->v7m.vpr);
        }
    }
}
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}
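/*
 * Worked example: with a cluster size of 8 (the default used in realize),
 * cpu index 11 maps to Aff1 = 1 and Aff0 = 3, i.e. the fourth core of the
 * second cluster.
 */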
static void cpreg_hashtable_data_destroy(gpointer data)
{
    /*
     * Destroy function for cpu->cp_regs hashtable data entries.
     * We must free the name string because it was g_strdup()ed in
     * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
     * from r->name because we know we definitely allocated it.
     */
    ARMCPRegInfo *r = data;

    g_free((void *)r->name);
    g_free(r);
}
static void arm_cpu_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu_set_cpustate_pointers(cpu);
    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                         g_free, cpreg_hashtable_data_destroy);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifdef CONFIG_USER_ONLY
# ifdef TARGET_AARCH64
    /*
     * The linux kernel defaults to 512-bit vectors, when sve is supported.
     * See documentation for /proc/sys/abi/sve_default_vector_length, and
     * our corresponding sve-default-vector-length cpu property.
     */
    cpu->sve_default_vq = 4;
# endif
#else
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = QEMU_PSCI_VERSION_0_1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() || hvf_enabled()) {
        /* TCG and HVF implement PSCI 1.1 */
        cpu->psci_version = QEMU_PSCI_VERSION_1_1;
    }
}
static Property arm_cpu_gt_cntfrq_property =
            DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz,
                               NANOSECONDS_PER_SECOND / GTIMER_SCALE);

static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

#ifndef CONFIG_USER_ONLY
static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
#endif

static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

static Property arm_cpu_has_vfp_property =
            DEFINE_PROP_BOOL("vfp", ARMCPU, has_vfp, true);

static Property arm_cpu_has_neon_property =
            DEFINE_PROP_BOOL("neon", ARMCPU, has_neon, true);

static Property arm_cpu_has_dsp_property =
            DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);

static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);
static bool arm_get_pmu(Object *obj, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    return cpu->has_pmu;
}

static void arm_set_pmu(Object *obj, bool value, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (value) {
        if (kvm_enabled() && !kvm_arm_pmu_supported()) {
            error_setg(errp, "'pmu' feature not supported by KVM on this host");
            return;
        }
        set_feature(&cpu->env, ARM_FEATURE_PMU);
    } else {
        unset_feature(&cpu->env, ARM_FEATURE_PMU);
    }
    cpu->has_pmu = value;
}
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
    /*
     * The exact approach to calculating guest ticks is:
     *
     *     muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), cpu->gt_cntfrq_hz,
     *              NANOSECONDS_PER_SECOND);
     *
     * We don't do that. Rather we intentionally use integer division
     * truncation below and in the caller for the conversion of host monotonic
     * time to guest ticks to provide the exact inverse for the semantics of
     * the QEMUTimer scale factor. QEMUTimer's scale factor is an integer, so
     * it loses precision when representing frequencies where
     * `(NANOSECONDS_PER_SECOND % cpu->gt_cntfrq) > 0` holds. Failing to
     * provide an exact inverse leads to scheduling timers with negative
     * periods, which in turn leads to sticky behaviour in the guest.
     *
     * Finally, CNTFRQ is effectively capped at 1GHz to ensure our scale factor
     * cannot become zero.
     */
    return NANOSECONDS_PER_SECOND > cpu->gt_cntfrq_hz ?
        NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
}
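/*
 * For example, at the default CNTFRQ of NANOSECONDS_PER_SECOND / GTIMER_SCALE
 * (62.5 MHz) this returns a 16 ns period, while any frequency at or above
 * 1 GHz is clamped to the minimum period of 1 ns.
 */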
void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* M profile implies PMSA. We have to do this here rather than
     * in realize with the other feature-implication checks because
     * we look at the PMSA bit to see if we should add some properties.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        set_feature(&cpu->env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        object_property_add_uint64_ptr(obj, "rvbar",
                                       &cpu->rvbar_prop,
                                       OBJ_PROP_FLAG_READWRITE);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /* Add the has_el3 state CPU property only if EL3 is allowed. This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property);

        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property);
    }
#endif

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        cpu->has_pmu = true;
        object_property_add_bool(obj, "pmu", arm_get_pmu, arm_set_pmu);
    }
    /*
     * Allow user to turn off VFP and Neon support, but only for TCG --
     * KVM does not currently allow us to lie to the guest about its
     * ID/feature registers, so the guest always sees what the host has.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)
        ? cpu_isar_feature(aa64_fp_simd, cpu)
        : cpu_isar_feature(aa32_vfp, cpu)) {
        cpu->has_vfp = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_vfp_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_NEON)) {
        cpu->has_neon = true;
        if (!kvm_enabled()) {
            qdev_property_add_static(DEVICE(obj), &arm_cpu_has_neon_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M) &&
        arm_feature(&cpu->env, ARM_FEATURE_THUMB_DSP)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);
        /*
         * M profile: initial value of the Secure VTOR. We can't just use
         * a simple DEFINE_PROP_UINT32 for this because we want to permit
         * the property to be set after realize.
         */
        object_property_add_uint32_ptr(obj, "init-svtor",
                                       &cpu->init_svtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        /*
         * Initial value of the NS VTOR (for cores without the Security
         * extension, this is the only VTOR)
         */
        object_property_add_uint32_ptr(obj, "init-nsvtor",
                                       &cpu->init_nsvtor,
                                       OBJ_PROP_FLAG_READWRITE);
    }

    /* Not DEFINE_PROP_UINT32: we want this to be settable after realize */
    object_property_add_uint32_ptr(obj, "psci-conduit",
                                   &cpu->psci_conduit,
                                   OBJ_PROP_FLAG_READWRITE);

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);

    if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
        qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
    }

    if (kvm_enabled()) {
        kvm_arm_add_vcpu_properties(obj);
    }

#ifndef CONFIG_USER_ONLY
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
        cpu_isar_feature(aa64_mte, cpu)) {
        object_property_add_link(obj, "tag-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->tag_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG);

        if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
            object_property_add_link(obj, "secure-tag-memory",
                                     TYPE_MEMORY_REGION,
                                     (Object **)&cpu->secure_tag_memory,
                                     qdev_prop_allow_set_link_before_realize,
                                     OBJ_PROP_LINK_STRONG);
        }
    }
#endif
}
static void arm_cpu_finalizefn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMELChangeHook *hook, *next;

    g_hash_table_destroy(cpu->cp_regs);

    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->pmu_timer) {
        timer_free(cpu->pmu_timer);
    }
#endif
}
void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
    Error *local_err = NULL;

    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        arm_cpu_sve_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_pauth_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        arm_cpu_lpa2_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (kvm_enabled()) {
        kvm_arm_steal_time_finalize(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    ARMCPU *cpu = ARM_CPU(dev);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
    CPUARMState *env = &cpu->env;
    int pagebits;
    Error *local_err = NULL;
    bool no_aa32 = false;

    /* If we needed to query the host kernel for the CPU features
     * then it's possible that might have failed in the initfn, but
     * this is the first point where we can report it.
     */
    if (cpu->host_cpu_probe_failed) {
        if (!kvm_enabled() && !hvf_enabled()) {
            error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
        } else {
            error_setg(errp, "Failed to retrieve host CPU features");
        }
        return;
    }
#ifndef CONFIG_USER_ONLY
    /* The NVIC and M-profile CPU are two halves of a single piece of
     * hardware; trying to use one without the other is a command line
     * error and will result in segfaults if not caught here.
     */
    if (arm_feature(env, ARM_FEATURE_M)) {
        if (!env->nvic) {
            error_setg(errp, "This board cannot be used with Cortex-M CPUs");
            return;
        }
    } else {
        if (env->nvic) {
            error_setg(errp, "This board can only be used with Cortex-M CPUs");
            return;
        }
    }

    if (kvm_enabled()) {
        /*
         * Catch all the cases which might cause us to create more than one
         * address space for the CPU (otherwise we will assert() later in
         * cpu_address_space_init()).
         */
        if (arm_feature(env, ARM_FEATURE_M)) {
            error_setg(errp,
                       "Cannot enable KVM when using an M-profile guest CPU");
            return;
        }
        if (cpu->has_el3) {
            error_setg(errp,
                       "Cannot enable KVM when guest CPU has EL3 enabled");
            return;
        }
        if (cpu->tag_memory) {
            error_setg(errp,
                       "Cannot enable KVM when guest CPU has MTE enabled");
            return;
        }
    }
    {
        uint64_t scale;

        if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
            if (!cpu->gt_cntfrq_hz) {
                error_setg(errp, "Invalid CNTFRQ: %"PRId64"Hz",
                           cpu->gt_cntfrq_hz);
                return;
            }
            scale = gt_cntfrq_period_ns(cpu);
        } else {
            scale = GTIMER_SCALE;
        }

        cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                               arm_gt_ptimer_cb, cpu);
        cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                               arm_gt_vtimer_cb, cpu);
        cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                              arm_gt_htimer_cb, cpu);
        cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                              arm_gt_stimer_cb, cpu);
        cpu->gt_timer[GTIMER_HYPVIRT] = timer_new(QEMU_CLOCK_VIRTUAL, scale,
                                                  arm_gt_hvtimer_cb, cpu);
    }
#endif /* !CONFIG_USER_ONLY */
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    arm_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64) &&
        cpu->has_vfp != cpu->has_neon) {
        /*
         * This is an architectural requirement for AArch64; AArch32 is
         * more flexible and permits VFP-no-Neon and Neon-no-VFP.
         */
        error_setg(errp,
                   "AArch64 CPUs must have both VFP and Neon or neither");
        return;
    }
    if (!cpu->has_vfp) {
        uint64_t t;
        uint32_t u;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
        cpu->isar.id_aa64pfr0 = t;

        u = cpu->isar.id_isar6;
        u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
        u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
        cpu->isar.id_isar6 = u;

        u = cpu->isar.mvfr0;
        u = FIELD_DP32(u, MVFR0, FPSP, 0);
        u = FIELD_DP32(u, MVFR0, FPDP, 0);
        u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
        u = FIELD_DP32(u, MVFR0, FPSQRT, 0);
        u = FIELD_DP32(u, MVFR0, FPROUND, 0);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
            u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
        }
        cpu->isar.mvfr0 = u;

        u = cpu->isar.mvfr1;
        u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
        u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
        u = FIELD_DP32(u, MVFR1, FPHP, 0);
        if (arm_feature(env, ARM_FEATURE_M)) {
            u = FIELD_DP32(u, MVFR1, FP16, 0);
        }
        cpu->isar.mvfr1 = u;

        u = cpu->isar.mvfr2;
        u = FIELD_DP32(u, MVFR2, FPMISC, 0);
        cpu->isar.mvfr2 = u;
    }
    if (!cpu->has_neon) {
        uint64_t t;
        uint32_t u;

        unset_feature(env, ARM_FEATURE_NEON);

        t = cpu->isar.id_aa64isar0;
        t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
        cpu->isar.id_aa64isar0 = t;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
        t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
        cpu->isar.id_aa64pfr0 = t;

        u = cpu->isar.id_isar5;
        u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
        u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
        cpu->isar.id_isar5 = u;

        u = cpu->isar.id_isar6;
        u = FIELD_DP32(u, ID_ISAR6, DP, 0);
        u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
        u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
        u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
        cpu->isar.id_isar6 = u;

        if (!arm_feature(env, ARM_FEATURE_M)) {
            u = cpu->isar.mvfr1;
            u = FIELD_DP32(u, MVFR1, SIMDLS, 0);
            u = FIELD_DP32(u, MVFR1, SIMDINT, 0);
            u = FIELD_DP32(u, MVFR1, SIMDSP, 0);
            u = FIELD_DP32(u, MVFR1, SIMDHP, 0);
            cpu->isar.mvfr1 = u;

            u = cpu->isar.mvfr2;
            u = FIELD_DP32(u, MVFR2, SIMDMISC, 0);
            cpu->isar.mvfr2 = u;
        }
    }
    if (!cpu->has_neon && !cpu->has_vfp) {
        uint64_t t;
        uint32_t u;

        t = cpu->isar.id_aa64isar0;
        t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
        cpu->isar.id_aa64isar0 = t;

        t = cpu->isar.id_aa64isar1;
        t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
        cpu->isar.id_aa64isar1 = t;

        u = cpu->isar.mvfr0;
        u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
        cpu->isar.mvfr0 = u;

        /* Despite the name, this field covers both VFP and Neon */
        u = cpu->isar.mvfr1;
        u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0);
        cpu->isar.mvfr1 = u;
    }
    if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) {
        uint32_t u;

        unset_feature(env, ARM_FEATURE_THUMB_DSP);

        u = cpu->isar.id_isar1;
        u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
        cpu->isar.id_isar1 = u;

        u = cpu->isar.id_isar2;
        u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
        u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
        cpu->isar.id_isar2 = u;

        u = cpu->isar.id_isar3;
        u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
        u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
        cpu->isar.id_isar3 = u;
    }
    /* Some features automatically imply others: */
    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V7);
        } else {
            set_feature(env, ARM_FEATURE_V7VE);
        }
    }

    /*
     * There exist AArch64 cpus without AArch32 support. When KVM
     * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
     * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
     * As a general principle, we also do not make ID register
     * consistency checks anywhere unless using TCG, because only
     * for TCG would a consistency-check failure be a QEMU bug.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
    }

    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        /* v7 Virtualization Extensions. In real hardware this implies
         * EL2 and also the presence of the Security Extensions.
         * For QEMU, for backwards-compatibility we implement some
         * CPUs or CPU configs which have no actual EL2 or EL3 but do
         * include the various other features that V7VE implies.
         * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
         * Security Extensions is ARM_FEATURE_EL3.
         */
        assert(!tcg_enabled() || no_aa32 ||
               cpu_isar_feature(aa32_arm_div, cpu));
        set_feature(env, ARM_FEATURE_LPAE);
        set_feature(env, ARM_FEATURE_V7);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_MPIDR);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V6K);
        } else {
            set_feature(env, ARM_FEATURE_V6);
        }

        /* Always define VBAR for V7 CPUs even if it doesn't exist in
         * non-EL3 configs. This is needed by some legacy boards.
         */
        set_feature(env, ARM_FEATURE_VBAR);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_MVFR);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        set_feature(env, ARM_FEATURE_V5);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            assert(!tcg_enabled() || no_aa32 ||
                   cpu_isar_feature(aa32_jazelle, cpu));
            set_feature(env, ARM_FEATURE_AUXCR);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V5)) {
        set_feature(env, ARM_FEATURE_V4T);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        set_feature(env, ARM_FEATURE_V7MP);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
        set_feature(env, ARM_FEATURE_CBAR);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
        !arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_THUMB_DSP);
    }

    /*
     * We rely on no XScale CPU having VFP so we can use the same bits in the
     * TB flags field for VECSTRIDE and XSCALE_CPAR.
     */
    assert(arm_feature(&cpu->env, ARM_FEATURE_AARCH64) ||
           !cpu_isar_feature(aa32_vfp_simd, cpu) ||
           !arm_feature(env, ARM_FEATURE_XSCALE));
    if (arm_feature(env, ARM_FEATURE_V7) &&
        !arm_feature(env, ARM_FEATURE_M) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        /* v7VMSA drops support for the old ARMv5 tiny pages, so we
         * can use 4K pages.
         */
        pagebits = 12;
    } else {
        /* For CPUs which might have tiny 1K pages, or which have an
         * MPU and might have small region sizes, stick with 1K pages.
         */
        pagebits = 10;
    }
    if (!set_preferred_target_page_bits(pagebits)) {
        /* This can only ever happen for hotplugging a CPU, or if
         * the board code incorrectly creates a CPU which it has
         * promised via minimum_page_size that it will not.
         */
        error_setg(errp, "This CPU requires a smaller page size than the "
                   "system is using");
        return;
    }

    /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
     * We don't support setting cluster ID ([16..23]) (known as Aff2
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
        cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
                                               ARM_DEFAULT_CPUS_PER_CLUSTER);
    }
    if (cpu->reset_hivecs) {
        cpu->reset_sctlr |= (1 << 13);
    }

    if (cpu->cfgend) {
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            cpu->reset_sctlr |= SCTLR_EE;
        } else {
            cpu->reset_sctlr |= SCTLR_B;
        }
    }

    if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) {
        /* If the has_el3 CPU property is disabled then we need to disable the
         * feature.
         */
        unset_feature(env, ARM_FEATURE_EL3);

        /* Disable the security extension feature bits in the processor feature
         * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
         */
        cpu->isar.id_pfr1 &= ~0xf0;
        cpu->isar.id_aa64pfr0 &= ~0xf000;
    }

    if (!cpu->has_el2) {
        unset_feature(env, ARM_FEATURE_EL2);
    }

    if (!cpu->has_pmu) {
        unset_feature(env, ARM_FEATURE_PMU);
    }
    if (arm_feature(env, ARM_FEATURE_PMU)) {
        pmu_init(cpu);

        if (!kvm_enabled()) {
            arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
            arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
        }

#ifndef CONFIG_USER_ONLY
        cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
                                      cpu);
#endif
    } else {
        cpu->isar.id_aa64dfr0 =
            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
        cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* Disable the hypervisor feature bits in the processor feature
         * registers if we don't have EL2. These are id_pfr1[15:12] and
         * id_aa64pfr0_el1[11:8].
         */
        cpu->isar.id_aa64pfr0 &= ~0xf00;
        cpu->isar.id_pfr1 &= ~0xf000;
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
        /*
         * Disable the MTE feature bits if we do not have tag-memory
         * provided by the machine.
         */
        cpu->isar.id_aa64pfr1 =
            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
    }
#endif
    /* MPU can be configured out of a PMSA CPU either by setting has-mpu
     * to false or by setting pmsav7-dregion to 0.
     */
    if (!cpu->has_mpu) {
        cpu->pmsav7_dregion = 0;
    }
    if (cpu->pmsav7_dregion == 0) {
        cpu->has_mpu = false;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V7)) {
        uint32_t nr = cpu->pmsav7_dregion;

        if (nr > 0xff) {
            error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
                }
            } else {
                env->pmsav7.drbar = g_new0(uint32_t, nr);
                env->pmsav7.drsr = g_new0(uint32_t, nr);
                env->pmsav7.dracr = g_new0(uint32_t, nr);
            }
        }
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        uint32_t nr = cpu->sau_sregion;

        if (nr > 0xff) {
            error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
            return;
        }
        if (nr) {
            env->sau.rbar = g_new0(uint32_t, nr);
            env->sau.rlar = g_new0(uint32_t, nr);
        }
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        set_feature(env, ARM_FEATURE_VBAR);
    }

    register_cp_regs_for_features(cpu);
    arm_cpu_register_gdb_regs_for_features(cpu);

    init_cpreg_list(cpu);
#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    bool has_secure = cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY);

    /*
     * We must set cs->num_ases to the final value before
     * the first call to cpu_address_space_init.
     */
    if (cpu->tag_memory != NULL) {
        cs->num_ases = 3 + has_secure;
    } else {
        cs->num_ases = 1 + has_secure;
    }

    if (has_secure) {
        if (!cpu->secure_memory) {
            cpu->secure_memory = cs->memory;
        }
        cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
                               cpu->secure_memory);
    }

    if (cpu->tag_memory != NULL) {
        cpu_address_space_init(cs, ARMASIdx_TagNS, "cpu-tag-memory",
                               cpu->tag_memory);
        if (has_secure) {
            cpu_address_space_init(cs, ARMASIdx_TagS, "cpu-tag-memory",
                                   cpu->secure_tag_memory);
        }
    }

    cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);

    /* No core_count specified, default to smp_cpus. */
    if (cpu->core_count == -1) {
        cpu->core_count = smp_cpus;
    }
#endif
1952 int dcz_blocklen
= 4 << cpu
->dcz_blocksize
;
1955 * We only support DCZ blocklen that fits on one page.
1957 * Architectually this is always true. However TARGET_PAGE_SIZE
1958 * is variable and, for compatibility with -machine virt-2.7,
1959 * is only 1KiB, as an artifact of legacy ARMv5 subpage support.
1960 * But even then, while the largest architectural DCZ blocklen
1961 * is 2KiB, no cpu actually uses such a large blocklen.
1963 assert(dcz_blocklen
<= TARGET_PAGE_SIZE
);
1966 * We only support DCZ blocksize >= 2*TAG_GRANULE, which is to say
1967 * both nibbles of each byte storing tag data may be written at once.
1968 * Since TAG_GRANULE is 16, this means that blocklen must be >= 32.
1970 if (cpu_isar_feature(aa64_mte
, cpu
)) {
1971 assert(dcz_blocklen
>= 2 * TAG_GRANULE
);
1978 acc
->parent_realize(dev
, errp
);
static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;
    const char *cpunamestr;

    cpuname = g_strsplit(cpu_model, ",", 1);
    cpunamestr = cpuname[0];
#ifdef CONFIG_USER_ONLY
    /* For backwards compatibility usermode emulation allows "-cpu any",
     * which has the same semantics as "-cpu max".
     */
    if (!strcmp(cpunamestr, "any")) {
        cpunamestr = "max";
    }
#endif

    typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}
static Property arm_cpu_properties[] = {
    DEFINE_PROP_UINT64("midr", ARMCPU, midr, 0),
    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                       mp_affinity, ARM64_AFFINITY_INVALID),
    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
    DEFINE_PROP_END_OF_LIST()
};
static gchar *arm_gdb_arch_name(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return g_strdup("iwmmxt");
    }
    return g_strdup("arm");
}
2030 #include "hw/core/sysemu-cpu-ops.h"
2032 static const struct SysemuCPUOps arm_sysemu_ops
= {
2033 .get_phys_page_attrs_debug
= arm_cpu_get_phys_page_attrs_debug
,
2034 .asidx_from_attrs
= arm_asidx_from_attrs
,
2035 .write_elf32_note
= arm_cpu_write_elf32_note
,
2036 .write_elf64_note
= arm_cpu_write_elf64_note
,
2037 .virtio_is_big_endian
= arm_cpu_virtio_is_big_endian
,
2038 .legacy_vmsd
= &vmstate_arm_cpu
,
2043 static const struct TCGCPUOps arm_tcg_ops
= {
2044 .initialize
= arm_translate_init
,
2045 .synchronize_from_tb
= arm_cpu_synchronize_from_tb
,
2046 .debug_excp_handler
= arm_debug_excp_handler
,
2048 #ifdef CONFIG_USER_ONLY
2049 .record_sigsegv
= arm_cpu_record_sigsegv
,
2050 .record_sigbus
= arm_cpu_record_sigbus
,
2052 .tlb_fill
= arm_cpu_tlb_fill
,
2053 .cpu_exec_interrupt
= arm_cpu_exec_interrupt
,
2054 .do_interrupt
= arm_cpu_do_interrupt
,
2055 .do_transaction_failed
= arm_cpu_do_transaction_failed
,
2056 .do_unaligned_access
= arm_cpu_do_unaligned_access
,
2057 .adjust_watchpoint_address
= arm_adjust_watchpoint_address
,
2058 .debug_check_watchpoint
= arm_debug_check_watchpoint
,
2059 .debug_check_breakpoint
= arm_debug_check_breakpoint
,
2060 #endif /* !CONFIG_USER_ONLY */
2062 #endif /* CONFIG_TCG */
static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, arm_cpu_realizefn,
                                    &acc->parent_realize);

    device_class_set_props(dc, arm_cpu_properties);
    device_class_set_parent_reset(dc, arm_cpu_reset, &acc->parent_reset);

    cc->class_by_name = arm_cpu_class_by_name;
    cc->has_work = arm_cpu_has_work;
    cc->dump_state = arm_cpu_dump_state;
    cc->set_pc = arm_cpu_set_pc;
    cc->gdb_read_register = arm_cpu_gdb_read_register;
    cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &arm_sysemu_ops;
#endif
    cc->gdb_num_core_regs = 26;
    cc->gdb_core_xml_file = "arm-core.xml";
    cc->gdb_arch_name = arm_gdb_arch_name;
    cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = arm_disas_set_info;

#ifdef CONFIG_TCG
    cc->tcg_ops = &arm_tcg_ops;
#endif /* CONFIG_TCG */
}
static void arm_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}
*info
)
2114 TypeInfo type_info
= {
2115 .parent
= TYPE_ARM_CPU
,
2116 .instance_size
= sizeof(ARMCPU
),
2117 .instance_align
= __alignof__(ARMCPU
),
2118 .instance_init
= arm_cpu_instance_init
,
2119 .class_size
= sizeof(ARMCPUClass
),
2120 .class_init
= info
->class_init
?: cpu_register_class_init
,
2121 .class_data
= (void *)info
,
2124 type_info
.name
= g_strdup_printf("%s-" TYPE_ARM_CPU
, info
->name
);
2125 type_register(&type_info
);
2126 g_free((void *)type_info
.name
);
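/*
 * Usage sketch (illustrative, not defined in this file): a per-model source
 * file builds an ARMCPUInfo entry such as
 * { .name = "cortex-a7", .initfn = cortex_a7_initfn } and passes it to
 * arm_cpu_register() from a type_init hook, which creates the
 * "cortex-a7-arm-cpu" QOM type derived from TYPE_ARM_CPU.
 */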
static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_align = __alignof__(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};

static void arm_cpu_register_types(void)
{
    type_register_static(&arm_cpu_type_info);
}

type_init(arm_cpu_register_types)