/*
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "internals.h"
#include "qemu-common.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
#include "hw/arm/arm.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);

    cpu->env.regs[15] = value;
}
static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
         | CPU_INTERRUPT_EXITTB);
}
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                     void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}
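/*
 * Illustrative only (added commentary, not part of the original file):
 * a consumer such as an interrupt-controller model registers a callback
 * that runs whenever the CPU changes exception level. The callback name
 * and opaque pointer below are hypothetical.
 *
 *     static void my_el_change_cb(ARMCPU *cpu, void *opaque)
 *     {
 *         // e.g. recompute interrupt routing for the new EL
 *     }
 *
 *     arm_register_el_change_hook(cpu, my_el_change_cb, my_state);
 *
 * Registered hooks are kept on a QLIST and released in
 * arm_cpu_finalizefn() further down in this file.
 */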
static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}
static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}
/* CPUClass::reset() */
static void arm_cpu_reset(CPUState *s)
{
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    acc->parent_reset(s);

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
    s->halted = cpu->start_powered_off;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = 1;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
        /* and to the SVE instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
        env->cp15.cptr_el[3] |= CPTR_EZ;
        /* with maximum vector length */
        env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
        env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
        env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }
        env->pc = cpu->rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else
    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        const uint8_t *rom;
        uint32_t vecbase;

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr(vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
    }

    /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }
        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);
}
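/*
 * Note (added commentary, not in the original file): for M-profile CPUs the
 * reset path above follows the architectural rule that the initial main
 * stack pointer is fetched from the first word of the vector table and the
 * reset handler address from the second word, with bit 0 of that address
 * selecting Thumb state.
 */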
bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cs->env_ptr;
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint32_t target_el;
    uint32_t excp_idx;
    bool ret = false;

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }

    return ret;
}
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    bool ret = false;

    /* ARMv7-M interrupt masking works differently than -A or -R.
     * There is no FIQ/IRQ distinction. Instead of I and F bits
     * masking FIQ and IRQ interrupts, an exception is taken only
     * if it is higher priority than the current execution priority
     * (which depends on state like BASEPRI, FAULTMASK and the
     * currently active exception).
     */
    if (interrupt_request & CPU_INTERRUPT_HARD
        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
        cs->exception_index = EXCP_IRQ;
        cc->do_interrupt(cs);
        ret = true;
    }
    return ret;
}
#endif
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    switch (irq) {
    case ARM_CPU_VIRQ:
    case ARM_CPU_VFIQ:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        /* fall through */
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    int kvm_irq = KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT;

    switch (irq) {
    case ARM_CPU_IRQ:
        kvm_irq |= KVM_ARM_IRQ_CPU_IRQ;
        break;
    case ARM_CPU_FIQ:
        kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
        break;
    default:
        g_assert_not_reached();
    }
    kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
    kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
#endif
}
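/*
 * Illustrative only (added commentary): the kvm_irq word built above packs
 * the irq type, the vcpu index and the per-CPU line number using the shift
 * values defined by the kernel ABI, e.g. raising the IRQ line of vcpu 2 is
 *     (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT)
 *         | (2 << KVM_ARM_IRQ_VCPU_SHIFT) | KVM_ARM_IRQ_CPU_IRQ
 */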
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}
#endif
static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1ULL << feature;
}

static inline void unset_feature(CPUARMState *env, int feature)
{
    env->features &= ~(1ULL << feature);
}
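/*
 * Illustrative only (added commentary): env->features is a 64-bit bitmap
 * indexed by the ARM_FEATURE_* enum, so for example
 *     set_feature(env, ARM_FEATURE_V7);
 *     assert(arm_feature(env, ARM_FEATURE_V7));
 * simply sets and then tests bit ARM_FEATURE_V7.
 */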
static int print_insn_thumb1(bfd_vma pc, disassemble_info *info)
{
    return print_insn_arm(pc | 1, info);
}
static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        /* We might not be compiled with the A64 disassembler
         * because it needs a C++ compiler. Leave print_insn
         * unset in this case to use the caller default behaviour.
         */
#if defined(CONFIG_ARM_A64_DIS)
        info->print_insn = print_insn_arm_a64;
#endif
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->print_insn = print_insn_thumb1;
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->print_insn = print_insn_arm;
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#ifdef TARGET_WORDS_BIGENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}
uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}
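/*
 * Worked example (added commentary): with the default cluster size of 8,
 * cpu index 9 gives Aff1 = 1 and Aff0 = 1, i.e. an MPIDR affinity value of
 * (1 << ARM_AFF1_SHIFT) | 1.
 */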
static void arm_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    ARMCPU *cpu = ARM_CPU(obj);

    cs->env_ptr = &cpu->env;
    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                         g_free, g_free);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifndef CONFIG_USER_ONLY
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_ptimer_cb, cpu);
    cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_vtimer_cb, cpu);
    cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                          arm_gt_htimer_cb, cpu);
    cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                          arm_gt_stimer_cb, cpu);
    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = 1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled()) {
        cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
    }
}
static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

static Property arm_cpu_rvbar_property =
            DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);

static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);

static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

/* use property name "pmu" to match other archs and virt tools */
static Property arm_cpu_has_pmu_property =
            DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);

static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);

/* M profile: initial value of the Secure VTOR */
static Property arm_cpu_initsvtor_property =
            DEFINE_PROP_UINT32("init-svtor", ARMCPU, init_svtor, 0);
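/*
 * Illustrative only (added commentary): these qdev properties can be set
 * from the command line or by board code before realize, for example
 *     -cpu cortex-m33,init-svtor=0x10000000
 * or, from C (the value here is hypothetical):
 *     qdev_prop_set_uint32(DEVICE(cpu), "init-svtor", 0x10000000);
 */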
static void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* M profile implies PMSA. We have to do this here rather than
     * in realize with the other feature-implication checks because
     * we look at the PMSA bit to see if we should add some properties.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        set_feature(&cpu->env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
                                 &error_abort);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /* Add the has_el3 state CPU property only if EL3 is allowed. This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
                                 &error_abort);

#ifndef CONFIG_USER_ONLY
        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG,
                                 &error_abort);
#endif
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
                                 &error_abort);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property,
                                     &error_abort);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG,
                                 &error_abort);
        qdev_property_add_static(DEVICE(obj), &arm_cpu_initsvtor_property,
                                 &error_abort);
    }

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property,
                             &error_abort);
}
static void arm_cpu_finalizefn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMELChangeHook *hook, *next;

    g_hash_table_destroy(cpu->cp_regs);

    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
}
static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    ARMCPU *cpu = ARM_CPU(dev);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
    CPUARMState *env = &cpu->env;
    int pagebits;
    Error *local_err = NULL;

    /* If we needed to query the host kernel for the CPU features
     * then it's possible that might have failed in the initfn, but
     * this is the first point where we can report it.
     */
    if (cpu->host_cpu_probe_failed) {
        if (!kvm_enabled()) {
            error_setg(errp, "The 'host' CPU type can only be used with KVM");
        } else {
            error_setg(errp, "Failed to retrieve host CPU features");
        }
        return;
    }

#ifndef CONFIG_USER_ONLY
    /* The NVIC and M-profile CPU are two halves of a single piece of
     * hardware; trying to use one without the other is a command line
     * error and will result in segfaults if not caught here.
     */
    if (arm_feature(env, ARM_FEATURE_M)) {
        if (!env->nvic) {
            error_setg(errp, "This board cannot be used with Cortex-M CPUs");
            return;
        }
    } else {
        if (env->nvic) {
            error_setg(errp, "This board can only be used with Cortex-M CPUs");
            return;
        }
    }
#endif

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
    /* Some features automatically imply others: */
    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V7);
        } else {
            set_feature(env, ARM_FEATURE_V7VE);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        /* v7 Virtualization Extensions. In real hardware this implies
         * EL2 and also the presence of the Security Extensions.
         * For QEMU, for backwards-compatibility we implement some
         * CPUs or CPU configs which have no actual EL2 or EL3 but do
         * include the various other features that V7VE implies.
         * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
         * Security Extensions is ARM_FEATURE_EL3.
         */
        assert(cpu_isar_feature(arm_div, cpu));
        set_feature(env, ARM_FEATURE_LPAE);
        set_feature(env, ARM_FEATURE_V7);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_MPIDR);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V6K);
        } else {
            set_feature(env, ARM_FEATURE_V6);
        }

        /* Always define VBAR for V7 CPUs even if it doesn't exist in
         * non-EL3 configs. This is needed by some legacy boards.
         */
        set_feature(env, ARM_FEATURE_VBAR);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_MVFR);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        set_feature(env, ARM_FEATURE_V5);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            assert(cpu_isar_feature(jazelle, cpu));
            set_feature(env, ARM_FEATURE_AUXCR);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V5)) {
        set_feature(env, ARM_FEATURE_V4T);
    }
    if (arm_feature(env, ARM_FEATURE_VFP4)) {
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_VFP_FP16);
    }
    if (arm_feature(env, ARM_FEATURE_VFP3)) {
        set_feature(env, ARM_FEATURE_VFP);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        set_feature(env, ARM_FEATURE_V7MP);
        set_feature(env, ARM_FEATURE_PXN);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
        set_feature(env, ARM_FEATURE_CBAR);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
        !arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_THUMB_DSP);
    }

    if (arm_feature(env, ARM_FEATURE_V7) &&
        !arm_feature(env, ARM_FEATURE_M) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        /* v7VMSA drops support for the old ARMv5 tiny pages, so we
         * can use 4K pages.
         */
        pagebits = 12;
    } else {
        /* For CPUs which might have tiny 1K pages, or which have an
         * MPU and might have small region sizes, stick with 1K pages.
         */
        pagebits = 10;
    }
    if (!set_preferred_target_page_bits(pagebits)) {
        /* This can only ever happen for hotplugging a CPU, or if
         * the board code incorrectly creates a CPU which it has
         * promised via minimum_page_size that it will not.
         */
        error_setg(errp, "This CPU requires a smaller page size than the "
                   "system is using");
        return;
    }
    /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
     * We don't support setting cluster ID ([16..23]) (known as Aff2
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
        cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
                                               ARM_DEFAULT_CPUS_PER_CLUSTER);
    }

    if (cpu->reset_hivecs) {
        cpu->reset_sctlr |= (1 << 13);
    }

    if (cpu->cfgend) {
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            cpu->reset_sctlr |= SCTLR_EE;
        } else {
            cpu->reset_sctlr |= SCTLR_B;
        }
    }

    if (!cpu->has_el3) {
        /* If the has_el3 CPU property is disabled then we need to disable the
         * feature.
         */
        unset_feature(env, ARM_FEATURE_EL3);

        /* Disable the security extension feature bits in the processor feature
         * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
         */
        cpu->id_pfr1 &= ~0xf0;
        cpu->isar.id_aa64pfr0 &= ~0xf000;
    }

    if (!cpu->has_el2) {
        unset_feature(env, ARM_FEATURE_EL2);
    }

    if (!cpu->has_pmu) {
        unset_feature(env, ARM_FEATURE_PMU);
        cpu->id_aa64dfr0 &= ~0xf00;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* Disable the hypervisor feature bits in the processor feature
         * registers if we don't have EL2. These are id_pfr1[15:12] and
         * id_aa64pfr0_el1[11:8].
         */
        cpu->isar.id_aa64pfr0 &= ~0xf00;
        cpu->id_pfr1 &= ~0xf000;
    }

    /* MPU can be configured out of a PMSA CPU either by setting has-mpu
     * to false or by setting pmsav7-dregion to 0.
     */
    if (!cpu->has_mpu) {
        cpu->pmsav7_dregion = 0;
    }
    if (cpu->pmsav7_dregion == 0) {
        cpu->has_mpu = false;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V7)) {
        uint32_t nr = cpu->pmsav7_dregion;

        if (nr > 0xff) {
            error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
                }
            } else {
                env->pmsav7.drbar = g_new0(uint32_t, nr);
                env->pmsav7.drsr = g_new0(uint32_t, nr);
                env->pmsav7.dracr = g_new0(uint32_t, nr);
            }
        }
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        uint32_t nr = cpu->sau_sregion;

        if (nr > 0xff) {
            error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            env->sau.rbar = g_new0(uint32_t, nr);
            env->sau.rlar = g_new0(uint32_t, nr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        set_feature(env, ARM_FEATURE_VBAR);
    }

    register_cp_regs_for_features(cpu);
    arm_cpu_register_gdb_regs_for_features(cpu);

    init_cpreg_list(cpu);

#ifndef CONFIG_USER_ONLY
    if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (!cpu->secure_memory) {
            cpu->secure_memory = cs->memory;
        }
        cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
                               cpu->secure_memory);
    }
    cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);

    /* No core_count specified, default to smp_cpus. */
    if (cpu->core_count == -1) {
        cpu->core_count = smp_cpus;
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    acc->parent_realize(dev, errp);
}
static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char **cpuname;
    char *typename;
    const char *cpunamestr;

    cpuname = g_strsplit(cpu_model, ",", 1);
    cpunamestr = cpuname[0];
#ifdef CONFIG_USER_ONLY
    /* For backwards compatibility usermode emulation allows "-cpu any",
     * which has the same semantics as "-cpu max".
     */
    if (!strcmp(cpunamestr, "any")) {
        cpunamestr = "max";
    }
#endif

    typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}
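/*
 * Illustrative only (added commentary): ARM_CPU_TYPE_NAME() appends the
 * TYPE_ARM_CPU suffix, so a model name such as "cortex-a15" is looked up as
 * the QOM type "cortex-a15-arm-cpu", which is what cpu_register() further
 * down in this file creates.
 */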
/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
static void arm926_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm926";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
    cpu->midr = 0x41069265;
    cpu->reset_fpsid = 0x41011090;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00090078;

    /*
     * ARMv5 does not have the ID_ISAR registers, but we can still
     * set the field to indicate Jazelle support within QEMU.
     */
    cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
}

static void arm946_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm946";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_PMSA);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x41059461;
    cpu->ctr = 0x0f004006;
    cpu->reset_sctlr = 0x00000078;
}

static void arm1026_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm1026";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_AUXCR);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
    cpu->midr = 0x4106a262;
    cpu->reset_fpsid = 0x410110a0;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00090078;
    cpu->reset_auxcr = 1;

    /*
     * ARMv5 does not have the ID_ISAR registers, but we can still
     * set the field to indicate Jazelle support within QEMU.
     */
    cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);

    {
        /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
        ARMCPRegInfo ifar = {
            .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW,
            .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &ifar);
    }
}

static void arm1136_r2_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    /* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
     * older core than plain "arm1136". In particular this does not
     * have the v6K features.
     * These ID register values are correct for 1136 but may be wrong
     * for 1136_r2 (in particular r0p2 does not actually implement most
     * of the ID registers).
     */

    cpu->dtb_compatible = "arm,arm1136";
    set_feature(&cpu->env, ARM_FEATURE_V6);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
    cpu->midr = 0x4107b362;
    cpu->reset_fpsid = 0x410120b4;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00050078;
    cpu->id_pfr0 = 0x111;
    cpu->id_mmfr0 = 0x01130003;
    cpu->id_mmfr1 = 0x10030302;
    cpu->id_mmfr2 = 0x01222110;
    cpu->isar.id_isar0 = 0x00140011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11231111;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x141;
    cpu->reset_auxcr = 7;
}

static void arm1136_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm1136";
    set_feature(&cpu->env, ARM_FEATURE_V6K);
    set_feature(&cpu->env, ARM_FEATURE_V6);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
    cpu->midr = 0x4117b363;
    cpu->reset_fpsid = 0x410120b4;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00050078;
    cpu->id_pfr0 = 0x111;
    cpu->id_mmfr0 = 0x01130003;
    cpu->id_mmfr1 = 0x10030302;
    cpu->id_mmfr2 = 0x01222110;
    cpu->isar.id_isar0 = 0x00140011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11231111;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x141;
    cpu->reset_auxcr = 7;
}

static void arm1176_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm1176";
    set_feature(&cpu->env, ARM_FEATURE_V6K);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_VAPA);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->midr = 0x410fb767;
    cpu->reset_fpsid = 0x410120b5;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00050078;
    cpu->id_pfr0 = 0x111;
    cpu->id_pfr1 = 0x11;
    cpu->id_dfr0 = 0x33;
    cpu->id_mmfr0 = 0x01130003;
    cpu->id_mmfr1 = 0x10030302;
    cpu->id_mmfr2 = 0x01222100;
    cpu->isar.id_isar0 = 0x0140011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11231121;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x01141;
    cpu->reset_auxcr = 7;
}

static void arm11mpcore_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm11mpcore";
    set_feature(&cpu->env, ARM_FEATURE_V6K);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_VAPA);
    set_feature(&cpu->env, ARM_FEATURE_MPIDR);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x410fb022;
    cpu->reset_fpsid = 0x410120b4;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
    cpu->id_pfr0 = 0x111;
    cpu->id_mmfr0 = 0x01100103;
    cpu->id_mmfr1 = 0x10020302;
    cpu->id_mmfr2 = 0x01222000;
    cpu->isar.id_isar0 = 0x00100011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11221011;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x141;
    cpu->reset_auxcr = 1;
}

static void cortex_m0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_V6);
    set_feature(&cpu->env, ARM_FEATURE_M);

    cpu->midr = 0x410cc200;
}

static void cortex_m3_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_M);
    set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
    cpu->midr = 0x410fc231;
    cpu->pmsav7_dregion = 8;
    cpu->id_pfr0 = 0x00000030;
    cpu->id_pfr1 = 0x00000200;
    cpu->id_dfr0 = 0x00100000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x00000030;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x00000000;
    cpu->id_mmfr3 = 0x00000000;
    cpu->isar.id_isar0 = 0x01141110;
    cpu->isar.id_isar1 = 0x02111000;
    cpu->isar.id_isar2 = 0x21112231;
    cpu->isar.id_isar3 = 0x01111110;
    cpu->isar.id_isar4 = 0x01310102;
    cpu->isar.id_isar5 = 0x00000000;
    cpu->isar.id_isar6 = 0x00000000;
}

static void cortex_m4_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_M);
    set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
    set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
    cpu->midr = 0x410fc240; /* r0p0 */
    cpu->pmsav7_dregion = 8;
    cpu->id_pfr0 = 0x00000030;
    cpu->id_pfr1 = 0x00000200;
    cpu->id_dfr0 = 0x00100000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x00000030;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x00000000;
    cpu->id_mmfr3 = 0x00000000;
    cpu->isar.id_isar0 = 0x01141110;
    cpu->isar.id_isar1 = 0x02111000;
    cpu->isar.id_isar2 = 0x21112231;
    cpu->isar.id_isar3 = 0x01111110;
    cpu->isar.id_isar4 = 0x01310102;
    cpu->isar.id_isar5 = 0x00000000;
    cpu->isar.id_isar6 = 0x00000000;
}

static void cortex_m33_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_M);
    set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
    set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
    set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
    cpu->midr = 0x410fd213; /* r0p3 */
    cpu->pmsav7_dregion = 16;
    cpu->sau_sregion = 8;
    cpu->id_pfr0 = 0x00000030;
    cpu->id_pfr1 = 0x00000210;
    cpu->id_dfr0 = 0x00200000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x00101F40;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x01000000;
    cpu->id_mmfr3 = 0x00000000;
    cpu->isar.id_isar0 = 0x01101110;
    cpu->isar.id_isar1 = 0x02212000;
    cpu->isar.id_isar2 = 0x20232232;
    cpu->isar.id_isar3 = 0x01111131;
    cpu->isar.id_isar4 = 0x01310132;
    cpu->isar.id_isar5 = 0x00000000;
    cpu->isar.id_isar6 = 0x00000000;
    cpu->clidr = 0x00000000;
    cpu->ctr = 0x8000c000;
}

static void arm_v7m_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

#ifndef CONFIG_USER_ONLY
    cc->do_interrupt = arm_v7m_cpu_do_interrupt;
#endif

    cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
}

static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
    /* Dummy the TCM region regs for the moment */
    { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST },
    { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST },
    { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
      .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cortex_r5_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_V7MP);
    set_feature(&cpu->env, ARM_FEATURE_PMSA);
    cpu->midr = 0x411fc153; /* r1p3 */
    cpu->id_pfr0 = 0x0131;
    cpu->id_pfr1 = 0x001;
    cpu->id_dfr0 = 0x010400;
    cpu->id_mmfr0 = 0x0210030;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x01200000;
    cpu->id_mmfr3 = 0x0211;
    cpu->isar.id_isar0 = 0x02101111;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232141;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x0010142;
    cpu->isar.id_isar5 = 0x0;
    cpu->isar.id_isar6 = 0x0;
    cpu->mp_is_up = true;
    cpu->pmsav7_dregion = 16;
    define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
}

static void cortex_r5f_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cortex_r5_initfn(obj);
    set_feature(&cpu->env, ARM_FEATURE_VFP3);
}

static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
    { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void cortex_a8_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a8";
    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_VFP3);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->midr = 0x410fc080;
    cpu->reset_fpsid = 0x410330c0;
    cpu->isar.mvfr0 = 0x11110222;
    cpu->isar.mvfr1 = 0x00011111;
    cpu->ctr = 0x82048004;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x1031;
    cpu->id_pfr1 = 0x11;
    cpu->id_dfr0 = 0x400;
    cpu->id_mmfr0 = 0x31100003;
    cpu->id_mmfr1 = 0x20000000;
    cpu->id_mmfr2 = 0x01202000;
    cpu->id_mmfr3 = 0x11;
    cpu->isar.id_isar0 = 0x00101111;
    cpu->isar.id_isar1 = 0x12112111;
    cpu->isar.id_isar2 = 0x21232031;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x00111142;
    cpu->dbgdidr = 0x15141000;
    cpu->clidr = (1 << 27) | (2 << 24) | 3;
    cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
    cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
    cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
    cpu->reset_auxcr = 2;
    define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
}

static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
    /* power_control should be set to maximum latency. Again,
     * default to 0 and set by private hook
     */
    { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
    { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
    { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
    { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    /* TLB lockdown control */
    { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
      .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
      .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
    { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    REGINFO_SENTINEL
};

static void cortex_a9_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a9";
    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_VFP3);
    set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    /* Note that A9 supports the MP extensions even for
     * A9UP and single-core A9MP (which are both different
     * and valid configurations; we don't model A9UP).
     */
    set_feature(&cpu->env, ARM_FEATURE_V7MP);
    set_feature(&cpu->env, ARM_FEATURE_CBAR);
    cpu->midr = 0x410fc090;
    cpu->reset_fpsid = 0x41033090;
    cpu->isar.mvfr0 = 0x11110222;
    cpu->isar.mvfr1 = 0x01111111;
    cpu->ctr = 0x80038003;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x1031;
    cpu->id_pfr1 = 0x11;
    cpu->id_dfr0 = 0x000;
    cpu->id_mmfr0 = 0x00100103;
    cpu->id_mmfr1 = 0x20000000;
    cpu->id_mmfr2 = 0x01230000;
    cpu->id_mmfr3 = 0x00002111;
    cpu->isar.id_isar0 = 0x00101111;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x00111142;
    cpu->dbgdidr = 0x35141000;
    cpu->clidr = (1 << 27) | (1 << 24) | 3;
    cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
    cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
    define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
}

#ifndef CONFIG_USER_ONLY
static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Linux wants the number of processors from here.
     * Might as well set the interrupt-controller bit too.
     */
    return ((smp_cpus - 1) << 24) | (1 << 23);
}
#endif

static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
    { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
      .writefn = arm_cp_write_ignore, },
#endif
    { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void cortex_a7_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a7";
    set_feature(&cpu->env, ARM_FEATURE_V7VE);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
    cpu->midr = 0x410fc075;
    cpu->reset_fpsid = 0x41023075;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x11111111;
    cpu->ctr = 0x84448003;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x00001131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x02010555;
    cpu->pmceid0 = 0x00000000;
    cpu->pmceid1 = 0x00000000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10101105;
    cpu->id_mmfr1 = 0x40000000;
    cpu->id_mmfr2 = 0x01240000;
    cpu->id_mmfr3 = 0x02102211;
    /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
     * table 4-41 gives 0x02101110, which includes the arm div insns.
     */
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x10011142;
    cpu->dbgdidr = 0x3515f005;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
    define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
}

static void cortex_a15_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a15";
    set_feature(&cpu->env, ARM_FEATURE_V7VE);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
    cpu->midr = 0x412fc0f1;
    cpu->reset_fpsid = 0x410430f0;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x11111111;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x00001131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x02010555;
    cpu->pmceid0 = 0x0000000;
    cpu->pmceid1 = 0x00000000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10201105;
    cpu->id_mmfr1 = 0x20000000;
    cpu->id_mmfr2 = 0x01240000;
    cpu->id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x10011142;
    cpu->dbgdidr = 0x3515f021;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
    define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
}

static void ti925t_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_V4T);
    set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
    cpu->midr = ARM_CPUID_TI925T;
    cpu->ctr = 0x5109149;
    cpu->reset_sctlr = 0x00000070;
}

static void sa1100_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "intel,sa1100";
    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x4401A11B;
    cpu->reset_sctlr = 0x00000070;
}

static void sa1110_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x6901B119;
    cpu->reset_sctlr = 0x00000070;
}

static void pxa250_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052100;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa255_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d00;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa260_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052903;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa261_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d05;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa262_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d06;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270a0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054110;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270a1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054111;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270b0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054112;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270b1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054113;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270c0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054114;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270c5_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054117;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}
#ifndef TARGET_AARCH64
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
 * otherwise, a CPU with as many features enabled as our emulation supports.
 * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
 * this only needs to handle 32 bits.
 */
static void arm_max_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (kvm_enabled()) {
        kvm_arm_set_cpu_features_from_host(cpu);
    } else {
        cortex_a15_initfn(obj);
#ifdef CONFIG_USER_ONLY
        /* We don't set these in system emulation mode for the moment,
         * since we don't correctly set (all of) the ID registers to
         * advertise them.
         */
        set_feature(&cpu->env, ARM_FEATURE_V8);
        {
            uint32_t t;

            t = cpu->isar.id_isar5;
            t = FIELD_DP32(t, ID_ISAR5, AES, 2);
            t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
            t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
            t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
            t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
            t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
            cpu->isar.id_isar5 = t;

            t = cpu->isar.id_isar6;
            t = FIELD_DP32(t, ID_ISAR6, DP, 1);
            cpu->isar.id_isar6 = t;
        }
#endif
    }
}
#endif

#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */

typedef struct ARMCPUInfo {
    const char *name;
    void (*initfn)(Object *obj);
    void (*class_init)(ObjectClass *oc, void *data);
} ARMCPUInfo;

static const ARMCPUInfo arm_cpus[] = {
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
    { .name = "arm926",      .initfn = arm926_initfn },
    { .name = "arm946",      .initfn = arm946_initfn },
    { .name = "arm1026",     .initfn = arm1026_initfn },
    /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
     * older core than plain "arm1136". In particular this does not
     * have the v6K features.
     */
    { .name = "arm1136-r2",  .initfn = arm1136_r2_initfn },
    { .name = "arm1136",     .initfn = arm1136_initfn },
    { .name = "arm1176",     .initfn = arm1176_initfn },
    { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
    { .name = "cortex-m0",   .initfn = cortex_m0_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-m3",   .initfn = cortex_m3_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-m4",   .initfn = cortex_m4_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-m33",  .initfn = cortex_m33_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-r5",   .initfn = cortex_r5_initfn },
    { .name = "cortex-r5f",  .initfn = cortex_r5f_initfn },
    { .name = "cortex-a7",   .initfn = cortex_a7_initfn },
    { .name = "cortex-a8",   .initfn = cortex_a8_initfn },
    { .name = "cortex-a9",   .initfn = cortex_a9_initfn },
    { .name = "cortex-a15",  .initfn = cortex_a15_initfn },
    { .name = "ti925t",      .initfn = ti925t_initfn },
    { .name = "sa1100",      .initfn = sa1100_initfn },
    { .name = "sa1110",      .initfn = sa1110_initfn },
    { .name = "pxa250",      .initfn = pxa250_initfn },
    { .name = "pxa255",      .initfn = pxa255_initfn },
    { .name = "pxa260",      .initfn = pxa260_initfn },
    { .name = "pxa261",      .initfn = pxa261_initfn },
    { .name = "pxa262",      .initfn = pxa262_initfn },
    /* "pxa270" is an alias for "pxa270-a0" */
    { .name = "pxa270",      .initfn = pxa270a0_initfn },
    { .name = "pxa270-a0",   .initfn = pxa270a0_initfn },
    { .name = "pxa270-a1",   .initfn = pxa270a1_initfn },
    { .name = "pxa270-b0",   .initfn = pxa270b0_initfn },
    { .name = "pxa270-b1",   .initfn = pxa270b1_initfn },
    { .name = "pxa270-c0",   .initfn = pxa270c0_initfn },
    { .name = "pxa270-c5",   .initfn = pxa270c5_initfn },
#ifndef TARGET_AARCH64
    { .name = "max",         .initfn = arm_max_initfn },
#endif
#ifdef CONFIG_USER_ONLY
    { .name = "any",         .initfn = arm_max_initfn },
#endif
#endif
    { .name = NULL }
};
static Property arm_cpu_properties[] = {
    DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
    DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
    DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                        mp_affinity, ARM64_AFFINITY_INVALID),
    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
    DEFINE_PROP_END_OF_LIST()
};
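/*
 * Illustrative only (added commentary): board code typically sets these
 * properties before realizing the CPU, for example
 *     object_property_set_bool(OBJECT(cpu), true, "start-powered-off",
 *                              &error_abort);
 * so that secondary CPUs stay halted until they are started, e.g. via PSCI.
 */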
#ifdef CONFIG_USER_ONLY
static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                                    int rw, int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception.vaddress = address;
    if (rw == 2) {
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        cs->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
#endif
static gchar *arm_gdb_arch_name(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return g_strdup("iwmmxt");
    }
    return g_strdup("arm");
}
static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, arm_cpu_realizefn,
                                    &acc->parent_realize);
    dc->props = arm_cpu_properties;

    acc->parent_reset = cc->reset;
    cc->reset = arm_cpu_reset;

    cc->class_by_name = arm_cpu_class_by_name;
    cc->has_work = arm_cpu_has_work;
    cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
    cc->dump_state = arm_cpu_dump_state;
    cc->set_pc = arm_cpu_set_pc;
    cc->gdb_read_register = arm_cpu_gdb_read_register;
    cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
#else
    cc->do_interrupt = arm_cpu_do_interrupt;
    cc->do_unaligned_access = arm_cpu_do_unaligned_access;
    cc->do_transaction_failed = arm_cpu_do_transaction_failed;
    cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
    cc->asidx_from_attrs = arm_asidx_from_attrs;
    cc->vmsd = &vmstate_arm_cpu;
    cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
    cc->write_elf64_note = arm_cpu_write_elf64_note;
    cc->write_elf32_note = arm_cpu_write_elf32_note;
#endif
    cc->gdb_num_core_regs = 26;
    cc->gdb_core_xml_file = "arm-core.xml";
    cc->gdb_arch_name = arm_gdb_arch_name;
    cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
    cc->gdb_stop_before_watchpoint = true;
    cc->debug_excp_handler = arm_debug_excp_handler;
    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
#if !defined(CONFIG_USER_ONLY)
    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
#endif

    cc->disas_set_info = arm_disas_set_info;

    cc->tcg_initialize = arm_translate_init;
}
#ifdef CONFIG_KVM
static void arm_host_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    kvm_arm_set_cpu_features_from_host(cpu);
}
static const TypeInfo host_arm_cpu_type_info = {
    .name = TYPE_ARM_HOST_CPU,
#ifdef TARGET_AARCH64
    .parent = TYPE_AARCH64_CPU,
#else
    .parent = TYPE_ARM_CPU,
#endif
    .instance_init = arm_host_initfn,
};
#endif
static void cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_ARM_CPU,
        .instance_size = sizeof(ARMCPU),
        .instance_init = info->initfn,
        .class_size = sizeof(ARMCPUClass),
        .class_init = info->class_init,
    };

    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}
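/*
 * Illustrative only (added commentary): registering the arm_cpus[] entry
 * { .name = "cortex-a15" } produces the QOM type "cortex-a15-arm-cpu",
 * which can then be instantiated with
 *     object_new(ARM_CPU_TYPE_NAME("cortex-a15"));
 */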
static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_post_init = arm_cpu_post_init,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};
static const TypeInfo idau_interface_type_info = {
    .name = TYPE_IDAU_INTERFACE,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(IDAUInterfaceClass),
};
static void arm_cpu_register_types(void)
{
    const ARMCPUInfo *info = arm_cpus;

    type_register_static(&arm_cpu_type_info);
    type_register_static(&idau_interface_type_info);

    while (info->name) {
        cpu_register(info);
        info++;
    }

#ifdef CONFIG_KVM
    type_register_static(&host_arm_cpu_type_info);
#endif
}

type_init(arm_cpu_register_types)