#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"

#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size);
#endif
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
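/* For reference (annotation, not part of the original file): with VFP3 plus
 * NEON the numbering handled above works out as GDB regs 0..31 = d0..d31
 * (8 bytes each), regs 32..47 = the q0..q15 aliases (16 bytes each, built
 * from consecutive d-register pairs via the (reg - 32) * 2 indexing), and
 * the final three regs = FPSID, FPSCR, FPEXC (4 bytes each) -- 51 registers
 * in total, matching the count registered with the gdbstub later in this
 * file.
 */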
static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}
static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
static int raw_read(CPUARMState *env, const ARMCPRegInfo *ri,
                    uint64_t *value)
{
    if (ri->type & ARM_CP_64BIT) {
        *value = CPREG_FIELD64(env, ri);
    } else {
        *value = CPREG_FIELD32(env, ri);
    }
    return 0;
}

static int raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                     uint64_t value)
{
    if (ri->type & ARM_CP_64BIT) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
    return 0;
}
static bool read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t *v)
{
    /* Raw read of a coprocessor register (as needed for migration, etc);
     * return true on success, false if the read is impossible for some
     * reason.
     */
    if (ri->type & ARM_CP_CONST) {
        *v = ri->resetvalue;
    } else if (ri->raw_readfn) {
        return (ri->raw_readfn(env, ri, v) == 0);
    } else if (ri->readfn) {
        return (ri->readfn(env, ri, v) == 0);
    } else {
        if (ri->type & ARM_CP_64BIT) {
            *v = CPREG_FIELD64(env, ri);
        } else {
            *v = CPREG_FIELD32(env, ri);
        }
    }
    return true;
}

static bool write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Return true on success, false if the write is impossible for some
     * reason.
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return true;
    } else if (ri->raw_writefn) {
        return (ri->raw_writefn(env, ri, v) == 0);
    } else if (ri->writefn) {
        return (ri->writefn(env, ri, v) == 0);
    } else {
        if (ri->type & ARM_CP_64BIT) {
            CPREG_FIELD64(env, ri) = v;
        } else {
            CPREG_FIELD32(env, ri) = v;
        }
    }
    return true;
}
bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;
        uint64_t v;

        ri = get_arm_cp_reginfo(cpu, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        if (!read_raw_cp_reg(&cpu->env, ri, &v)) {
            ok = false;
            continue;
        }
        cpu->cpreg_values[i] = v;
    }
    return ok;
}
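/* Usage note (illustrative, not part of the original file): these two
 * functions are meant to bracket migration and KVM state sync, roughly:
 *
 *     write_cpustate_to_list(cpu);   // e.g. in a vmstate pre_save hook:
 *                                    // env -> (index,value) list
 *     ... transfer cpreg_indexes[] / cpreg_values[] ...
 *     write_list_to_cpustate(cpu);   // e.g. in a post_load hook:
 *                                    // list -> env, with readback check
 *
 * The exact hook names above are an assumption; see target-arm/machine.c
 * for the actual callers.
 */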
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        uint64_t readback;
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        if (!write_raw_cp_reg(&cpu->env, ri, v) ||
            !read_raw_cp_reg(&cpu->env, ri, &readback) ||
            readback != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint32_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;
    }
}
static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys = NULL;
    int arraylen;

    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);

    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
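/* Note the two-pass pattern above: count_cpreg() first sizes the arrays,
 * then cpreg_array_len is reset and add_cpreg_to_list() fills them in; the
 * final assert() checks that the two passes agreed on the number of
 * migratable registers.
 */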
static int dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    env->cp15.c3 = value;
    tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
    return 0;
}

static int fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (env->cp15.c13_fcse != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(env, 1);
        env->cp15.c13_fcse = value;
    }
    return 0;
}

static int contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(env, 1);
    }
    env->cp15.c13_context = value;
    return 0;
}
static int tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    tlb_flush(env, 1);
    return 0;
}

static int tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
    return 0;
}

static int tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    tlb_flush(env, value == 0);
    return 0;
}

static int tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
    return 0;
}
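/* Note on the TLBIASID implementation above (annotation): QEMU's softmmu
 * TLB is indexed by virtual address only and is not ASID-tagged, so
 * invalidate-by-ASID can only be modelled as a full flush; the written
 * value is used only to derive the flush-global flag passed to tlb_flush().
 */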
static const ARMCPRegInfo cp_reginfo[] = {
    /* DBGDIDR: just RAZ. In particular this means the "debug architecture
     * version" bits will read as a reserved value, which should cause
     * Linux to not try to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    /* Note: fieldoffset fixed to c13_context to match contextidr_write();
     * the original pointed at c13_fcse, which was a copy-paste error.
     */
    { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
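/* Note (annotation): ARM_CP_OVERRIDE on the wildcarded CACHEMAINT entry
 * marks that space as deliberately replaceable, so later, more specific
 * definitions (e.g. the OMAP cache maintenance registers below) can take
 * over parts of it rather than being rejected as duplicate definitions.
 */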
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static int cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (env->cp15.c1_coproc != value) {
        env->cp15.c1_coproc = value;
        /* ??? Is this safe when called from within a TB?  */
        tb_flush(env);
    }
    return 0;
}
static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
static int pmreg_read(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t *value)
{
    /* Generic performance monitor register read function for where
     * user access may be allowed by PMUSERENR.
     */
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return EXCP_UDEF;
    }
    *value = CPREG_FIELD32(env, ri);
    return 0;
}

static int pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return EXCP_UDEF;
    }
    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);
    return 0;
}

static int pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return EXCP_UDEF;
    }
    env->cp15.c9_pmcnten |= value;
    return 0;
}

static int pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return EXCP_UDEF;
    }
    env->cp15.c9_pmcnten &= ~value;
    return 0;
}

static int pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return EXCP_UDEF;
    }
    env->cp15.c9_pmovsr &= ~value;
    return 0;
}

static int pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return EXCP_UDEF;
    }
    env->cp15.c9_pmxevtyper = value & 0xff;
    return 0;
}

static int pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
    return 0;
}

static int pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    env->cp15.c9_pminten |= value;
    return 0;
}

static int pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pminten &= ~value;
    return 0;
}
static int vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    env->cp15.c12_vbar = value & ~0x1Ful;
    return 0;
}

static int ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t *value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    *value = cpu->ccsidr[env->cp15.c0_cssel];
    return 0;
}

static int csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c0_cssel = value & 0xf;
    return 0;
}
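/* Note (annotation): csselr_write() and ccsidr_read() cooperate: a write to
 * CSSELR selects a cache level/type in its low four bits, and the next
 * CCSIDR read returns the pre-computed ID value for that selection from
 * cpu->ccsidr[].
 */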
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters).
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .readfn = pmreg_read, .writefn = pmcntenset_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .readfn = pmreg_read, .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .readfn = pmreg_read, .writefn = pmovsr_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    /* Unimplemented so WI. Strictly speaking write accesses in PL0 should
     * respect PMUSERENR.
     */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI. XXX should respect PMUSERENR.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* Unimplemented, RAZ/WI. XXX PMUSERENR */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .readfn = pmreg_read, .writefn = pmxevtyper_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. XXX PMUSERENR */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    { .name = "VBAR", .cp = 15, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
      .resetvalue = 0 },
    { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
      .resetvalue = 0, },
    { .name = "CCSIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static int teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    value &= 1;
    env->teecr = value;
    return 0;
}

static int teehbr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t *value)
{
    /* This is a helper function because the user access rights
     * depend on the value of the TEECR.
     */
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
        return EXCP_UDEF;
    }
    *value = env->teehbr;
    return 0;
}

static int teehbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
        return EXCP_UDEF;
    }
    env->teehbr = value;
    return 0;
}
static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .resetvalue = 0, .raw_readfn = raw_read, .raw_writefn = raw_write,
      .readfn = teehbr_read, .writefn = teehbr_write },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls1),
      .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls2),
      .resetvalue = 0 },
    { .name = "TPIDRPRW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c13_tls3),
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick * GTIMER_SCALE);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
    }
}
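/* Worked example for the control-bit handling above (annotation): in
 * CNTx_CTL bit 0 is ENABLE, bit 1 is IMASK and bit 2 is ISTATUS.
 * deposit32(gt->ctl, 2, 1, istatus) rewrites just bit 2 with the freshly
 * computed status -- e.g. deposit32(0x1, 2, 1, 1) == 0x5 -- and the
 * interrupt line is raised only when ISTATUS is set while IMASK is clear.
 */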
static int gt_cntfrq_read(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t *value)
{
    /* Not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
        return EXCP_UDEF;
    }
    *value = env->cp15.c14_cntfrq;
    return 0;
}

static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->opc1 & 1;

    timer_del(cpu->gt_timer[timeridx]);
}

static int gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t *value)
{
    int timeridx = ri->opc1 & 1;

    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return EXCP_UDEF;
    }
    *value = gt_get_countervalue(env);
    return 0;
}

static int gt_cval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t *value)
{
    int timeridx = ri->opc1 & 1;

    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return EXCP_UDEF;
    }
    *value = env->cp15.c14_timer[timeridx].cval;
    return 0;
}

static int gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    int timeridx = ri->opc1 & 1;

    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
    return 0;
}

static int gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t *value)
{
    int timeridx = ri->crm & 1;

    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return EXCP_UDEF;
    }
    *value = (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                        gt_get_countervalue(env));
    return 0;
}

static int gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    int timeridx = ri->crm & 1;

    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
    return 0;
}

static int gt_ctl_read(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t *value)
{
    int timeridx = ri->crm & 1;

    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return EXCP_UDEF;
    }
    *value = env->cp15.c14_timer[timeridx].ctl;
    return 0;
}

static int gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = value & 3;
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));
    }
    return 0;
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
      .readfn = gt_cntfrq_read, .raw_readfn = raw_read,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .cp = 15, .crn = 14, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .readfn = gt_ctl_read, .writefn = gt_ctl_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .readfn = gt_ctl_read, .writefn = gt_ctl_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0,
      .readfn = gt_cval_read, .writefn = gt_cval_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0,
      .readfn = gt_cval_read, .writefn = gt_cval_write,
      .raw_readfn = raw_read, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode none of the generic timer registers are accessible, and
 * their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif
static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        env->cp15.c7_par = value;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        env->cp15.c7_par = value & 0xfffff6ff;
    } else {
        env->cp15.c7_par = value & 0xfffff1ff;
    }
    return 0;
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

/* Return true if extended addresses are enabled, ie this is an
 * LPAE implementation and we are using the long-descriptor translation
 * table format because the TTBCR EAE bit is set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_LPAE)
        && (env->cp15.c2_control & (1U << 31));
}

static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    if (ri->opc2 & 4) {
        /* Other states are only available with TrustZone */
        return EXCP_UDEF;
    }
    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.c7_par = par64;
        env->cp15.c7_par_hi = par64 >> 32;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.c7_par = phys_addr & 0xfffff000;
            }
        } else {
            env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
                               ((ret & (12 << 1)) >> 6) |
                               ((ret & 0xf) << 1) | 1;
        }
        env->cp15.c7_par_hi = 0;
    }
    return 0;
}
#endif
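/* For reference (annotation): in the 64-bit PAR assembled above, bit 0 is F
 * (translation aborted), bits 6:1 carry the fault status on failure, bit 11
 * flags the LPAE format, and on success the physical address is stored with
 * its low 12 bits masked off.
 */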
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
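/* Worked example (annotation): extended_mpu_ap_bits(0x0f) == 0x33 -- each
 * packed 2-bit AP field is moved into its own 4-bit slot -- and
 * simple_mpu_ap_bits(0x33) == 0x0f packs them back, so the two conversions
 * are inverses for the values the "simple" format can represent.
 */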
static int pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c5_data = extended_mpu_ap_bits(value);
    return 0;
}

static int pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t *value)
{
    *value = simple_mpu_ap_bits(env->cp15.c5_data);
    return 0;
}

static int pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c5_insn = extended_mpu_ap_bits(value);
    return 0;
}

static int pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t *value)
{
    *value = simple_mpu_ap_bits(env->cp15.c5_insn);
    return 0;
}

static int arm946_prbs_read(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t *value)
{
    if (ri->crm >= 8) {
        return EXCP_UDEF;
    }
    *value = env->cp15.c6_region[ri->crm];
    return 0;
}

static int arm946_prbs_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (ri->crm >= 8) {
        return EXCP_UDEF;
    }
    env->cp15.c6_region[ri->crm] = value;
    return 0;
}
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS", .cp = 15, .crn = 6, .crm = CP_ANY, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW,
      .readfn = arm946_prbs_read, .writefn = arm946_prbs_write, },
    REGINFO_SENTINEL
};
static int vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    int maskshift = extract32(value, 0, 3);

    if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
        value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
    } else {
        value &= 7;
    }
    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    env->cp15.c2_control = value;
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
    return 0;
}

static int vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(env, 1);
    }
    return vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base_mask = 0xffffc000u;
    env->cp15.c2_control = 0;
    env->cp15.c2_mask = 0;
}
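/* Worked example (annotation): with TTBCR.N == 2, maskshift is 2, so
 * c2_mask becomes ~(0xffffffff >> 2) == 0xc0000000 (the VA bits that steer
 * a lookup to TTBR1) and c2_base_mask becomes ~(0x3fff >> 2) == 0xfffff000
 * (the alignment mask applied to the TTBR0 base). N == 0 reproduces the
 * reset values set in vmsa_ttbcr_reset() above.
 */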
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "TTBR0", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, },
    { .name = "TTBR1", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_base1), .resetvalue = 0, },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_ttbcr_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static int omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
    return 0;
}

static int omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
    return 0;
}

static int omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
    return 0;
}

static int omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
    return 0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static int xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= 0x3fff;
    if (env->cp15.c15_cpar != value) {
        /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
        tb_flush(env);
        env->cp15.c15_cpar = value;
    }
    return 0;
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
static int mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t *value)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11])
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
    }
    *value = mpidr;
    return 0;
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
static int par64_read(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
{
    *value = ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
    return 0;
}

static int par64_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    env->cp15.c7_par_hi = value >> 32;
    env->cp15.c7_par = value;
    return 0;
}

static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c7_par_hi = 0;
    env->cp15.c7_par = 0;
}

static int ttbr064_read(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t *value)
{
    *value = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
    return 0;
}

static int ttbr064_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c2_base0_hi = value >> 32;
    env->cp15.c2_base0 = value;
    return 0;
}

static int ttbr064_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Writes to the 64 bit format TTBRs may change the ASID */
    tlb_flush(env, 1);
    return ttbr064_raw_write(env, ri, value);
}

static void ttbr064_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base0_hi = 0;
    env->cp15.c2_base0 = 0;
}

static int ttbr164_read(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t *value)
{
    *value = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
    return 0;
}

static int ttbr164_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c2_base1_hi = value >> 32;
    env->cp15.c2_base1 = value;
    return 0;
}

static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base1_hi = 0;
    env->cp15.c2_base1 = 0;
}
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr064_read,
      .writefn = ttbr064_write, .raw_writefn = ttbr064_raw_write,
      .resetfn = ttbr064_reset },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr164_read,
      .writefn = ttbr164_write, .resetfn = ttbr164_reset },
    REGINFO_SENTINEL
};
static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    env->cp15.c1_sys = value;
    /* ??? Lots of these bits are not implemented.  */
    /* This may enable/disable the MMU, so do a TLB flush.  */
    tlb_flush(env, 1);
    return 0;
}
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement no event counters.
         */
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .readfn = pmreg_read, .writefn = pmcr_write,
            .raw_readfn = raw_read, .raw_writefn = raw_write,
        };
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_cp_reginfo[] = {
            /* Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        };
        define_one_arm_cp_reg(cpu, &auxcr);
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        ARMCPRegInfo cbar = {
            .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
            .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
            .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
        };
        define_one_arm_cp_reg(cpu, &cbar);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}
ARMCPU *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    ObjectClass *oc;

    oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
    if (!oc) {
        return NULL;
    }
    cpu = ARM_CPU(object_new(object_class_get_name(oc)));

    /* TODO this should be set centrally, once possible */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}
static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}
static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     */
    int crm, opc1, opc2;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                uint32_t *key = g_new(uint32_t, 1);
                ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
                int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
                *key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
                r2->opaque = opaque;
                /* Make sure reginfo passed to helpers for wildcarded regs
                 * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
                 */
                r2->crm = crm;
                r2->opc1 = opc1;
                r2->opc2 = opc2;
                /* By convention, for wildcarded registers only the first
                 * entry is used for migration; the others are marked as
                 * NO_MIGRATE so we don't try to transfer the register
                 * multiple times. Special registers (ie NOP/WFI) are
                 * never migratable.
                 */
                if ((r->type & ARM_CP_SPECIAL) ||
                    ((r->crm == CP_ANY) && crm != 0) ||
                    ((r->opc1 == CP_ANY) && opc1 != 0) ||
                    ((r->opc2 == CP_ANY) && opc2 != 0)) {
                    r2->type |= ARM_CP_NO_MIGRATE;
                }
                /* Overriding of an existing definition must be explicitly
                 * requested.
                 */
                if (!(r->type & ARM_CP_OVERRIDE)) {
                    ARMCPRegInfo *oldreg;
                    oldreg = g_hash_table_lookup(cpu->cp_regs, key);
                    if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
                        fprintf(stderr, "Register redefined: cp=%d %d bit "
                                "crn=%d crm=%d opc1=%d opc2=%d, "
                                "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                                r2->crn, r2->crm, r2->opc1, r2->opc2,
                                oldreg->name, r2->name);
                        g_assert_not_reached();
                    }
                }
                g_hash_table_insert(cpu->cp_regs, key, r2);
            }
        }
    }
}
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}
const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpu->cp_regs, &encoded_cp);
}
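
/* For example, a lookup for a 32-bit cp15 register at crn=0, crm=7,
 * opc1=0, opc2=4 would be:
 *
 *     const ARMCPRegInfo *ri =
 *         get_arm_cp_reginfo(cpu, ENCODE_CP_REG(15, 0, 0, 7, 0, 4));
 *
 * This finds the entry whether it was defined with exact fields or
 * expanded from a CP_ANY wildcard, since the wildcard loop above inserts
 * one hash entry per concrete encoding.
 */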
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
    return 0;
}

int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
{
    /* Helper coprocessor read function for read-as-zero registers */
    *value = 0;
    return 0;
}
static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
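
/* Note that cpsr_read() and cpsr_write() are intended to round-trip:
 * writing back a value just read leaves the state unchanged. For example,
 *
 *     cpsr_write(env, cpsr_read(env), CPSR_NZCV);
 *
 * re-derives ZF/NF/CF/VF from the packed word without touching
 * uncached_cpsr, because CACHED_CPSR_BITS is masked out above.
 */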
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    x = ((x & 0xff000000) >> 24)
      | ((x & 0x00ff0000) >> 8)
      | ((x & 0x0000ff00) << 8)
      | ((x & 0x000000ff) << 24);
    x = ((x & 0xf0f0f0f0) >> 4)
      | ((x & 0x0f0f0f0f) << 4);
    x = ((x & 0x88888888) >> 3)
      | ((x & 0x44444444) >> 1)
      | ((x & 0x22222222) << 1)
      | ((x & 0x11111111) << 3);
    return x;
}
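
/* The reversal above works in three layers: first the four bytes are
 * swapped end-for-end, then the nibbles within each byte, then the bits
 * within each nibble (via the 3/1-bit shuffle). As a quick check,
 * HELPER(rbit)(0x00000001) yields 0x80000000.
 */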
#if defined(CONFIG_USER_ONLY)

void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else
/* Map CPU modes onto saved register banks.  */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
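
/* Only r8-r12 (five registers) are banked for FIQ, which is why the
 * memcpy calls above move 5 * sizeof(uint32_t); r13, r14 and the SPSR
 * are banked per-mode through the banked_r13/banked_r14/banked_spsr
 * arrays indexed by bank_number().
 */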
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}
/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
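
/* The pops above consume the v7M hardware exception frame in its
 * architectural order: r0, r1, r2, r3, r12, lr, pc, xPSR at ascending
 * stack addresses, mirroring the pushes performed on exception entry in
 * arm_v7m_cpu_do_interrupt() below.
 */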
/* Exception names for debug logging; note that not all of these
 * precisely correspond to architectural exceptions.
 */
static const char * const excnames[] = {
    [EXCP_UDEF] = "Undefined Instruction",
    [EXCP_SWI] = "SVC",
    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
    [EXCP_DATA_ABORT] = "Data Abort",
    [EXCP_IRQ] = "IRQ",
    [EXCP_FIQ] = "FIQ",
    [EXCP_BKPT] = "Breakpoint",
    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
    [EXCP_STREX] = "QEMU intercept of STREX",
};
static inline void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(env->exception_index);

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction.  */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
/* Handle a CPU exception.  */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    assert(!IS_M(env));

    arm_log_exception(env->exception_index);

    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.c5_insn, env->cp15.c6_insn);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->cp15.c5_data, env->cp15.c6_data);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        /* when enabled, base address cannot be remapped.  */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and has a secure
         * and an un-secure copy. Since the cpu is always in un-secure operation
         * and is never in monitor mode, this feature is always active.
         * Note: only bits 31:5 are valid.
         */
        addr += env->cp15.c12_vbar;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    /* This is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & (1 << 30)) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}
static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}
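
/* As a worked example, with TTBCR.N = 1 the c2_mask register covers the
 * top bit of the address, so a lookup of 0x80001000 takes the TTBR1 path
 * while 0x00001000 uses TTBR0; in both cases bits [31:20] of the address
 * select one of the 4096 level-1 descriptors via (address >> 18) & 0x3ffc
 * (the index scaled by the 4-byte descriptor size).
 */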
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
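
/* Worked example for the 1MB section path above: a descriptor of
 * 0x40000c1e (type bits = 2, AP = 3, domain = 0) for virtual address
 * 0x00045678 produces phys_addr = (0x40000c1e & 0xfff00000) |
 * (0x00045678 & 0x000fffff) = 0x40045678, with code 13 reported if the
 * permission check then fails.
 */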
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section.  */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault.  */
        } else {
            code = 11; /* Page domain fault.  */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit.  */
        if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
            /* Access flag fault.  */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;
static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    uint32_t tsz;
    uint64_t ttbr;
    int ttbr_select;
    int n;
    hwaddr descaddr;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
    if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR.  Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;
    } else {
        ttbr = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* If the region is small enough we will skip straight to a 2nd level
     * lookup. This affects the number of bits of the address used in
     * combination with the TTBR to find the first descriptor. ('n' here
     * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
     * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
     */
    if (tsz > 1) {
        level = 2;
        n = 14 - tsz;
    } else {
        n = 5 - tsz;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    if (tsz) {
        address &= (0xffffffffU >> tsz);
    }

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 40);
    descaddr &= ~((1ULL << n) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
        descriptor = ldq_phys(descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << (39 - (9 * level)));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}
/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}
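
/* The FCSE remapping above means that with a nonzero FCSE PID, low
 * virtual addresses are relocated before translation: for example with
 * c13_fcse = 0x06000000, a load from VA 0x00008000 is translated as if
 * it were to 0x06008000, while addresses at or above 0x02000000 are
 * unaffected.
 */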
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0) {
        return -1;
    }

    return phys_addr;
}
)(CPUARMState
*env
, uint32_t mode
, uint32_t val
)
3199 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
3200 env
->regs
[13] = val
;
3202 env
->banked_r13
[bank_number(mode
)] = val
;
3206 uint32_t HELPER(get_r13_banked
)(CPUARMState
*env
, uint32_t mode
)
3208 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
3209 return env
->regs
[13];
3211 return env
->banked_r13
[bank_number(mode
)];
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
#endif

/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
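
/* Each inclusion of op_addsub.h here and below instantiates a family of
 * parallel add/subtract helpers from whatever ADD16/SUB16/ADD8/SUB8 and
 * PFX macros are in force at that point; with PFX q, for instance, the
 * template is expected to emit the qadd16/qsub16/qadd8/qsub8 style
 * helpers used by the ARMv6 SIMD instructions.
 */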
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)


#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
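
/* Example: with GE flags 0b0011 the mask above becomes 0x0000ffff, so
 * SEL takes the low halfword from a and the high halfword from b.
 */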
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
} \
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
                                      void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
}

VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#undef VFP_CONV_FIX
/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
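
/* Sanity check of the table lookup above: for a = 0.5, q_int = 256,
 * r = 1.0 / (256.5 / 512.0) ~= 1.9961, and the rounded result is
 * 511 / 256 = 1.99609375, i.e. the estimate of 1/0.5 to about 8 bits.
 */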
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);
    return make_float32(val32);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        int64_t q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */

    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0;*/
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
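
/* Sanity check: for a = 0.25 the first branch applies, q0 = 128,
 * r = 1/sqrt(128.5/512) ~= 1.9961, and the returned estimate is
 * 511 / 256 = 1.99609375, approximating 1/sqrt(0.25) = 2.0.
 */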
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 VMAXNM/VMINNM */
float32 VFP_HELPER(maxnm, s)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_maxnum(a, b, fpst);
}

float64 VFP_HELPER(maxnm, d)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_maxnum(a, b, fpst);
}

float32 VFP_HELPER(minnm, s)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_minnum(a, b, fpst);
}

float64 VFP_HELPER(minnm, d)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_minnum(a, b, fpst);
}
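
/* Unlike plain min/max comparisons, the float32_maxnum/minnum family
 * used here follows the IEEE 754-2008 maxNum/minNum rules, where a
 * single quiet NaN operand is treated as missing data and the other
 * operand is returned; this is the behaviour ARMv8 specifies for
 * VMAXNM/VMINNM.
 */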