#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* For crc32 */
#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size);
#endif
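/* The two helpers below back the gdbstub's VFP register view: D0..D[nregs-1]
 * come first (8 bytes each), then on NEON cores a Q-register alias area
 * (16 bytes each), and finally FPSID/FPSCR/FPEXC (4 bytes each). The return
 * value is the number of bytes read or written, or 0 if the register number
 * is out of range.
 */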
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}
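/* raw_read() and raw_write() access the backing CPUARMState field directly,
 * with none of the side effects a readfn/writefn may have; they serve as the
 * default raw accessors for the migration/sync code below whenever a reginfo
 * does not supply its own raw_readfn/raw_writefn.
 */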
static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}
static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}
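/* Copy the (index,value) list back into the CPU state. Returns false if any
 * value could not be written exactly as supplied, e.g. a read-only register
 * whose incoming migration value doesn't match its current state.
 */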
bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}
void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys = NULL;
    int arraylen;

    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);

    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
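/* Note: the hash table keys are 32-bit internal indexes; cpreg_to_kvm_id()
 * and kvm_to_cpreg_id() convert to and from the 64-bit ID encoding that the
 * cpreg_indexes[] array shares with KVM and the migration stream.
 */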
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    env->cp15.c3 = value;
    tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (env->cp15.c13_fcse != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(env, 1);
        env->cp15.c13_fcse = value;
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(env, 1);
    }
    env->cp15.c13_context = value;
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    tlb_flush(env, 1);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    tlb_flush(env, value == 0);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
}
static const ARMCPRegInfo cp_reginfo[] = {
    /* DBGDIDR: just RAZ. In particular this means the "debug architecture
     * version" bits will read as a reserved value, which should cause
     * Linux to not try to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    if (env->cp15.c1_coproc != value) {
        env->cp15.c1_coproc = value;
        /* ??? Is this safe when called from within a TB? */
        tb_flush(env);
    }
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Performance monitor registers' user accessibility is controlled
     * by PMUSERENR.
     */
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    env->cp15.c12_vbar = value & ~0x1Ful;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    return cpu->ccsidr[env->cp15.c0_cssel];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c0_cssel = value & 0xf;
}
static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components.
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
      .resetvalue = 0 },
    { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
      .resetvalue = 0, },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     * The override is necessary because of the overly-broad TLB_LOCKDOWN
     * definition.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
    REGINFO_SENTINEL
};
#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_VIRT);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_VIRT);
}
static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}
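/* The system counter ticks once per GTIMER_SCALE nanoseconds, i.e. at
 * (1000 * 1000 * 1000) / GTIMER_SCALE Hz -- exactly the CNTFRQ reset value
 * advertised in generic_timer_cp_reginfo[] below.
 */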
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
    }
}
static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->opc1 & 1;

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->opc1 & 1;

    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
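/* TVAL is a 32-bit signed downcounter defined by TVAL = CVAL - CNT:
 * reading truncates the difference to 32 bits, and writing sets
 * CVAL = CNT + sign_extend(value), so the two views stay consistent.
 */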
static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int timeridx = ri->crm & 1;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      gt_get_countervalue(env));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->crm & 1;

    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
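/* CNT[PV]_CTL layout: bit 0 is ENABLE, bit 1 is IMASK, bit 2 is the
 * read-only ISTATUS (kept up to date by gt_recalc_timer() via deposit32).
 * Only the two writable bits are taken from the guest value below.
 */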
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = value & 3;
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));
    }
}
void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else

/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif
static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        env->cp15.c7_par = value;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        env->cp15.c7_par = value & 0xfffff6ff;
    } else {
        env->cp15.c7_par = value & 0xfffff1ff;
    }
}
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

/* Return true if extended addresses are enabled, ie this is an
 * LPAE implementation and we are using the long-descriptor translation
 * table format because the TTBCR EAE bit is set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_LPAE)
        && (env->cp15.c2_control & (1U << 31));
}

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (ri->opc2 & 4) {
        /* Other states are only available with TrustZone; in
         * a non-TZ implementation these registers don't exist
         * at all, which is an Uncategorized trap. This underdecoding
         * is safe because the reginfo is NO_MIGRATE.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}
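/* The ATS operations encode their variant in opc2: bit 0 selects a read vs
 * write access type, bit 1 selects the privileged vs user translation
 * regime, and bit 2 (trapped above) selects the other security state,
 * which we don't implement.
 */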
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.c7_par = par64;
        env->cp15.c7_par_hi = par64 >> 32;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.c7_par = phys_addr & 0xfffff000;
            }
        } else {
            env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
                ((ret & (1 << 12)) >> 6) |
                ((ret & 0xf) << 1) | 1;
        }
        env->cp15.c7_par_hi = 0;
    }
}
#endif
static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
#endif
    REGINFO_SENTINEL
};
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
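/* The PMSAv5 'extended' registers hold a 4-bit access-permission field per
 * protection region; the legacy DATA_AP/INSN_AP views expose only the low
 * two bits of each field. The helpers above convert between the layouts.
 */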
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_data = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_data);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_insn = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_insn);
}
static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};
static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    int maskshift = extract32(value, 0, 3);

    if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
        value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
    } else {
        value &= 7;
    }
    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    env->cp15.c2_control = value;
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
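/* Example: with TTBCR.N = 2 (short-descriptor format), c2_mask becomes
 * 0xc0000000, so any VA with either of its top two bits set is translated
 * via TTBR1, and c2_base_mask becomes 0xfffff000, reflecting the increased
 * alignment required of the (smaller) TTBR0 table. Both masks are consumed
 * by get_phys_addr().
 */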
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(env, 1);
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base_mask = 0xffffc000u;
    env->cp15.c2_control = 0;
    env->cp15.c2_mask = 0;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(env, 1);
    env->cp15.c2_control = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        tlb_flush(env, 1);
    }
    raw_write(env, ri, value);
}
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
      .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
    { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}
static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};
static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    value &= 0x3fff;
    if (env->cp15.c15_cpar != value) {
        /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
        tb_flush(env);
        env->cp15.c15_cpar = value;
    }
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};
static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11]) (known as Aff1
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
    }
    return mpidr;
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};
static uint64_t par64_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
}

static void par64_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c7_par_hi = value >> 32;
    env->cp15.c7_par = value;
}

static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c7_par_hi = 0;
    env->cp15.c7_par = 0;
}
static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};
static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Invalidate by VA (AArch64 version) */
    uint64_t pageaddr = value << 12;
    tlb_flush_page(env, pageaddr);
}

static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    /* Invalidate by VA, all ASIDs (AArch64 version) */
    uint64_t pageaddr = value << 12;
    tlb_flush_page(env, pageaddr);
}

static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by ASID (AArch64 version) */
    int asid = extract64(value, 48, 16);
    tlb_flush(env, asid == 0);
}
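/* The Xt argument to the TLBI-by-VA operations carries VA[55:12] in its low
 * bits (with the ASID in bits [63:48] for the non-VAA forms), so the left
 * shift by 12 above reconstructs the page-aligned address.
 */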
static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    /* Prohibit use of DC ZVA. OPTME: implement DC ZVA and allow its use.
     * For system mode the DZP bit here will need to be computed, not constant.
     */
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_CONST,
      .resetvalue = 0x10 },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    /* Dummy implementation of monitor debug system control register:
     * we don't support debug.
     */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};
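
/* The opc0/opc1/crn/crm/opc2 fields above mirror the op0/op1/CRn/CRm/op2
 * fields of the AArch64 MRS/MSR system-register encoding, so each entry
 * corresponds directly to a guest-visible register. For example, the NZCV
 * entry (opc0=3, opc1=3, crn=4, crm=2, opc2=0) is what the guest reaches
 * with:
 *
 *     mrs x0, nzcv    // read the flags
 *     msr nzcv, x0    // write them back
 */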
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c1_sys = value;
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(env, 1);
}

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}
static void define_aarch64_debug_regs(ARMCPU *cpu)
{
    /* Define breakpoint and watchpoint registers. These do nothing
     * but read as written, for now.
     */
    int i;

    for (i = 0; i < 16; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
            { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
            { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
            { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
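
/* Because .crm is set to the loop index, each pass through the loop above
 * defines one breakpoint/watchpoint register group. Sketch of what the
 * i == 2 iteration expands to (illustrative only):
 *
 *     //   DBGBVR2_EL1 -> op0=2, op1=0, CRn=0, CRm=2, op2=4
 *     //   DBGBCR2_EL1 -> op0=2, op1=0, CRn=0, CRm=2, op2=5
 *     //   DBGWVR2_EL1 -> op0=2, op1=0, CRn=0, CRm=2, op2=6
 *     //   DBGWCR2_EL1 -> op0=2, op1=0, CRn=0, CRm=2, op2=7
 */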
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement no event counters.
         */
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values */
        ARMCPRegInfo v8_idregs[] = {
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr0 },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
        define_aarch64_debug_regs(cpu);
    }
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_cp_reginfo[] = {
            /* Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 0, .crm = 0,
              .access = PL1_R, .resetvalue = cpu->midr, .type = ARM_CP_CONST },
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }
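
    /* The ordering above matters: for OMAP/StrongARM the blanket CRN0_WI
     * entry (PL1_W, ARM_CP_NOP, ARM_CP_OVERRIDE) is defined first so the
     * whole crn=0 space ignores writes, and the specific ID registers are
     * then re-registered as PL1_RW on top of it. A definition may only
     * replace an existing one if at least one of the two carries
     * ARM_CP_OVERRIDE; a hypothetical pair, purely for illustration:
     *
     *     { .name = "GENERIC_RAZ", .cp = 15, .crn = 9, .crm = 0,
     *       .opc1 = 0, .opc2 = 0, .access = PL1_R,
     *       .type = ARM_CP_CONST, .resetvalue = 0 },
     *     { .name = "IMPDEF_VERSION", .cp = 15, .crn = 9, .crm = 0,
     *       .opc1 = 0, .opc2 = 0, .access = PL1_R,
     *       .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0x42 },
     */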
    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        };
        define_one_arm_cp_reg(cpu, &auxcr);
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        ARMCPRegInfo cbar = {
            .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
            .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
            .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
        };
        define_one_arm_cp_reg(cpu, &cbar);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}
ARMCPU *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    ObjectClass *oc;

    oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
    if (!oc) {
        return NULL;
    }
    cpu = ARM_CPU(object_new(object_class_get_name(oc)));

    /* TODO this should be set centrally, once possible */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
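
/* The register counts passed to gdb_register_coprocessor() are the number
 * of registers each XML feature description exposes:
 *   aarch64-fpu.xml: 32 V regs + FPSR + FPCR                  = 34
 *   arm-neon.xml:    32 D regs + 16 Q aliases + 3 VFP xregs   = 51
 *   arm-vfp3.xml:    32 D regs + FPSID/FPSCR/FPEXC            = 35
 *   arm-vfp.xml:     16 D regs + FPSID/FPSCR/FPEXC            = 19
 */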
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
        /* The AArch32 view of a shared register sees the lower 32 bits
         * of a 64 bit backing field. It is not migratable as the AArch64
         * view handles that. AArch64 also handles reset.
         * We assume it is a cp15 register.
         */
        r2->cp = 15;
        r2->type |= ARM_CP_NO_MIGRATE;
        r2->resetfn = arm_cp_reset_ignore;
#ifdef HOST_WORDS_BIGENDIAN
        if (r2->fieldoffset) {
            r2->fieldoffset += sizeof(uint32_t);
        }
#endif
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         */
        if (r->cp == 0) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * NO_MIGRATE so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable.
     */
    if ((r->type & ARM_CP_SPECIAL) ||
        ((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_NO_MIGRATE;
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}
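
/* The key stored here is what the translator later uses to look the
 * register back up. Sketch of a lookup for an AArch32 cp15 register
 * (the exact ENCODE_CP_REG bit layout lives in cpu.h and is assumed
 * rather than shown here):
 *
 *     const ARMCPRegInfo *ri =
 *         get_arm_cp_reginfo(cpu->cp_regs,
 *                            ENCODE_CP_REG(15, is64, crn, crm, opc1, opc2));
 *     if (ri) {
 *         // emit a read/write of the register described by ri
 *     }
 */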
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    add_cpreg_to_hashtable(cpu, r, opaque, state,
                                           crm, opc1, opc2);
                }
            }
        }
    }
}
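
/* Wildcard example (hypothetical register, purely for illustration): an
 * entry such as
 *
 *     { .name = "EXAMPLE_RAZ", .cp = 15, .crn = 9,
 *       .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
 *       .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
 *
 * is expanded by the loops above into one hashtable entry per concrete
 * (crm, opc1, opc2) triple, i.e. 16 * 8 * 8 = 1024 entries, each given
 * its real crm/opc1/opc2 values by add_cpreg_to_hashtable().
 */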
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | env->daif;
}
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q) {
        env->QF = ((val & CPSR_Q) != 0);
    }
    if (mask & CPSR_T) {
        env->thumb = ((val & CPSR_T) != 0);
    }
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
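
/* Bit layout implied by cpsr_read()/cpsr_write() above:
 *   N=31  Z=30  C=29  V=28  Q=27  IT[1:0]=26:25  GE[3:0]=19:16
 *   IT[7:2]=15:10  A/I/F (the CPSR_AIF bits, mirrored in env->daif)
 *   T=5   M[4:0]=4:0
 * Worked example: cpsr_write(env, 0x60000010, CPSR_NZCV) sets Z and C,
 * clears N and V, and leaves everything else (mode, IT, GE, ...) alone
 * because only the NZCV bits are covered by the mask.
 */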
/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
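
/* HELPER(rbit) reverses the bit order of its argument in three swap
 * passes (reverse byte order, swap nibbles within bytes, then reverse
 * the bits within each nibble), e.g.:
 *     rbit(0x00000001) == 0x80000000
 *     rbit(0x12345678) == 0x1e6a2c48
 */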
2579 #if defined(CONFIG_USER_ONLY)
2581 void arm_cpu_do_interrupt(CPUState
*cs
)
2583 ARMCPU
*cpu
= ARM_CPU(cs
);
2584 CPUARMState
*env
= &cpu
->env
;
2586 env
->exception_index
= -1;
2589 int cpu_arm_handle_mmu_fault (CPUARMState
*env
, target_ulong address
, int rw
,
2593 env
->exception_index
= EXCP_PREFETCH_ABORT
;
2594 env
->cp15
.c6_insn
= address
;
2596 env
->exception_index
= EXCP_DATA_ABORT
;
2597 env
->cp15
.c6_data
= address
;
2602 /* These should probably raise undefined insn exceptions. */
2603 void HELPER(v7m_msr
)(CPUARMState
*env
, uint32_t reg
, uint32_t val
)
2605 cpu_abort(env
, "v7m_mrs %d\n", reg
);
2608 uint32_t HELPER(v7m_mrs
)(CPUARMState
*env
, uint32_t reg
)
2610 cpu_abort(env
, "v7m_mrs %d\n", reg
);
2614 void switch_mode(CPUARMState
*env
, int mode
)
2616 if (mode
!= ARM_CPU_MODE_USR
)
2617 cpu_abort(env
, "Tried to switch out of user mode\n");
2620 void HELPER(set_r13_banked
)(CPUARMState
*env
, uint32_t mode
, uint32_t val
)
2622 cpu_abort(env
, "banked r13 write\n");
2625 uint32_t HELPER(get_r13_banked
)(CPUARMState
*env
, uint32_t mode
)
2627 cpu_abort(env
, "banked r13 read\n");
2633 /* Map CPU modes onto saved register banks. */
2634 int bank_number(int mode
)
2637 case ARM_CPU_MODE_USR
:
2638 case ARM_CPU_MODE_SYS
:
2640 case ARM_CPU_MODE_SVC
:
2642 case ARM_CPU_MODE_ABT
:
2644 case ARM_CPU_MODE_UND
:
2646 case ARM_CPU_MODE_IRQ
:
2648 case ARM_CPU_MODE_FIQ
:
2651 hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode
);
2654 void switch_mode(CPUARMState
*env
, int mode
)
2659 old_mode
= env
->uncached_cpsr
& CPSR_M
;
2660 if (mode
== old_mode
)
2663 if (old_mode
== ARM_CPU_MODE_FIQ
) {
2664 memcpy (env
->fiq_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
2665 memcpy (env
->regs
+ 8, env
->usr_regs
, 5 * sizeof(uint32_t));
2666 } else if (mode
== ARM_CPU_MODE_FIQ
) {
2667 memcpy (env
->usr_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
2668 memcpy (env
->regs
+ 8, env
->fiq_regs
, 5 * sizeof(uint32_t));
2671 i
= bank_number(old_mode
);
2672 env
->banked_r13
[i
] = env
->regs
[13];
2673 env
->banked_r14
[i
] = env
->regs
[14];
2674 env
->banked_spsr
[i
] = env
->spsr
;
2676 i
= bank_number(mode
);
2677 env
->regs
[13] = env
->banked_r13
[i
];
2678 env
->regs
[14] = env
->banked_r14
[i
];
2679 env
->spsr
= env
->banked_spsr
[i
];
2682 static void v7m_push(CPUARMState
*env
, uint32_t val
)
2684 CPUState
*cs
= ENV_GET_CPU(env
);
2686 stl_phys(cs
->as
, env
->regs
[13], val
);
2689 static uint32_t v7m_pop(CPUARMState
*env
)
2691 CPUState
*cs
= ENV_GET_CPU(env
);
2693 val
= ldl_phys(cs
->as
, env
->regs
[13]);
2698 /* Switch to V7M main or process stack pointer. */
2699 static void switch_v7m_sp(CPUARMState
*env
, int process
)
2702 if (env
->v7m
.current_sp
!= process
) {
2703 tmp
= env
->v7m
.other_sp
;
2704 env
->v7m
.other_sp
= env
->regs
[13];
2705 env
->regs
[13] = tmp
;
2706 env
->v7m
.current_sp
= process
;
2710 static void do_v7m_exception_exit(CPUARMState
*env
)
2715 type
= env
->regs
[15];
2716 if (env
->v7m
.exception
!= 0)
2717 armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
);
2719 /* Switch to the target stack. */
2720 switch_v7m_sp(env
, (type
& 4) != 0);
2721 /* Pop registers. */
2722 env
->regs
[0] = v7m_pop(env
);
2723 env
->regs
[1] = v7m_pop(env
);
2724 env
->regs
[2] = v7m_pop(env
);
2725 env
->regs
[3] = v7m_pop(env
);
2726 env
->regs
[12] = v7m_pop(env
);
2727 env
->regs
[14] = v7m_pop(env
);
2728 env
->regs
[15] = v7m_pop(env
);
2729 xpsr
= v7m_pop(env
);
2730 xpsr_write(env
, xpsr
, 0xfffffdff);
2731 /* Undo stack alignment. */
2734 /* ??? The exception return type specifies Thread/Handler mode. However
2735 this is also implied by the xPSR value. Not sure what to do
2736 if there is a mismatch. */
2737 /* ??? Likewise for mismatches between the CONTROL register and the stack
2741 /* Exception names for debug logging; note that not all of these
2742 * precisely correspond to architectural exceptions.
2744 static const char * const excnames
[] = {
2745 [EXCP_UDEF
] = "Undefined Instruction",
2747 [EXCP_PREFETCH_ABORT
] = "Prefetch Abort",
2748 [EXCP_DATA_ABORT
] = "Data Abort",
2751 [EXCP_BKPT
] = "Breakpoint",
2752 [EXCP_EXCEPTION_EXIT
] = "QEMU v7M exception exit",
2753 [EXCP_KERNEL_TRAP
] = "QEMU intercept of kernel commpage",
2754 [EXCP_STREX
] = "QEMU intercept of STREX",
2757 static inline void arm_log_exception(int idx
)
2759 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
2760 const char *exc
= NULL
;
2762 if (idx
>= 0 && idx
< ARRAY_SIZE(excnames
)) {
2763 exc
= excnames
[idx
];
2768 qemu_log_mask(CPU_LOG_INT
, "Taking exception %d [%s]\n", idx
, exc
);
2772 void arm_v7m_cpu_do_interrupt(CPUState
*cs
)
2774 ARMCPU
*cpu
= ARM_CPU(cs
);
2775 CPUARMState
*env
= &cpu
->env
;
2776 uint32_t xpsr
= xpsr_read(env
);
2780 arm_log_exception(env
->exception_index
);
2783 if (env
->v7m
.current_sp
)
2785 if (env
->v7m
.exception
== 0)
2788 /* For exceptions we just mark as pending on the NVIC, and let that
2790 /* TODO: Need to escalate if the current priority is higher than the
2791 one we're raising. */
2792 switch (env
->exception_index
) {
2794 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
);
2797 /* The PC already points to the next instruction. */
2798 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SVC
);
2800 case EXCP_PREFETCH_ABORT
:
2801 case EXCP_DATA_ABORT
:
2802 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_MEM
);
2805 if (semihosting_enabled
) {
2807 nr
= arm_lduw_code(env
, env
->regs
[15], env
->bswap_code
) & 0xff;
2810 env
->regs
[0] = do_arm_semihosting(env
);
2811 qemu_log_mask(CPU_LOG_INT
, "...handled as semihosting call\n");
2815 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_DEBUG
);
2818 env
->v7m
.exception
= armv7m_nvic_acknowledge_irq(env
->nvic
);
2820 case EXCP_EXCEPTION_EXIT
:
2821 do_v7m_exception_exit(env
);
2824 cpu_abort(env
, "Unhandled exception 0x%x\n", env
->exception_index
);
2825 return; /* Never happens. Keep compiler happy. */
2828 /* Align stack pointer. */
2829 /* ??? Should only do this if Configuration Control Register
2830 STACKALIGN bit is set. */
2831 if (env
->regs
[13] & 4) {
2835 /* Switch to the handler mode. */
2836 v7m_push(env
, xpsr
);
2837 v7m_push(env
, env
->regs
[15]);
2838 v7m_push(env
, env
->regs
[14]);
2839 v7m_push(env
, env
->regs
[12]);
2840 v7m_push(env
, env
->regs
[3]);
2841 v7m_push(env
, env
->regs
[2]);
2842 v7m_push(env
, env
->regs
[1]);
2843 v7m_push(env
, env
->regs
[0]);
2844 switch_v7m_sp(env
, 0);
2846 env
->condexec_bits
= 0;
2848 addr
= ldl_phys(cs
->as
, env
->v7m
.vecbase
+ env
->v7m
.exception
* 4);
2849 env
->regs
[15] = addr
& 0xfffffffe;
2850 env
->thumb
= addr
& 1;
2853 /* Handle a CPU exception. */
2854 void arm_cpu_do_interrupt(CPUState
*cs
)
2856 ARMCPU
*cpu
= ARM_CPU(cs
);
2857 CPUARMState
*env
= &cpu
->env
;
2865 arm_log_exception(env
->exception_index
);
2867 /* TODO: Vectored interrupt controller. */
2868 switch (env
->exception_index
) {
2870 new_mode
= ARM_CPU_MODE_UND
;
2879 if (semihosting_enabled
) {
2880 /* Check for semihosting interrupt. */
2882 mask
= arm_lduw_code(env
, env
->regs
[15] - 2, env
->bswap_code
)
2885 mask
= arm_ldl_code(env
, env
->regs
[15] - 4, env
->bswap_code
)
2888 /* Only intercept calls from privileged modes, to provide some
2889 semblance of security. */
2890 if (((mask
== 0x123456 && !env
->thumb
)
2891 || (mask
== 0xab && env
->thumb
))
2892 && (env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
) {
2893 env
->regs
[0] = do_arm_semihosting(env
);
2894 qemu_log_mask(CPU_LOG_INT
, "...handled as semihosting call\n");
2898 new_mode
= ARM_CPU_MODE_SVC
;
2901 /* The PC already points to the next instruction. */
2905 /* See if this is a semihosting syscall. */
2906 if (env
->thumb
&& semihosting_enabled
) {
2907 mask
= arm_lduw_code(env
, env
->regs
[15], env
->bswap_code
) & 0xff;
2909 && (env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
) {
2911 env
->regs
[0] = do_arm_semihosting(env
);
2912 qemu_log_mask(CPU_LOG_INT
, "...handled as semihosting call\n");
2916 env
->cp15
.c5_insn
= 2;
2917 /* Fall through to prefetch abort. */
2918 case EXCP_PREFETCH_ABORT
:
2919 qemu_log_mask(CPU_LOG_INT
, "...with IFSR 0x%x IFAR 0x%x\n",
2920 env
->cp15
.c5_insn
, env
->cp15
.c6_insn
);
2921 new_mode
= ARM_CPU_MODE_ABT
;
2923 mask
= CPSR_A
| CPSR_I
;
2926 case EXCP_DATA_ABORT
:
2927 qemu_log_mask(CPU_LOG_INT
, "...with DFSR 0x%x DFAR 0x%x\n",
2928 env
->cp15
.c5_data
, env
->cp15
.c6_data
);
2929 new_mode
= ARM_CPU_MODE_ABT
;
2931 mask
= CPSR_A
| CPSR_I
;
2935 new_mode
= ARM_CPU_MODE_IRQ
;
2937 /* Disable IRQ and imprecise data aborts. */
2938 mask
= CPSR_A
| CPSR_I
;
2942 new_mode
= ARM_CPU_MODE_FIQ
;
2944 /* Disable FIQ, IRQ and imprecise data aborts. */
2945 mask
= CPSR_A
| CPSR_I
| CPSR_F
;
2949 cpu_abort(env
, "Unhandled exception 0x%x\n", env
->exception_index
);
2950 return; /* Never happens. Keep compiler happy. */
2953 if (env
->cp15
.c1_sys
& SCTLR_V
) {
2954 /* when enabled, base address cannot be remapped. */
2957 /* ARM v7 architectures provide a vector base address register to remap
2958 * the interrupt vector table.
2959 * This register is only followed in non-monitor mode, and has a secure
2960 * and un-secure copy. Since the cpu is always in a un-secure operation
2961 * and is never in monitor mode this feature is always active.
2962 * Note: only bits 31:5 are valid.
2964 addr
+= env
->cp15
.c12_vbar
;
2966 switch_mode (env
, new_mode
);
2967 env
->spsr
= cpsr_read(env
);
2968 /* Clear IT bits. */
2969 env
->condexec_bits
= 0;
2970 /* Switch to the new mode, and to the correct instruction set. */
2971 env
->uncached_cpsr
= (env
->uncached_cpsr
& ~CPSR_M
) | new_mode
;
2973 /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
2974 * and we should just guard the thumb mode on V4 */
2975 if (arm_feature(env
, ARM_FEATURE_V4T
)) {
2976 env
->thumb
= (env
->cp15
.c1_sys
& SCTLR_TE
) != 0;
2978 env
->regs
[14] = env
->regs
[15] + offset
;
2979 env
->regs
[15] = addr
;
2980 cs
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
2983 /* Check section/page access permissions.
2984 Returns the page protection flags, or zero if the access is not
2986 static inline int check_ap(CPUARMState
*env
, int ap
, int domain_prot
,
2987 int access_type
, int is_user
)
2991 if (domain_prot
== 3) {
2992 return PAGE_READ
| PAGE_WRITE
;
2995 if (access_type
== 1)
2998 prot_ro
= PAGE_READ
;
3002 if (arm_feature(env
, ARM_FEATURE_V7
)) {
3005 if (access_type
== 1)
3007 switch (env
->cp15
.c1_sys
& (SCTLR_S
| SCTLR_R
)) {
3009 return is_user
? 0 : PAGE_READ
;
3016 return is_user
? 0 : PAGE_READ
| PAGE_WRITE
;
3021 return PAGE_READ
| PAGE_WRITE
;
3023 return PAGE_READ
| PAGE_WRITE
;
3024 case 4: /* Reserved. */
3027 return is_user
? 0 : prot_ro
;
3031 if (!arm_feature (env
, ARM_FEATURE_V6K
))
3039 static uint32_t get_level1_table_address(CPUARMState
*env
, uint32_t address
)
3043 if (address
& env
->cp15
.c2_mask
)
3044 table
= env
->cp15
.ttbr1_el1
& 0xffffc000;
3046 table
= env
->cp15
.ttbr0_el1
& env
->cp15
.c2_base_mask
;
3048 table
|= (address
>> 18) & 0x3ffc;
3052 static int get_phys_addr_v5(CPUARMState
*env
, uint32_t address
, int access_type
,
3053 int is_user
, hwaddr
*phys_ptr
,
3054 int *prot
, target_ulong
*page_size
)
3056 CPUState
*cs
= ENV_GET_CPU(env
);
3066 /* Pagetable walk. */
3067 /* Lookup l1 descriptor. */
3068 table
= get_level1_table_address(env
, address
);
3069 desc
= ldl_phys(cs
->as
, table
);
3071 domain
= (desc
>> 5) & 0x0f;
3072 domain_prot
= (env
->cp15
.c3
>> (domain
* 2)) & 3;
3074 /* Section translation fault. */
3078 if (domain_prot
== 0 || domain_prot
== 2) {
3080 code
= 9; /* Section domain fault. */
3082 code
= 11; /* Page domain fault. */
3087 phys_addr
= (desc
& 0xfff00000) | (address
& 0x000fffff);
3088 ap
= (desc
>> 10) & 3;
3090 *page_size
= 1024 * 1024;
3092 /* Lookup l2 entry. */
3094 /* Coarse pagetable. */
3095 table
= (desc
& 0xfffffc00) | ((address
>> 10) & 0x3fc);
3097 /* Fine pagetable. */
3098 table
= (desc
& 0xfffff000) | ((address
>> 8) & 0xffc);
3100 desc
= ldl_phys(cs
->as
, table
);
3102 case 0: /* Page translation fault. */
3105 case 1: /* 64k page. */
3106 phys_addr
= (desc
& 0xffff0000) | (address
& 0xffff);
3107 ap
= (desc
>> (4 + ((address
>> 13) & 6))) & 3;
3108 *page_size
= 0x10000;
3110 case 2: /* 4k page. */
3111 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
3112 ap
= (desc
>> (4 + ((address
>> 9) & 6))) & 3;
3113 *page_size
= 0x1000;
3115 case 3: /* 1k page. */
3117 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
3118 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
3120 /* Page translation fault. */
3125 phys_addr
= (desc
& 0xfffffc00) | (address
& 0x3ff);
3127 ap
= (desc
>> 4) & 3;
3131 /* Never happens, but compiler isn't smart enough to tell. */
3136 *prot
= check_ap(env
, ap
, domain_prot
, access_type
, is_user
);
3138 /* Access permission fault. */
3142 *phys_ptr
= phys_addr
;
3145 return code
| (domain
<< 4);
3148 static int get_phys_addr_v6(CPUARMState
*env
, uint32_t address
, int access_type
,
3149 int is_user
, hwaddr
*phys_ptr
,
3150 int *prot
, target_ulong
*page_size
)
3152 CPUState
*cs
= ENV_GET_CPU(env
);
3164 /* Pagetable walk. */
3165 /* Lookup l1 descriptor. */
3166 table
= get_level1_table_address(env
, address
);
3167 desc
= ldl_phys(cs
->as
, table
);
3169 if (type
== 0 || (type
== 3 && !arm_feature(env
, ARM_FEATURE_PXN
))) {
3170 /* Section translation fault, or attempt to use the encoding
3171 * which is Reserved on implementations without PXN.
3176 if ((type
== 1) || !(desc
& (1 << 18))) {
3177 /* Page or Section. */
3178 domain
= (desc
>> 5) & 0x0f;
3180 domain_prot
= (env
->cp15
.c3
>> (domain
* 2)) & 3;
3181 if (domain_prot
== 0 || domain_prot
== 2) {
3183 code
= 9; /* Section domain fault. */
3185 code
= 11; /* Page domain fault. */
3190 if (desc
& (1 << 18)) {
3192 phys_addr
= (desc
& 0xff000000) | (address
& 0x00ffffff);
3193 *page_size
= 0x1000000;
3196 phys_addr
= (desc
& 0xfff00000) | (address
& 0x000fffff);
3197 *page_size
= 0x100000;
3199 ap
= ((desc
>> 10) & 3) | ((desc
>> 13) & 4);
3200 xn
= desc
& (1 << 4);
3204 if (arm_feature(env
, ARM_FEATURE_PXN
)) {
3205 pxn
= (desc
>> 2) & 1;
3207 /* Lookup l2 entry. */
3208 table
= (desc
& 0xfffffc00) | ((address
>> 10) & 0x3fc);
3209 desc
= ldl_phys(cs
->as
, table
);
3210 ap
= ((desc
>> 4) & 3) | ((desc
>> 7) & 4);
3212 case 0: /* Page translation fault. */
3215 case 1: /* 64k page. */
3216 phys_addr
= (desc
& 0xffff0000) | (address
& 0xffff);
3217 xn
= desc
& (1 << 15);
3218 *page_size
= 0x10000;
3220 case 2: case 3: /* 4k page. */
3221 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
3223 *page_size
= 0x1000;
3226 /* Never happens, but compiler isn't smart enough to tell. */
3231 if (domain_prot
== 3) {
3232 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
3234 if (pxn
&& !is_user
) {
3237 if (xn
&& access_type
== 2)
3240 /* The simplified model uses AP[0] as an access control bit. */
3241 if ((env
->cp15
.c1_sys
& SCTLR_AFE
) && (ap
& 1) == 0) {
3242 /* Access flag fault. */
3243 code
= (code
== 15) ? 6 : 3;
3246 *prot
= check_ap(env
, ap
, domain_prot
, access_type
, is_user
);
3248 /* Access permission fault. */
3255 *phys_ptr
= phys_addr
;
3258 return code
| (domain
<< 4);
3261 /* Fault type for long-descriptor MMU fault reporting; this corresponds
3262 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
3265 translation_fault
= 1,
3267 permission_fault
= 3,
3270 static int get_phys_addr_lpae(CPUARMState
*env
, uint32_t address
,
3271 int access_type
, int is_user
,
3272 hwaddr
*phys_ptr
, int *prot
,
3273 target_ulong
*page_size_ptr
)
3275 CPUState
*cs
= ENV_GET_CPU(env
);
3276 /* Read an LPAE long-descriptor translation table. */
3277 MMUFaultType fault_type
= translation_fault
;
3285 uint32_t tableattrs
;
3286 target_ulong page_size
;
3289 /* Determine whether this address is in the region controlled by
3290 * TTBR0 or TTBR1 (or if it is in neither region and should fault).
3291 * This is a Non-secure PL0/1 stage 1 translation, so controlled by
3292 * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
3294 uint32_t t0sz
= extract32(env
->cp15
.c2_control
, 0, 3);
3295 uint32_t t1sz
= extract32(env
->cp15
.c2_control
, 16, 3);
3296 if (t0sz
&& !extract32(address
, 32 - t0sz
, t0sz
)) {
3297 /* there is a ttbr0 region and we are in it (high bits all zero) */
3299 } else if (t1sz
&& !extract32(~address
, 32 - t1sz
, t1sz
)) {
3300 /* there is a ttbr1 region and we are in it (high bits all one) */
3303 /* ttbr0 region is "everything not in the ttbr1 region" */
3306 /* ttbr1 region is "everything not in the ttbr0 region" */
3309 /* in the gap between the two regions, this is a Translation fault */
3310 fault_type
= translation_fault
;
3314 /* Note that QEMU ignores shareability and cacheability attributes,
3315 * so we don't need to do anything with the SH, ORGN, IRGN fields
3316 * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
3317 * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
3318 * implement any ASID-like capability so we can ignore it (instead
3319 * we will always flush the TLB any time the ASID is changed).
3321 if (ttbr_select
== 0) {
3322 ttbr
= env
->cp15
.ttbr0_el1
;
3323 epd
= extract32(env
->cp15
.c2_control
, 7, 1);
3326 ttbr
= env
->cp15
.ttbr1_el1
;
3327 epd
= extract32(env
->cp15
.c2_control
, 23, 1);
3332 /* Translation table walk disabled => Translation fault on TLB miss */
3336 /* If the region is small enough we will skip straight to a 2nd level
3337 * lookup. This affects the number of bits of the address used in
3338 * combination with the TTBR to find the first descriptor. ('n' here
3339 * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
3340 * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
3349 /* Clear the vaddr bits which aren't part of the within-region address,
3350 * so that we don't have to special case things when calculating the
3351 * first descriptor address.
3353 address
&= (0xffffffffU
>> tsz
);
3355 /* Now we can extract the actual base address from the TTBR */
3356 descaddr
= extract64(ttbr
, 0, 40);
3357 descaddr
&= ~((1ULL << n
) - 1);
3361 uint64_t descriptor
;
3363 descaddr
|= ((address
>> (9 * (4 - level
))) & 0xff8);
3364 descriptor
= ldq_phys(cs
->as
, descaddr
);
3365 if (!(descriptor
& 1) ||
3366 (!(descriptor
& 2) && (level
== 3))) {
3367 /* Invalid, or the Reserved level 3 encoding */
3370 descaddr
= descriptor
& 0xfffffff000ULL
;
3372 if ((descriptor
& 2) && (level
< 3)) {
3373 /* Table entry. The top five bits are attributes which may
3374 * propagate down through lower levels of the table (and
3375 * which are all arranged so that 0 means "no effect", so
3376 * we can gather them up by ORing in the bits at each level).
3378 tableattrs
|= extract64(descriptor
, 59, 5);
3382 /* Block entry at level 1 or 2, or page entry at level 3.
3383 * These are basically the same thing, although the number
3384 * of bits we pull in from the vaddr varies.
3386 page_size
= (1 << (39 - (9 * level
)));
3387 descaddr
|= (address
& (page_size
- 1));
3388 /* Extract attributes from the descriptor and merge with table attrs */
3389 attrs
= extract64(descriptor
, 2, 10)
3390 | (extract64(descriptor
, 52, 12) << 10);
3391 attrs
|= extract32(tableattrs
, 0, 2) << 11; /* XN, PXN */
3392 attrs
|= extract32(tableattrs
, 3, 1) << 5; /* APTable[1] => AP[2] */
3393 /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
3394 * means "force PL1 access only", which means forcing AP[1] to 0.
3396 if (extract32(tableattrs
, 2, 1)) {
3399 /* Since we're always in the Non-secure state, NSTable is ignored. */
3402 /* Here descaddr is the final physical address, and attributes
3405 fault_type
= access_fault
;
3406 if ((attrs
& (1 << 8)) == 0) {
3410 fault_type
= permission_fault
;
3411 if (is_user
&& !(attrs
& (1 << 4))) {
3412 /* Unprivileged access not enabled */
3415 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
3416 if (attrs
& (1 << 12) || (!is_user
&& (attrs
& (1 << 11)))) {
3418 if (access_type
== 2) {
3421 *prot
&= ~PAGE_EXEC
;
3423 if (attrs
& (1 << 5)) {
3424 /* Write access forbidden */
3425 if (access_type
== 1) {
3428 *prot
&= ~PAGE_WRITE
;
3431 *phys_ptr
= descaddr
;
3432 *page_size_ptr
= page_size
;
3436 /* Long-descriptor format IFSR/DFSR value */
3437 return (1 << 9) | (fault_type
<< 2) | level
;
3440 static int get_phys_addr_mpu(CPUARMState
*env
, uint32_t address
,
3441 int access_type
, int is_user
,
3442 hwaddr
*phys_ptr
, int *prot
)
3448 *phys_ptr
= address
;
3449 for (n
= 7; n
>= 0; n
--) {
3450 base
= env
->cp15
.c6_region
[n
];
3451 if ((base
& 1) == 0)
3453 mask
= 1 << ((base
>> 1) & 0x1f);
3454 /* Keep this shift separate from the above to avoid an
3455 (undefined) << 32. */
3456 mask
= (mask
<< 1) - 1;
3457 if (((base
^ address
) & ~mask
) == 0)
3463 if (access_type
== 2) {
3464 mask
= env
->cp15
.c5_insn
;
3466 mask
= env
->cp15
.c5_data
;
3468 mask
= (mask
>> (n
* 4)) & 0xf;
3475 *prot
= PAGE_READ
| PAGE_WRITE
;
3480 *prot
|= PAGE_WRITE
;
3483 *prot
= PAGE_READ
| PAGE_WRITE
;
3494 /* Bad permission. */
3501 /* get_phys_addr - get the physical address for this virtual address
3503 * Find the physical address corresponding to the given virtual address,
3504 * by doing a translation table walk on MMU based systems or using the
3505 * MPU state on MPU based systems.
3507 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
3508 * prot and page_size are not filled in, and the return value provides
3509 * information on why the translation aborted, in the format of a
3510 * DFSR/IFSR fault register, with the following caveats:
3511 * * we honour the short vs long DFSR format differences.
3512 * * the WnR bit is never set (the caller must do this).
3513 * * for MPU based systems we don't bother to return a full FSR format
3517 * @address: virtual address to get physical address for
3518 * @access_type: 0 for read, 1 for write, 2 for execute
3519 * @is_user: 0 for privileged access, 1 for user
3520 * @phys_ptr: set to the physical address corresponding to the virtual address
3521 * @prot: set to the permissions for the page containing phys_ptr
3522 * @page_size: set to the size of the page containing phys_ptr
3524 static inline int get_phys_addr(CPUARMState
*env
, uint32_t address
,
3525 int access_type
, int is_user
,
3526 hwaddr
*phys_ptr
, int *prot
,
3527 target_ulong
*page_size
)
3529 /* Fast Context Switch Extension. */
3530 if (address
< 0x02000000)
3531 address
+= env
->cp15
.c13_fcse
;
3533 if ((env
->cp15
.c1_sys
& SCTLR_M
) == 0) {
3534 /* MMU/MPU disabled. */
3535 *phys_ptr
= address
;
3536 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
3537 *page_size
= TARGET_PAGE_SIZE
;
3539 } else if (arm_feature(env
, ARM_FEATURE_MPU
)) {
3540 *page_size
= TARGET_PAGE_SIZE
;
3541 return get_phys_addr_mpu(env
, address
, access_type
, is_user
, phys_ptr
,
3543 } else if (extended_addresses_enabled(env
)) {
3544 return get_phys_addr_lpae(env
, address
, access_type
, is_user
, phys_ptr
,
3546 } else if (env
->cp15
.c1_sys
& SCTLR_XP
) {
3547 return get_phys_addr_v6(env
, address
, access_type
, is_user
, phys_ptr
,
3550 return get_phys_addr_v5(env
, address
, access_type
, is_user
, phys_ptr
,
3555 int cpu_arm_handle_mmu_fault (CPUARMState
*env
, target_ulong address
,
3556 int access_type
, int mmu_idx
)
3559 target_ulong page_size
;
3563 is_user
= mmu_idx
== MMU_USER_IDX
;
3564 ret
= get_phys_addr(env
, address
, access_type
, is_user
, &phys_addr
, &prot
,
3567 /* Map a single [sub]page. */
3568 phys_addr
&= ~(hwaddr
)0x3ff;
3569 address
&= ~(uint32_t)0x3ff;
3570 tlb_set_page (env
, address
, phys_addr
, prot
, mmu_idx
, page_size
);
3574 if (access_type
== 2) {
3575 env
->cp15
.c5_insn
= ret
;
3576 env
->cp15
.c6_insn
= address
;
3577 env
->exception_index
= EXCP_PREFETCH_ABORT
;
3579 env
->cp15
.c5_data
= ret
;
3580 if (access_type
== 1 && arm_feature(env
, ARM_FEATURE_V6
))
3581 env
->cp15
.c5_data
|= (1 << 11);
3582 env
->cp15
.c6_data
= address
;
3583 env
->exception_index
= EXCP_DATA_ABORT
;
3588 hwaddr
arm_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
3590 ARMCPU
*cpu
= ARM_CPU(cs
);
3592 target_ulong page_size
;
3596 ret
= get_phys_addr(&cpu
->env
, addr
, 0, 0, &phys_addr
, &prot
, &page_size
);
3605 void HELPER(set_r13_banked
)(CPUARMState
*env
, uint32_t mode
, uint32_t val
)
3607 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
3608 env
->regs
[13] = val
;
3610 env
->banked_r13
[bank_number(mode
)] = val
;
3614 uint32_t HELPER(get_r13_banked
)(CPUARMState
*env
, uint32_t mode
)
3616 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
3617 return env
->regs
[13];
3619 return env
->banked_r13
[bank_number(mode
)];
3623 uint32_t HELPER(v7m_mrs
)(CPUARMState
*env
, uint32_t reg
)
3627 return xpsr_read(env
) & 0xf8000000;
3629 return xpsr_read(env
) & 0xf80001ff;
3631 return xpsr_read(env
) & 0xff00fc00;
3633 return xpsr_read(env
) & 0xff00fdff;
3635 return xpsr_read(env
) & 0x000001ff;
3637 return xpsr_read(env
) & 0x0700fc00;
3639 return xpsr_read(env
) & 0x0700edff;
3641 return env
->v7m
.current_sp
? env
->v7m
.other_sp
: env
->regs
[13];
3643 return env
->v7m
.current_sp
? env
->regs
[13] : env
->v7m
.other_sp
;
3644 case 16: /* PRIMASK */
3645 return (env
->daif
& PSTATE_I
) != 0;
3646 case 17: /* BASEPRI */
3647 case 18: /* BASEPRI_MAX */
3648 return env
->v7m
.basepri
;
3649 case 19: /* FAULTMASK */
3650 return (env
->daif
& PSTATE_F
) != 0;
3651 case 20: /* CONTROL */
3652 return env
->v7m
.control
;
3654 /* ??? For debugging only. */
3655 cpu_abort(env
, "Unimplemented system register read (%d)\n", reg
);
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly. */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1) {
            env->daif |= PSTATE_F;
        } else {
            env->daif &= ~PSTATE_F;
        }
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);

#include "op_addsub.h"
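/* Worked example (editor's illustration, not part of the original source):
 * the helpers above saturate at the signed limits of the lane width, e.g.
 *   add16_sat(0x7fff, 0x0001) == 0x7fff   (positive overflow clamps to +32767)
 *   add16_sat(0x8000, 0xffff) == 0x8000   (negative overflow clamps to -32768)
 *   add16_sat(0x0002, 0x0003) == 0x0005   (no overflow: ordinary sum)
 * The test ((res ^ a) & 0x8000) && !((a ^ b) & 0x8000) fires only when the
 * operands share a sign and the wrapped result's sign differs from it.
 */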
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res = a + b;
    return res < a ? 0xffff : res;      /* clamp on wrap-around */
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    return a > b ? a - b : 0;           /* clamp at zero */
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res = a + b;
    return res < a ? 0xff : res;        /* clamp on wrap-around */
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    return a > b ? a - b : 0;           /* clamp at zero */
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)

#include "op_addsub.h"
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;

    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
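/* Editor's note (illustration, not part of the original source): the FPSCR
 * word assembled above packs, among other fields:
 *   [31:28] N,Z,C,V condition flags          (kept in xregs[ARM_VFP_FPSCR])
 *   [21:20] vector stride, [18:16] vector length (held separately in env)
 *   [7] IDC and [4:0] IXC/UFC/OFC/DZC/IOC cumulative exception bits, which
 *       is why the softfloat flags of both fp_status and standard_fp_status
 *       are merged in via vfp_exceptbits_from_host().
 */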
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch (type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch (type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
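/* Editor's note (illustration, not part of the original source): the flags
 * values written to FPSCR[31:28] above follow the ARM FP compare encoding:
 *   equal      -> 0x6 (Z,C set)    softfloat result  0
 *   less than  -> 0x8 (N set)      softfloat result -1
 *   greater    -> 0x2 (C set)      softfloat result  1
 *   unordered  -> 0x3 (C,V set)    softfloat result  2 (or anything else)
 * cmp uses the quiet compare and cmpe the signalling one, so only cmpe
 * raises Invalid Operation for quiet-NaN operands.
 */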
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)
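/* Editor's note (illustration, not part of the original source): each
 * FLOAT_CONVS() line expands the macros above into three helpers.  For
 * example FLOAT_CONVS(si, s, 32, ) produces
 *   HELPER(vfp_sitos)  - int32   -> float32
 *   HELPER(vfp_tosis)  - float32 -> int32, current float_status rounding
 *   HELPER(vfp_tosizs) - float32 -> int32, round towards zero
 * and the NaN check in CONV_FTOI makes any NaN input convert to 0 while
 * raising the Invalid Operation flag.
 */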
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
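/* Worked example (editor's illustration, not part of the original source):
 * the "shift" argument is the number of fraction bits, so converting the
 * signed 16-bit fixed point value 0x0100 with shift = 8 via
 * HELPER(vfp_shtod)(0x0100, 8, fpst) computes
 *   int16_to_float64(0x0100) = 256.0, then scalbn(256.0, -8) = 1.0,
 * while the float-to-fixed direction scales by +shift before the final
 * float-to-integer conversion.
 */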
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
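/* Usage sketch (editor's illustration, not part of the original source):
 * per the comment above, callers follow a save/modify/restore pattern
 * around the rounding-mode-sensitive operation, roughly:
 */
#if 0   /* illustrative only */
    uint32_t prev = HELPER(set_rmode)(float_round_ties_away, env);
    /* ... perform the operation that needs the temporary rounding mode ... */
    HELPER(set_rmode)(prev, env);    /* put the FPSCR-selected mode back */
#endif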
/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
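/* Editor's note (illustration, not part of the original source): the "ieee"
 * flag above is clear when FPSCR bit 26 (AHP, alternative half-precision) is
 * set, in which case float16 has no infinities or NaNs and no NaN silencing
 * is needed.  In the default IEEE mode, e.g. half 0x3c00 (1.0) widens to
 * float32 0x3f800000 and to float64 0x3ff0000000000000.
 */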
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
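/* Worked example (editor's illustration, not part of the original source):
 * for a = 0.75 the steps above give q_int = (int)(0.75 * 512) = 384,
 * r = 1.0 / ((384 + 0.5) / 512) = 512 / 384.5 ~= 1.33160,
 * s = (int)(256 * r + 0.5) = 341, result = 341 / 256 = 1.33203125,
 * i.e. an 8-fraction-bit approximation of the true reciprocal 1.3333...
 */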
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);

    return make_float32(val32);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        int64_t q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */

    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0;*/
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val;
    uint64_t val64;
    int result_exp;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent. */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
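/* Editor's note (illustration, not part of the original source): the mapping
 * above yields float_round_up for FPROUNDING_POSINF, float_round_down for
 * FPROUNDING_NEGINF, float_round_to_zero for FPROUNDING_ZERO and
 * float_round_nearest_even for FPROUNDING_TIEEVEN; FPROUNDING_ODD has no
 * softfloat equivalent here, so it logs LOG_UNIMP and falls back to
 * nearest-even.  The result is a softfloat float_round_ value, suitable for
 * passing to HELPER(set_rmode) above.
 */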
static void crc_init_buffer(uint8_t *buf, uint32_t val, uint32_t bytes)
{
    if (bytes == 1) {
        buf[0] = val & 0xff;
    } else if (bytes == 2) {
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
    } else {
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
        buf[2] = (val >> 16) & 0xff;
        buf[3] = (val >> 24) & 0xff;
    }
}

uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    crc_init_buffer(buf, val, bytes);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    crc_init_buffer(buf, val, bytes);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
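/* Editor's note (illustration, not part of the original source): because the
 * complement handling above undoes zlib's pre/post-inversion convention, the
 * helper returns a raw CRC accumulator, and successive calls chain exactly
 * like a single call over the concatenated bytes:
 */
#if 0   /* illustrative only */
    uint32_t word_crc = HELPER(crc32)(acc, val, 4);
    uint32_t halves   = HELPER(crc32)(HELPER(crc32)(acc, val & 0xffff, 2),
                                      val >> 16, 2);
    /* word_crc == halves: CRC32 is a streaming computation */
#endif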