#include "cpu.h"
#include "exec/gdbstub.h"
#include "helper.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"

#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size);
#endif

static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian. */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs. */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

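/* Illustrative sketch, not part of the original file: callbacks like
 * vfp_gdb_get_reg()/vfp_gdb_set_reg() above are hooked into the gdbstub
 * via gdb_register_coprocessor(). The register count and XML name below
 * are placeholders chosen for the example, not authoritative values.
 */
#if 0 /* example only */
static void example_register_vfp_gdb_hooks(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* 16 or 32 D regs plus FPSID/FPSCR/FPEXC, per the helpers above */
    gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                             35, "arm-vfp3.xml", 0);
}
#endif
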
static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}

static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * written value.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

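/* Sketch of the intended round trip (an assumption about the callers,
 * which live in the migration code rather than in this file): the source
 * syncs into the list before sending, the destination writes the list
 * back and treats a failed readback as a fatal migration error.
 */
#if 0 /* example only */
static bool example_cpreg_migration_roundtrip(ARMCPU *cpu)
{
    if (!write_cpustate_to_list(cpu)) {
        return false; /* some register could not be read */
    }
    /* ... cpreg_indexes[]/cpreg_values[] are transferred here ... */
    return write_list_to_cpustate(cpu);
}
#endif
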
static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys = NULL;
    int arraylen;

    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);

    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}

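/* Note the two-pass pattern above: count_cpreg() sizes the arrays, then
 * cpreg_array_len is reset and add_cpreg_to_list() re-increments it while
 * filling in the indexes; the assert checks that both passes agreed.
 */
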
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    env->cp15.c3 = value;
    tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (env->cp15.c13_fcse != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(env, 1);
        env->cp15.c13_fcse = value;
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(env, 1);
    }
    env->cp15.c13_context = value;
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    tlb_flush(env, 1);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    tlb_flush(env, value == 0);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    tlb_flush_page(env, value & TARGET_PAGE_MASK);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* DBGDIDR: just RAZ. In particular this means the "debug architecture
     * version" bits will read as a reserved value, which should cause
     * Linux to not try to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

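/* Sketch, not from this section: reginfo arrays such as cp_reginfo[] are
 * attached to a CPU with define_arm_cp_regs(), normally from the
 * feature-driven registration code later in this file.
 */
#if 0 /* example only */
static void example_register_baseline_cp_regs(ARMCPU *cpu)
{
    define_arm_cp_regs(cpu, cp_reginfo);
    if (!arm_feature(&cpu->env, ARM_FEATURE_V6)) {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
}
#endif
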
static const ARMCPRegInfo not_v6_cp_reginfo[] = {
    /* Not all pre-v6 cores implemented this WFI, so this is slightly
     * over-broad.
     */
    { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_WFI },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    if (env->cp15.c1_coproc != value) {
        env->cp15.c1_coproc = value;
        /* ??? Is this safe when called from within a TB? */
        tb_flush(env);
    }
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR.
     */
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);
}

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    env->cp15.c12_vbar = value & ~0x1Ful;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    return cpu->ccsidr[env->cp15.c0_cssel];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c0_cssel = value & 0xf;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
      .resetvalue = 0 },
    { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
      .resetvalue = 0, },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     * The override is necessary because of the overly-broad TLB_LOCKDOWN
     * definition.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

[] = {
690 { .name
= "TEECR", .cp
= 14, .crn
= 0, .crm
= 0, .opc1
= 6, .opc2
= 0,
691 .access
= PL1_RW
, .fieldoffset
= offsetof(CPUARMState
, teecr
),
693 .writefn
= teecr_write
},
694 { .name
= "TEEHBR", .cp
= 14, .crn
= 1, .crm
= 0, .opc1
= 6, .opc2
= 0,
695 .access
= PL0_RW
, .fieldoffset
= offsetof(CPUARMState
, teehbr
),
696 .accessfn
= teehbr_access
, .resetvalue
= 0 },
static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_VIRT);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_VIRT);
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

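/* Worked example of the scaling above, assuming the usual GTIMER_SCALE of
 * 16ns per tick: dividing the QEMU_CLOCK_VIRTUAL nanosecond count by 16
 * yields a 62.5MHz counter, which is why CNTFRQ below resets to
 * (1000 * 1000 * 1000) / GTIMER_SCALE = 62500000.
 */
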
static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
    }
}

static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->opc1 & 1;

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->opc1 & 1;

    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int timeridx = ri->crm & 1;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      gt_get_countervalue(env));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->crm & 1;

    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
        sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

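/* Example of the TVAL view above: TVAL is the signed 32-bit distance from
 * the current count to cval. With count == 1000 and cval == 1600, a read
 * returns 600; writing 600 sets cval = count + 600, so the two directions
 * are consistent.
 */
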
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = value & 3;
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));
    }
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else
/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        env->cp15.c7_par = value;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        env->cp15.c7_par = value & 0xfffff6ff;
    } else {
        env->cp15.c7_par = value & 0xfffff1ff;
    }
}

#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */

/* Return true if extended addresses are enabled, ie this is an
 * LPAE implementation and we are using the long-descriptor translation
 * table format because the TTBCR EAE bit is set.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_LPAE)
        && (env->cp15.c2_control & (1U << 31));
}

static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (ri->opc2 & 4) {
        /* Other states are only available with TrustZone; in
         * a non-TZ implementation these registers don't exist
         * at all, which is an Uncategorized trap. This underdecoding
         * is safe because the reginfo is NO_MIGRATE.
         */
        return CP_ACCESS_TRAP_UNCATEGORIZED;
    }
    return CP_ACCESS_OK;
}

static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.c7_par = par64;
        env->cp15.c7_par_hi = par64 >> 32;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.c7_par = phys_addr & 0xfffff000;
            }
        } else {
            env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
                ((ret & (1 << 12)) >> 6) |
                ((ret & 0xf) << 1) | 1;
        }
        env->cp15.c7_par_hi = 0;
    }
}
#endif

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
#endif
    REGINFO_SENTINEL
};

/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

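/* Worked example for the two helpers above: extended value 0x31 (region 0
 * AP = 1, region 1 AP = 3) packs down to simple value 0xd, and
 * extended_mpu_ap_bits(0xd) expands back to 0x31.
 */
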
static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_data = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_data);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_insn = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_insn);
}

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    int maskshift = extract32(value, 0, 3);

    if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
        value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
    } else {
        value &= 7;
    }
    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    env->cp15.c2_control = value;
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
}

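/* Worked example for the masks above: TTBCR.N == 2 gives
 * c2_mask = ~(0xffffffffu >> 2) = 0xc0000000 and
 * c2_base_mask = ~(0x3fffu >> 2) = 0xfffff000, i.e. the top two VA bits
 * select TTBR1 and the TTBR0 table base is aligned to 4KB.
 */
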
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(env, 1);
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base_mask = 0xffffc000u;
    env->cp15.c2_control = 0;
    env->cp15.c2_mask = 0;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(env, 1);
    env->cp15.c2_control = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        tlb_flush(env, 1);
    }
    raw_write(env, ri, value);
}

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
      .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
    { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    value &= 0x3fff;
    if (env->cp15.c15_cpar != value) {
        /* Changes cp0 to cp13 behavior, so needs a TB flush. */
        tb_flush(env);
        env->cp15.c15_cpar = value;
    }
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11]) (known as Aff1
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
    }
    return mpidr;
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};

static uint64_t par64_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
}

static void par64_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c7_par_hi = value >> 32;
    env->cp15.c7_par = value;
}

static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c7_par_hi = 0;
    env->cp15.c7_par = 0;
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Invalidate by VA (AArch64 version) */
    uint64_t pageaddr = value << 12;
    tlb_flush_page(env, pageaddr);
}

static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    /* Invalidate by VA, all ASIDs (AArch64 version) */
    uint64_t pageaddr = value << 12;
    tlb_flush_page(env, pageaddr);
}

static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by ASID (AArch64 version) */
    int asid = extract64(value, 48, 16);
    tlb_flush(env, asid == 0);
}

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    /* Prohibit use of DC ZVA. OPTME: implement DC ZVA and allow its use.
     * For system mode the DZP bit here will need to be computed, not constant.
     */
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_CONST,
      .resetvalue = 0x10 },
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    /* Dummy implementation of monitor debug system control register:
     * we don't support debug.
     */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c1_sys = value;
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(env, 1);
}

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void define_aarch64_debug_regs(ARMCPU *cpu)
{
    /* Define breakpoint and watchpoint registers. These do nothing
     * but read as written, for now.
     */
    int i;

    for (i = 0; i < 16; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
            { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
            { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
            { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}

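/* Illustrative: each loop iteration defines the registers for one
 * breakpoint/watchpoint pair, e.g. i == 3 yields DBGBVR3_EL1
 * (op0=2 op1=0 CRn=0 CRm=3 op2=4) through DBGWCR3_EL1 (op2=7),
 * all backed by plain cp15 state fields.
 */
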
void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
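    /* Illustrative: with .cp = 15, .crn = 0, .crm = 2, .opc1 = 0,
     * .opc2 = 0 above, a guest "MRC p15, 0, Rt, c0, c2, 0" reads back
     * cpu->id_isar0 as an ARM_CP_CONST value.
     */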
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement no event counters.
         */
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &pmcr);
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values */
        ARMCPRegInfo v8_idregs[] = {
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr0 },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
        define_aarch64_debug_regs(cpu);
    }
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_cp_reginfo[] = {
            /* Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 0, .crm = 0,
              .access = PL1_R, .resetvalue = cpu->midr, .type = ARM_CP_CONST },
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        };
        define_one_arm_cp_reg(cpu, &auxcr);
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        ARMCPRegInfo cbar = {
            .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
            .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
            .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
        };
        define_one_arm_cp_reg(cpu, &cbar);
    }
    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}

ARMCPU *cpu_arm_init(const char *cpu_model)
{
    ARMCPU *cpu;
    ObjectClass *oc;

    oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
    if (!oc) {
        return NULL;
    }
    cpu = ARM_CPU(object_new(object_class_get_name(oc)));

    /* TODO this should be set centrally, once possible */
    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}

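/* The register counts match the XML layouts served by the VFP getters:
 * 34 = 32 V regs + FPSR + FPCR (aarch64-fpu.xml); 51 = 32 D regs +
 * FPSID/FPSCR/FPEXC + 16 Q aliases (arm-neon.xml); 35 and 19 are the
 * VFP3/VFP equivalents with 32 and 16 D regs respectively.
 */
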
/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}

static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
        /* The AArch32 view of a shared register sees the lower 32 bits
         * of a 64 bit backing field. It is not migratable as the AArch64
         * view handles that. AArch64 also handles reset.
         * We assume it is a cp15 register.
         */
        r2->cp = 15;
        r2->type |= ARM_CP_NO_MIGRATE;
        r2->resetfn = arm_cp_reset_ignore;
#ifdef HOST_WORDS_BIGENDIAN
        if (r2->fieldoffset) {
            r2->fieldoffset += sizeof(uint32_t);
        }
#endif
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         */
        if (r->cp == 0) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
    }
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * NO_MIGRATE so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable.
     */
    if ((r->type & ARM_CP_SPECIAL) ||
        ((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_NO_MIGRATE;
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}

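/* Illustrative: TLBI_VAE1IS (op0=1 op1=0 CRn=8 CRm=3 op2=1) ends up
 * stored under ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, 8, 3, 1, 0, 1),
 * matching the key computed when the SYS instruction is decoded.
 */
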
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    add_cpreg_to_hashtable(cpu, r, opaque, state,
                                           crm, opc1, opc2);
                }
            }
        }
    }
}

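/* Illustrative: a reginfo with .crm = CP_ANY and .opc2 = CP_ANY expands
 * to 16 * 8 = 128 hashtable entries per state here; add_cpreg_to_hashtable()
 * then keeps only the crm==0/opc1==0/opc2==0 instance migratable.
 */
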
void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | env->daif;
}

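/* The cached flags map onto CPSR bits: N is NF bit 31, Z is "ZF == 0"
 * at bit 30, C is CF at bit 29, V is VF bit 31 shifted down to bit 28,
 * and Q sits at bit 27.
 */
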
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    x = ((x & 0xff000000) >> 24)
      | ((x & 0x00ff0000) >> 8)
      | ((x & 0x0000ff00) << 8)
      | ((x & 0x000000ff) << 24);
    x = ((x & 0xf0f0f0f0) >> 4)
      | ((x & 0x0f0f0f0f) << 4);
    x = ((x & 0x88888888) >> 3)
      | ((x & 0x44444444) >> 1)
      | ((x & 0x22222222) << 1)
      | ((x & 0x11111111) << 3);
    return x;
}

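/* Illustrative: HELPER(rbit)(0x00000001) == 0x80000000; the three steps
 * swap bytes, then nibbles within bytes, then bits within nibbles.
 */
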
#if defined(CONFIG_USER_ONLY)

void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
                              int mmu_idx)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else

/* Map CPU modes onto saved register banks. */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

static void v7m_push(CPUARMState *env, uint32_t val)
{
    CPUState *cs = ENV_GET_CPU(env);

    env->regs[13] -= 4;
    stl_phys(cs->as, env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint32_t val;

    val = ldl_phys(cs->as, env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer. */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

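/* v7m_push/v7m_pop implement the v7M full-descending stack: push
 * pre-decrements SP by 4 and stores, pop loads and post-increments.
 */
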
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack. */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers. */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment. */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode. However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch. */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer. */
}

/* Exception names for debug logging; note that not all of these
 * precisely correspond to architectural exceptions.
 */
static const char * const excnames[] = {
    [EXCP_UDEF] = "Undefined Instruction",
    [EXCP_SWI] = "SVC",
    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
    [EXCP_DATA_ABORT] = "Data Abort",
    [EXCP_IRQ] = "IRQ",
    [EXCP_FIQ] = "FIQ",
    [EXCP_BKPT] = "Breakpoint",
    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
    [EXCP_STREX] = "QEMU intercept of STREX",
};

static inline void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}

void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(env->exception_index);

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it. */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising. */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    /* Align stack pointer. */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set. */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode. */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

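/* The eight pushes above build the architectural v7M exception frame:
 * r0 ends up at the lowest address and xPSR at the highest, matching
 * the order in which do_v7m_exception_exit() pops them back.
 */
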
/* Handle a CPU exception. */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    assert(!IS_M(env));

    arm_log_exception(env->exception_index);

    /* TODO: Vectored interrupt controller. */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt. */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security. */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction. */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall. */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.c5_insn, env->cp15.c6_insn);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->cp15.c5_data, env->cp15.c6_data);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }
    /* High vectors. */
    if (env->cp15.c1_sys & SCTLR_V) {
        /* when enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and has a secure
         * and un-secure copy. Since the cpu is always in a un-secure operation
         * and is never in monitor mode this feature is always active.
         * Note: only bits 31:5 are valid.
         */
        addr += env->cp15.c12_vbar;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits. */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set. */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->daif |= mask;
    /* this is a lie, as there was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted. */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        if (access_type == 1)
            return 0;
        switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}

static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.ttbr1_el1 & 0xffffc000;
    else
        table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}

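/* (address >> 18) & 0x3ffc selects VA[31:20] scaled by the 4-byte
 * descriptor size, i.e. the level 1 table index for a 1MB-granule walk.
 */
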
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = ENV_GET_CPU(env);
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault. */
        else
            code = 11; /* Page domain fault. */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(cs->as, table);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page. */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault. */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault. */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = ENV_GET_CPU(env);
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault. */
        } else {
            code = 11; /* Page domain fault. */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(cs->as, table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit. */
        if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
            /* Access flag fault. */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault. */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;

static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    CPUState *cs = ENV_GET_CPU(env);
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    uint32_t tsz;
    uint64_t ttbr;
    int ttbr_select;
    int n;
    hwaddr descaddr;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
    if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = env->cp15.ttbr0_el1;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;
    } else {
        ttbr = env->cp15.ttbr1_el1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* If the region is small enough we will skip straight to a 2nd level
     * lookup. This affects the number of bits of the address used in
     * combination with the TTBR to find the first descriptor. ('n' here
     * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
     * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
     */
    if (tsz > 1) {
        level = 2;
        n = 14 - tsz;
    } else {
        n = 5 - tsz;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    address &= (0xffffffffU >> tsz);

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 40);
    descaddr &= ~((1ULL << n) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
        descriptor = ldq_phys(cs->as, descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << (39 - (9 * level)));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}

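/* page_size = 1 << (39 - 9 * level) gives the usual 4K-granule sizes:
 * a level 1 block covers 1GB, a level 2 block 2MB, a level 3 page 4KB.
 */
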
static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}

/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension. */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & SCTLR_M) == 0) {
        /* MMU/MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}

int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
                              int access_type, int mmu_idx)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page. */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0) {
        return -1;
    }

    return phys_addr;
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->daif & PSTATE_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->daif & PSTATE_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}

void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly. */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1) {
            env->daif |= PSTATE_F;
        } else {
            env->daif &= ~PSTATE_F;
        }
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only. */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}

#endif


/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */
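
/* Annotation (not in the original source): a minimal sketch of the idiom the
 * note above describes, with a hypothetical helper name.  The arithmetic is
 * performed in an unsigned type, where wrap-around is well defined, and the
 * sign bit of the result is then inspected directly, exactly as the
 * saturating helpers below do.
 */
static inline uint16_t example_wrapping_add16(uint16_t a, uint16_t b)
{
    /* The uint16_t operands promote to int, but converting the sum back
     * to uint16_t reduces it modulo 2^16, so no signed overflow occurs.
     */
    return (uint16_t)(a + b);
}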

/* Signed saturating arithmetic.  */

/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}

/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}

#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
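
/* Annotation (not in the original source): op_addsub.h acts as a template
 * header.  Each of its inclusions in this file first redefines the
 * ADD16/SUB16/ADD8/SUB8 macros and the PFX helper-name prefix (q, uq, s, u,
 * sh, uh), so the same skeleton is stamped out once per flavour of the
 * ARMv6 parallel add/subtract instructions: saturating, modulo (with GE
 * flag setting), and halving.
 */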

/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"

/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while(0)

#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"

/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while(0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while(0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while(0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while(0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"

/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"

/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
  RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
  RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"

static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}

/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
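
/* Annotation (not in the original source): each of the low four bits of
 * "flags" corresponds to one CPSR GE flag and thus to one byte lane, so SEL
 * takes each result byte from operand a where the matching GE bit is set and
 * from operand b otherwise.
 */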

/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}

uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}

/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}

void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}

#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP

float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}

/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
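
/* Annotation (not in the original source): the "flags" nibble stored into
 * FPSCR[31:28] above is the standard ARM NZCV encoding of an FP comparison:
 * 0x6 (Z,C) for equal, 0x8 (N) for less than, 0x2 (C) for greater than and
 * 0x3 (C,V) for unordered.  cmpe differs from cmp only in using the
 * signaling compare, which raises Invalid Operation on quiet NaNs too.
 */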

/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
    float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS

/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}

/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
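
/* Annotation (not in the original source): as an example of the token
 * pasting, VFP_CONV_FIX(sl, d, 64, 64, int32) should expand into the three
 * helpers vfp_sltod (fixed-to-float), vfp_tosld_round_to_zero and vfp_tosld
 * (float-to-fixed, with and without explicit round-to-zero).
 */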

#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND

/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}

/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
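
/* Annotation (not in the original source): a sketch of the intended
 * save/modify/restore pattern described in the comment above (helper_*
 * being the C-level name the HELPER() macro expands to):
 *
 *     uint32_t prev = helper_set_neon_rmode(float_round_up, env);
 *     ... perform the NEON operation ...
 *     helper_set_neon_rmode(prev, env);
 */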

/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}

static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}

float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}

float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
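
/* Annotation (not in the original source): the "ieee" test in the
 * conversions above reads FPSCR bit 26, the AHP (Alternative Half-Precision)
 * control bit.  When AHP is clear the IEEE 754 half-precision format is used
 * and any NaN result must be silenced; when set, ARM's alternative format
 * (which has no infinities or NaNs) is selected.
 */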

#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}

float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
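
/* Annotation (not in the original source): recps_f32 and rsqrts_f32 return
 * the Newton-Raphson correction factors 2 - a*b and (3 - a*b) / 2.
 * Multiplying these into the current estimate refines a reciprocal or a
 * reciprocal square root respectively; the infinity-times-zero special
 * cases above pin the result to the iteration's exact fixed point
 * (2.0 or 1.5).
 */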

/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
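
/* Annotation (not in the original source): a worked example of the estimate
 * for a = 0.7: q = (int)(0.7 * 512) = 358, r = 1.0 / (358.5 / 512)
 * ~= 1.42817, s = (int)(256 * 1.42817 + 0.5) = 366, so the returned
 * estimate is 366 / 256 = 1.4296875 against a true reciprocal of ~1.42857.
 */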

float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);
    return make_float32(val32);
}

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        int64_t q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0;*/
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}

float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}

uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate (f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}

/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}

/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}