#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* For crc32 */

#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size);

/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRE   0x1
#endif
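/* PMCR bits used below: E (bit 0) enables the counters, C (bit 2) resets
 * the cycle counter, and D (bit 3) selects 1/64 cycle-count scaling. The
 * mask 0x39 applied in pmcr_write() is these E and D bits plus the X and
 * DP bits (bits 4 and 5), the set of writable PMCR bits.
 */
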
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}
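
/* The GDB register map exposed here is: D0..D15 (or D0..D31 with VFP3),
 * then for NEON the quadword aliases built from consecutive D-register
 * pairs, then FPSID/FPSCR/FPEXC as 32-bit registers. The return value is
 * the number of bytes transferred through the buffer, with 0 meaning
 * "no such register".
 */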

static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
    }
    return 0;
}

static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        stfq_le_p(buf, env->vfp.regs[reg * 2]);
        stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
        return 16;
    case 32:
        /* FPSR */
        stl_p(buf, vfp_get_fpsr(env));
        return 4;
    case 33:
        /* FPCR */
        stl_p(buf, vfp_get_fpcr(env));
        return 4;
    default:
        return 0;
    }
}

static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
{
    switch (reg) {
    case 0 ... 31:
        /* 128 bit FP register */
        env->vfp.regs[reg * 2] = ldfq_le_p(buf);
        env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
        return 16;
    case 32:
        /* FPSR */
        vfp_set_fpsr(env, ldl_p(buf));
        return 4;
    case 33:
        /* FPCR */
        vfp_set_fpcr(env, ldl_p(buf));
        return 4;
    default:
        return 0;
    }
}

static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (cpreg_field_is_64bit(ri)) {
        return CPREG_FIELD64(env, ri);
    } else {
        return CPREG_FIELD32(env, ri);
    }
}

static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                      uint64_t value)
{
    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(env, ri) = value;
    } else {
        CPREG_FIELD32(env, ri) = value;
    }
}
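
/* raw_read() and raw_write() access the CPUARMState field backing a
 * register directly, without any readfn/writefn side effects; they are
 * suitable as .raw_readfn/.raw_writefn for registers whose normal
 * accessors have side effects (TLB flushes etc.) that migration must
 * not trigger.
 */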

static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Raw read of a coprocessor register (as needed for migration, etc). */
    if (ri->type & ARM_CP_CONST) {
        return ri->resetvalue;
    } else if (ri->raw_readfn) {
        return ri->raw_readfn(env, ri);
    } else if (ri->readfn) {
        return ri->readfn(env, ri);
    } else {
        return raw_read(env, ri);
    }
}

static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t v)
{
    /* Raw write of a coprocessor register (as needed for migration, etc).
     * Note that constant registers are treated as write-ignored; the
     * caller should check for success by whether a readback gives the
     * value written.
     */
    if (ri->type & ARM_CP_CONST) {
        return;
    } else if (ri->raw_writefn) {
        ri->raw_writefn(env, ri, v);
    } else if (ri->writefn) {
        ri->writefn(env, ri, v);
    } else {
        raw_write(env, ri, v);
    }
}

bool write_cpustate_to_list(ARMCPU *cpu)
{
    /* Write the coprocessor state from cpu->env to the (index,value) list. */
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
    }
    return ok;
}

bool write_list_to_cpustate(ARMCPU *cpu)
{
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
        uint64_t v = cpu->cpreg_values[i];
        const ARMCPRegInfo *ri;

        ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
        if (!ri) {
            ok = false;
            continue;
        }
        if (ri->type & ARM_CP_NO_MIGRATE) {
            continue;
        }
        /* Write value and confirm it reads back as written
         * (to catch read-only registers and partially read-only
         * registers where the incoming migration value doesn't match)
         */
        write_raw_cp_reg(&cpu->env, ri, v);
        if (read_raw_cp_reg(&cpu->env, ri) != v) {
            ok = false;
        }
    }
    return ok;
}

static void add_cpreg_to_list(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
        /* The value array need not be initialized at this point */
        cpu->cpreg_array_len++;
    }
}

static void count_cpreg(gpointer key, gpointer opaque)
{
    ARMCPU *cpu = opaque;
    uint64_t regidx;
    const ARMCPRegInfo *ri;

    regidx = *(uint32_t *)key;
    ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);

    if (!(ri->type & ARM_CP_NO_MIGRATE)) {
        cpu->cpreg_array_len++;
    }
}

static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
{
    uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
    uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);

    if (aidx > bidx) {
        return 1;
    }
    if (aidx < bidx) {
        return -1;
    }
    return 0;
}

static void cpreg_make_keylist(gpointer key, gpointer value, gpointer udata)
{
    GList **plist = udata;

    *plist = g_list_prepend(*plist, key);
}

void init_cpreg_list(ARMCPU *cpu)
{
    /* Initialise the cpreg_tuples[] array based on the cp_regs hash.
     * Note that we require cpreg_tuples[] to be sorted by key ID.
     */
    GList *keys = NULL;
    int arraylen;

    g_hash_table_foreach(cpu->cp_regs, cpreg_make_keylist, &keys);

    keys = g_list_sort(keys, cpreg_key_compare);

    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, count_cpreg, cpu);

    arraylen = cpu->cpreg_array_len;
    cpu->cpreg_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    cpu->cpreg_array_len = 0;

    g_list_foreach(keys, add_cpreg_to_list, cpu);

    assert(cpu->cpreg_array_len == arraylen);

    g_list_free(keys);
}
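
/* init_cpreg_list() makes two passes over the sorted key list: count_cpreg()
 * sizes the arrays, then add_cpreg_to_list() fills in the indexes, reusing
 * cpreg_array_len as the fill cursor; the final assert checks that the two
 * passes agreed on the number of migratable registers.
 */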

static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    env->cp15.c3 = value;
    tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
}

static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (env->cp15.c13_fcse != value) {
        /* Unlike real hardware the qemu TLB uses virtual addresses,
         * not modified virtual addresses, so this causes a TLB flush.
         */
        tlb_flush(env, 1);
        env->cp15.c13_fcse = value;
    }
}

static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
        /* For VMSA (when not using the LPAE long descriptor page table
         * format) this register includes the ASID, so do a TLB flush.
         * For PMSA it is purely a process ID and no action is needed.
         */
        tlb_flush(env, 1);
    }
    env->cp15.c13_context = value;
}

static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate all (TLBIALL) */
    tlb_flush(env, 1);
}

static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate by ASID (TLBIASID) */
    tlb_flush(env, value == 0);
}

static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
    ARMCPU *cpu = arm_env_get_cpu(env);

    tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}

static const ARMCPRegInfo cp_reginfo[] = {
    /* DBGDIDR: just RAZ. In particular this means the "debug architecture
     * version" bits will read as a reserved value, which should cause
     * Linux to not try to use the debug hardware.
     */
    { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MMU Domain access control / MPU write buffer control */
    { .name = "DACR", .cp = 15,
      .crn = 3, .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c3),
      .resetvalue = 0, .writefn = dacr_write, .raw_writefn = raw_write, },
    { .name = "FCSEIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
      .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
    { .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
      .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
    /* ??? This covers not just the impdef TLB lockdown registers but also
     * some v7VMSA registers relating to TEX remap, so it is overly broad.
     */
    { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
    /* MMU TLB control. Note that the wildcarding means we cover not just
     * the unified TLB ops but also the dside/iside/inner-shareable variants.
     */
    { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
      .type = ARM_CP_NO_MIGRATE },
    /* Cache maintenance ops; some of this space may be overridden later. */
    { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
    REGINFO_SENTINEL
};

[] = {
412 /* Not all pre-v6 cores implemented this WFI, so this is slightly
415 { .name
= "WFI_v5", .cp
= 15, .crn
= 7, .crm
= 8, .opc1
= 0, .opc2
= 2,
416 .access
= PL1_W
, .type
= ARM_CP_WFI
},
static const ARMCPRegInfo not_v7_cp_reginfo[] = {
    /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
     * is UNPREDICTABLE; we choose to NOP as most implementations do).
     */
    { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_WFI },
    /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
     * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
     * OMAPCP will override this space.
     */
    { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
      .resetvalue = 0 },
    { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
      .resetvalue = 0 },
    /* v6 doesn't have the cache ID registers but Linux reads them anyway */
    { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    if (env->cp15.c1_coproc != value) {
        env->cp15.c1_coproc = value;
        /* ??? Is this safe when called from within a TB?  */
        tb_flush(env);
    }
}

static const ARMCPRegInfo v6_cp_reginfo[] = {
    /* prefetch by MVA in v6, NOP in v7 */
    { .name = "MVA_prefetch",
      .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
      .access = PL0_W, .type = ARM_CP_NOP },
    { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
      .resetvalue = 0, },
    /* Watchpoint Fault Address Register : should actually only be present
     * for 1136, 1176, 11MPCore.
     */
    { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
    { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
      .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
      .resetvalue = 0, .writefn = cpacr_write },
    REGINFO_SENTINEL
};

static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Performance monitor registers user accessibility is controlled
     * by PMUSERENR.
     */
    if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

#ifndef CONFIG_USER_ONLY
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Don't compute the number of ticks in user mode */
    uint32_t temp_ticks;

    temp_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
                 get_ticks_per_sec() / 1000000;

    if (env->cp15.c9_pmcr & PMCRE) {
        /* If the counter is enabled */
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            env->cp15.c15_ccnt = (temp_ticks / 64) - env->cp15.c15_ccnt;
        } else {
            env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
        }
    }

    if (value & PMCRC) {
        /* The counter has been reset */
        env->cp15.c15_ccnt = 0;
    }

    /* only the DP, X, D and E bits are writable */
    env->cp15.c9_pmcr &= ~0x39;
    env->cp15.c9_pmcr |= (value & 0x39);

    if (env->cp15.c9_pmcr & PMCRE) {
        if (env->cp15.c9_pmcr & PMCRD) {
            /* Increment once every 64 processor clock cycles */
            temp_ticks /= 64;
        }
        env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
    }
}
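
/* Note the encoding trick used while the counter is enabled: c15_ccnt holds
 * (ticks_at_enable - count_at_enable), so a later read can reconstruct the
 * current count as (ticks_now - c15_ccnt) without any periodic updates.
 * The swap "c15_ccnt = temp_ticks - c15_ccnt" converts between this stored
 * delta and an absolute count at each enable/disable or D-bit transition.
 */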

static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    uint32_t total_ticks;

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        /* Counter is disabled, do not change value */
        return env->cp15.c15_ccnt;
    }

    total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
                  get_ticks_per_sec() / 1000000;

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    return total_ticks - env->cp15.c15_ccnt;
}

static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    uint32_t total_ticks;

    if (!(env->cp15.c9_pmcr & PMCRE)) {
        /* Counter is disabled, set the absolute value */
        env->cp15.c15_ccnt = value;
        return;
    }

    total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
                  get_ticks_per_sec() / 1000000;

    if (env->cp15.c9_pmcr & PMCRD) {
        /* Increment once every 64 processor clock cycles */
        total_ticks /= 64;
    }
    env->cp15.c15_ccnt = total_ticks - value;
}
#endif

static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten |= value;
}

static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pmcnten &= ~value;
}

static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c9_pmovsr &= ~value;
}

static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    env->cp15.c9_pmxevtyper = value & 0xff;
}

static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    env->cp15.c9_pmuserenr = value & 1;
}

static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* We have no event counters so only the C bit can be changed */
    value &= (1 << 31);
    env->cp15.c9_pminten |= value;
}

static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    value &= (1 << 31);
    env->cp15.c9_pminten &= ~value;
}

static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                       uint64_t value)
{
    /* Note that even though the AArch64 view of this register has bits
     * [10:0] all RES0 we can only mask the bottom 5, to comply with the
     * architectural requirements for bits which are RES0 only in some
     * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
     * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
     */
    env->cp15.c12_vbar = value & ~0x1Ful;
}

static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    return cpu->ccsidr[env->cp15.c0_cssel];
}

static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    env->cp15.c0_cssel = value & 0xf;
}

static const ARMCPRegInfo v7_cp_reginfo[] = {
    /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
     * debug components
     */
    { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
    { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* Performance monitors are implementation defined in v7,
     * but with an ARM recommended set of registers, which we
     * follow (although we don't actually implement any counters)
     *
     * Performance registers fall into three categories:
     *  (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
     *  (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
     *  (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
     * For the cases controlled by PMUSERENR we must set .access to PL0_RW
     * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
     */
    { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .writefn = pmcntenset_write,
      .accessfn = pmreg_access,
      .raw_writefn = raw_write },
    { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
      .accessfn = pmreg_access,
      .writefn = pmcntenclr_write,
      .type = ARM_CP_NO_MIGRATE },
    { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
      .accessfn = pmreg_access,
      .writefn = pmovsr_write,
      .raw_writefn = raw_write },
    /* Unimplemented so WI. */
    { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
    /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
     * We choose to RAZ/WI.
     */
    { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
#ifndef CONFIG_USER_ONLY
    { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
      .readfn = pmccntr_read, .writefn = pmccntr_write,
      .accessfn = pmreg_access },
#endif
    { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
      .accessfn = pmreg_access, .writefn = pmxevtyper_write,
      .raw_writefn = raw_write },
    /* Unimplemented, RAZ/WI. */
    { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
      .accessfn = pmreg_access },
    { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
      .access = PL0_R | PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
      .resetvalue = 0,
      .writefn = pmuserenr_write, .raw_writefn = raw_write },
    { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0,
      .writefn = pmintenset_write, .raw_writefn = raw_write },
    { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
      .resetvalue = 0, .writefn = pmintenclr_write, },
    { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .writefn = vbar_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
      .resetvalue = 0 },
    { .name = "SCR", .cp = 15, .crn = 1, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_scr),
      .resetvalue = 0, },
    { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_MIGRATE },
    { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c0_cssel),
      .writefn = csselr_write, .resetvalue = 0 },
    /* Auxiliary ID register: this actually has an IMPDEF value but for now
     * just RAZ for all cores:
     */
    { .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* MAIR can just read-as-written because we don't implement caches
     * and so don't need to care about memory attributes.
     */
    { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
      .resetvalue = 0 },
    /* For non-long-descriptor page tables these are PRRR and NMRR;
     * regardless they still act as reads-as-written for QEMU.
     * The override is necessary because of the overly-broad TLB_LOCKDOWN
     * definition.
     */
    { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
      .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
      .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    value &= 1;
    env->teecr = value;
}

static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    if (arm_current_pl(env) == 0 && (env->teecr & 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static const ARMCPRegInfo t2ee_cp_reginfo[] = {
    { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
      .resetvalue = 0,
      .writefn = teecr_write },
    { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
      .accessfn = teehbr_access, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo v6k_cp_reginfo[] = {
    { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
      .access = PL0_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
    { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL0_RW,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
    { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL0_R|PL1_W,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
      .resetfn = arm_cp_reset_ignore },
    { .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
    REGINFO_SENTINEL
};

#ifndef CONFIG_USER_ONLY

static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
    if (arm_current_pl(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
{
    /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
     * EL0[PV]TEN is zero.
     */
    if (arm_current_pl(env) == 0 &&
        !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static CPAccessResult gt_pct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vct_access(CPUARMState *env,
                                    const ARMCPRegInfo *ri)
{
    return gt_counter_access(env, GTIMER_VIRT);
}

static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_PHYS);
}

static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_timer_access(env, GTIMER_VIRT);
}

static uint64_t gt_get_countervalue(CPUARMState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
}

static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
{
    ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];

    if (gt->ctl & 1) {
        /* Timer enabled: calculate and set current ISTATUS, irq, and
         * reset timer to when ISTATUS next has to change
         */
        uint64_t count = gt_get_countervalue(&cpu->env);
        /* Note that this must be unsigned 64 bit arithmetic: */
        int istatus = count >= gt->cval;
        uint64_t nexttick;

        gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (istatus && !(gt->ctl & 2)));
        if (istatus) {
            /* Next transition is when count rolls back over to zero */
            nexttick = UINT64_MAX;
        } else {
            /* Next transition is when we hit cval */
            nexttick = gt->cval;
        }
        /* Note that the desired next expiry time might be beyond the
         * signed-64-bit range of a QEMUTimer -- in this case we just
         * set the timer for as far in the future as possible. When the
         * timer expires we will reset the timer for any remaining period.
         */
        if (nexttick > INT64_MAX / GTIMER_SCALE) {
            nexttick = INT64_MAX / GTIMER_SCALE;
        }
        timer_mod(cpu->gt_timer[timeridx], nexttick);
    } else {
        /* Timer disabled: ISTATUS and timer output always clear */
        gt->ctl &= ~4;
        qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
        timer_del(cpu->gt_timer[timeridx]);
    }
}
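
/* ISTATUS is simply (count >= cval). While the output is already asserted
 * the next state change would only happen when the 64-bit count wraps back
 * past cval, hence a nexttick of UINT64_MAX; otherwise the QEMU timer is
 * set to fire when the count reaches cval, clamped to the signed range a
 * QEMUTimer can represent. The interrupt line follows ISTATUS gated by the
 * IMASK bit (bit 1 of CTL).
 */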

static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->opc1 & 1;

    timer_del(cpu->gt_timer[timeridx]);
}

static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return gt_get_countervalue(env);
}

static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->opc1 & 1;

    env->cp15.c14_timer[timeridx].cval = value;
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}

static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    int timeridx = ri->crm & 1;

    return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
                      gt_get_countervalue(env));
}

static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    int timeridx = ri->crm & 1;

    env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
                                         sextract64(value, 0, 32);
    gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
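
/* TVAL is a signed 32-bit downcounter view of the same state: reading
 * returns (uint32_t)(cval - count), and writing a sign-extended value sets
 * cval = count + value, so TVAL counts down towards zero and the timer
 * condition is met when it would go negative.
 */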

static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int timeridx = ri->crm & 1;
    uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;

    env->cp15.c14_timer[timeridx].ctl = value & 3;
    if ((oldval ^ value) & 1) {
        /* Enable toggled */
        gt_recalc_timer(cpu, timeridx);
    } else if ((oldval ^ value) & 2) {
        /* IMASK toggled: don't need to recalculate,
         * just set the interrupt line based on ISTATUS
         */
        qemu_set_irq(cpu->gt_timer_outputs[timeridx],
                     (oldval & 4) && !(value & 2));
    }
}

void arm_gt_ptimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_PHYS);
}

void arm_gt_vtimer_cb(void *opaque)
{
    ARMCPU *cpu = opaque;

    gt_recalc_timer(cpu, GTIMER_VIRT);
}

static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    /* Note that CNTFRQ is purely reads-as-written for the benefit
     * of software; writing it doesn't actually change the timer frequency.
     * Our reset value matches the fixed frequency we implement the timer at.
     */
    { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
      .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
      .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
      .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
    },
    /* overall control: mostly access permissions */
    { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
      .resetvalue = 0,
    },
    /* per-timer control */
    { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetoflow32(CPUARMState,
                                   cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetfn = arm_cp_reset_ignore,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
      .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
      .resetvalue = 0,
      .writefn = gt_ctl_write, .raw_writefn = raw_write,
    },
    /* TimerValue views: a 32 bit downcounting view of the underlying state */
    { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_ptimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .accessfn = gt_vtimer_access,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
      .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
      .readfn = gt_tval_read, .writefn = gt_tval_write,
    },
    /* The counter itself */
    { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_pct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
      .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
    },
    { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
      .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
      .accessfn = gt_vct_access,
      .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
    },
    /* Comparison value, indicating when the timer goes off */
    { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
      .access = PL1_RW | PL0_R,
      .type = ARM_CP_IO,
      .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
      .resetvalue = 0, .accessfn = gt_vtimer_access,
      .writefn = gt_cval_write, .raw_writefn = raw_write,
    },
    REGINFO_SENTINEL
};

#else
/* In user-mode none of the generic timer registers are accessible,
 * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
 * so instead just don't register any of them.
 */
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
    REGINFO_SENTINEL
};

#endif

static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        env->cp15.c7_par = value;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        env->cp15.c7_par = value & 0xfffff6ff;
    } else {
        env->cp15.c7_par = value & 0xfffff1ff;
    }
}

1141 /* get_phys_addr() isn't present for user-mode-only targets */
1143 /* Return true if extended addresses are enabled, ie this is an
1144 * LPAE implementation and we are using the long-descriptor translation
1145 * table format because the TTBCR EAE bit is set.
1147 static inline bool extended_addresses_enabled(CPUARMState
*env
)
1149 return arm_feature(env
, ARM_FEATURE_LPAE
)
1150 && (env
->cp15
.c2_control
& (1U << 31));
1153 static CPAccessResult
ats_access(CPUARMState
*env
, const ARMCPRegInfo
*ri
)
1156 /* Other states are only available with TrustZone; in
1157 * a non-TZ implementation these registers don't exist
1158 * at all, which is an Uncategorized trap. This underdecoding
1159 * is safe because the reginfo is NO_MIGRATE.
1161 return CP_ACCESS_TRAP_UNCATEGORIZED
;
1163 return CP_ACCESS_OK
;
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user = ri->opc2 & 2;
    int access_type = ri->opc2 & 1;

    ret = get_phys_addr(env, value, access_type, is_user,
                        &phys_addr, &prot, &page_size);
    if (extended_addresses_enabled(env)) {
        /* ret is a DFSR/IFSR value for the long descriptor
         * translation table format, but with WnR always clear.
         * Convert it to a 64-bit PAR.
         */
        uint64_t par64 = (1 << 11); /* LPAE bit always set */
        if (ret == 0) {
            par64 |= phys_addr & ~0xfffULL;
            /* We don't set the ATTR or SH fields in the PAR. */
        } else {
            par64 |= 1; /* F */
            par64 |= (ret & 0x3f) << 1; /* FS */
            /* Note that S2WLK and FSTAGE are always zero, because we don't
             * implement virtualization and therefore there can't be a stage 2
             * fault.
             */
        }
        env->cp15.c7_par = par64;
        env->cp15.c7_par_hi = par64 >> 32;
    } else {
        /* ret is a DFSR/IFSR value for the short descriptor
         * translation table format (with WnR always clear).
         * Convert it to a 32-bit PAR.
         */
        if (ret == 0) {
            /* We do not set any attribute bits in the PAR */
            if (page_size == (1 << 24)
                && arm_feature(env, ARM_FEATURE_V7)) {
                env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
            } else {
                env->cp15.c7_par = phys_addr & 0xfffff000;
            }
        } else {
            env->cp15.c7_par = ((ret & (1 << 10)) >> 5) |
                ((ret & (1 << 12)) >> 6) |
                ((ret & 0xf) << 1) | 1;
        }
        env->cp15.c7_par_hi = 0;
    }
}
#endif
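
/* The PAR formats produced above and exposed through the registers below:
 * with extended addresses enabled the PAR is 64 bits, with the LPAE bit
 * (bit 11) always set, PA[47:12] on success, or the F bit plus a
 * long-descriptor FS field on failure. Otherwise the PAR is 32 bits,
 * holding PA[31:12] (or PA[31:24] for a 16MB supersection on v7) on
 * success, or the short-descriptor fault status bits repacked with bit 0
 * set on failure.
 */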

static const ARMCPRegInfo vapa_cp_reginfo[] = {
    { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c7_par),
      .writefn = par_write },
#ifndef CONFIG_USER_ONLY
    { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
      .access = PL1_W, .accessfn = ats_access,
      .writefn = ats_write, .type = ARM_CP_NO_MIGRATE },
#endif
    REGINFO_SENTINEL
};

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
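
/* These two helpers convert between the "simple" format, which packs one
 * 2-bit AP field per region into the low 16 bits, and the extended format,
 * which gives each region a 4-bit slot (with the top two bits unused).
 * For example extended_mpu_ap_bits(0xe) == 0x32: field 0 (0b10) stays in
 * bits [1:0] and field 1 (0b11) moves up to bits [5:4].
 */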

static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_data = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_data);
}

static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    env->cp15.c5_insn = extended_mpu_ap_bits(value);
}

static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return simple_mpu_ap_bits(env->cp15.c5_insn);
}

static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
    { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0,
      .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
    { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0,
      .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
    { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
    { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
    /* Protection region base and size registers */
    { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
    { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
    { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
    { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
    { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
    { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
    { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
    { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
      .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
    REGINFO_SENTINEL
};

static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    int maskshift = extract32(value, 0, 3);

    if (arm_feature(env, ARM_FEATURE_LPAE) && (value & (1 << 31))) {
        value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
    } else {
        value &= 7;
    }
    /* Note that we always calculate c2_mask and c2_base_mask, but
     * they are only used for short-descriptor tables (ie if EAE is 0);
     * for long-descriptor tables the TTBCR fields are used differently
     * and the c2_mask and c2_base_mask values are meaningless.
     */
    env->cp15.c2_control = value;
    env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> maskshift);
    env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> maskshift);
}
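
/* c2_mask/c2_base_mask follow the short-descriptor TTBCR.N split: with
 * maskshift = N, c2_mask selects the top N bits of the VA (which choose
 * TTBR1 over TTBR0 when non-zero) and c2_base_mask aligns the TTBR0 base
 * to the 2^(14-N)-byte boundary the smaller level-1 table requires.
 * For example N=2 gives c2_mask 0xc0000000 and c2_base_mask 0xfffff000.
 */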

static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        /* With LPAE the TTBCR could result in a change of ASID
         * via the TTBCR.A1 bit, so do a TLB flush.
         */
        tlb_flush(env, 1);
    }
    vmsa_ttbcr_raw_write(env, ri, value);
}

static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c2_base_mask = 0xffffc000u;
    env->cp15.c2_control = 0;
    env->cp15.c2_mask = 0;
}

static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
    tlb_flush(env, 1);
    env->cp15.c2_control = value;
}

static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* 64 bit accesses to the TTBRs can change the ASID and so we
     * must flush the TLB.
     */
    if (cpreg_field_is_64bit(ri)) {
        tlb_flush(env, 1);
    }
    raw_write(env, ri, value);
}

static const ARMCPRegInfo vmsa_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
    { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetvalue = 0 },
    { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
      .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
      .fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
    { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
      .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
    { .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_ticonfig = value & 0xe7;
    /* The OS_TYPE bit in this register changes the reported CPUID! */
    env->cp15.c0_cpuid = (value & (1 << 5)) ?
        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
}

static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    env->cp15.c15_threadid = value & 0xffff;
}

static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* Wait-for-interrupt (deprecated) */
    cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
}

static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    /* On OMAP there are registers indicating the max/min index of dcache lines
     * containing a dirty line; cache flush operations have to reset these.
     */
    env->cp15.c15_i_max = 0x000;
    env->cp15.c15_i_min = 0xff0;
}

static const ARMCPRegInfo omap_cp_reginfo[] = {
    { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
      .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
      .fieldoffset = offsetof(CPUARMState, cp15.c5_data), .resetvalue = 0, },
    { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_NOP },
    { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
      .writefn = omap_ticonfig_write },
    { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
    { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0xff0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
    { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
      .writefn = omap_threadid_write },
    { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
      .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .type = ARM_CP_NO_MIGRATE,
      .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
    /* TODO: Peripheral port remap register:
     * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
     * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
     * when MMU is off.
     */
    { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
      .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
      .type = ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE,
      .writefn = omap_cachemaint_write },
    { .name = "C9", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
{
    if (env->cp15.c15_cpar != value) {
        /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
        tb_flush(env);
        env->cp15.c15_cpar = value;
    }
}

static const ARMCPRegInfo xscale_cp_reginfo[] = {
    { .name = "XSCALE_CPAR",
      .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
      .writefn = xscale_cpar_write, },
    { .name = "XSCALE_AUXCR",
      .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
      .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
      .resetvalue = 0, },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
    /* RAZ/WI the whole crn=15 space, when we don't have a more specific
     * implementation of this implementation-defined space.
     * Ideally this should eventually disappear in favour of actually
     * implementing the correct behaviour for all cores.
     */
    { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW,
      .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
    /* Cache status: RAZ because we have no cache so it's always clean */
    { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
      .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
    /* We never have a block transfer operation in progress */
    { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = 0 },
    /* The cache ops themselves: these all NOP for QEMU */
    { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
      .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
      .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
    /* The cache test-and-clean instructions always return (1 << 30)
     * to indicate that there are no dirty cache lines.
     */
    { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
      .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_MIGRATE,
      .resetvalue = (1 << 30) },
    REGINFO_SENTINEL
};

static const ARMCPRegInfo strongarm_cp_reginfo[] = {
    /* Ignore ReadBuffer accesses */
    { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
      .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
      .access = PL1_RW, .resetvalue = 0,
      .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};

static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t mpidr = cs->cpu_index;
    /* We don't support setting cluster ID ([8..11]) (known as Aff1
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (arm_feature(env, ARM_FEATURE_V7MP)) {
        mpidr |= (1U << 31);
        /* Cores which are uniprocessor (non-coherent)
         * but still implement the MP extensions set
         * bit 30. (For instance, A9UP.) However we do
         * not currently model any of those cores.
         */
    }
    return mpidr;
}

static const ARMCPRegInfo mpidr_cp_reginfo[] = {
    { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
      .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
    REGINFO_SENTINEL
};

static uint64_t par64_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
}

static void par64_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c7_par_hi = value >> 32;
    env->cp15.c7_par = value;
}

static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    env->cp15.c7_par_hi = 0;
    env->cp15.c7_par = 0;
}

static const ARMCPRegInfo lpae_cp_reginfo[] = {
    /* NOP AMAIR0/1: the override is because these clash with the rather
     * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
     */
    { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
    { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
      .resetvalue = 0 },
    /* 64 bit access versions of the (dummy) debug registers */
    { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
      .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
    { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT,
      .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
    { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
      .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
      .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
      .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
    REGINFO_SENTINEL
};

static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpcr(env);
}

static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpcr(env, value);
}

static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    return vfp_get_fpsr(env);
}

static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    vfp_set_fpsr(env, value);
}

static CPAccessResult aa64_cacheop_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri)
{
    /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
     * SCTLR_EL1.UCI is set.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCI)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Invalidate by VA (AArch64 version) */
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pageaddr = value << 12;
    tlb_flush_page(CPU(cpu), pageaddr);
}

static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                uint64_t value)
{
    /* Invalidate by VA, all ASIDs (AArch64 version) */
    ARMCPU *cpu = arm_env_get_cpu(env);
    uint64_t pageaddr = value << 12;
    tlb_flush_page(CPU(cpu), pageaddr);
}

static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                 uint64_t value)
{
    /* Invalidate by ASID (AArch64 version) */
    int asid = extract64(value, 48, 16);
    tlb_flush(env, asid == 0);
}
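
/* For the TLBI-by-VA ops the low bits of the written value hold the page
 * number, so value << 12 recovers the page address handed to
 * tlb_flush_page(). TLBI-by-ASID carries the ASID in bits [63:48]; QEMU's
 * TLB is not ASID-tagged, so it over-invalidates with a full flush,
 * flushing global entries too only when the ASID is zero.
 */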

static const ARMCPRegInfo v8_cp_reginfo[] = {
    /* Minimal set of EL0-visible registers. This will need to be expanded
     * significantly for system emulation of AArch64 CPUs.
     */
    { .name = "NZCV", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
      .access = PL0_RW, .type = ARM_CP_NZCV },
    { .name = "FPCR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
    { .name = "FPSR", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
      .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
    /* Prohibit use of DC ZVA. OPTME: implement DC ZVA and allow its use.
     * For system mode the DZP bit here will need to be computed, not constant.
     */
    { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
      .access = PL0_R, .type = ARM_CP_CONST,
      .resetvalue = 0x10 },
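    /* A resetvalue of 0x10 sets DCZID_EL0.DZP (bit 4), advertising that
     * DC ZVA is prohibited, and leaves the block-size field BS as zero.
     */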
    { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
      .access = PL1_R, .type = ARM_CP_CURRENTEL },
    /* Cache ops: all NOPs since we don't emulate caches */
    { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
      .access = PL0_W, .type = ARM_CP_NOP,
      .accessfn = aa64_cacheop_access },
    { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NOP },
    /* TLBI operations */
    { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbiall_write },
    { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_asid_write },
    { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_va_write },
    { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
      .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
      .access = PL1_W, .type = ARM_CP_NO_MIGRATE,
      .writefn = tlbi_aa64_vaa_write },
    /* Dummy implementation of monitor debug system control register:
     * we don't support debug.
     */
    { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
    { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
      .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
{
    env->cp15.c1_sys = value;
    /* ??? Lots of these bits are not implemented. */
    /* This may enable/disable the MMU, so do a TLB flush. */
    tlb_flush(env, 1);
}

static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
     * but the AArch32 CTR has its own reginfo struct)
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UCT)) {
        return CP_ACCESS_TRAP;
    }
    return CP_ACCESS_OK;
}

static void define_aarch64_debug_regs(ARMCPU *cpu)
{
    /* Define breakpoint and watchpoint registers. These do nothing
     * but read as written, for now.
     */
    int i;

    for (i = 0; i < 16; i++) {
        ARMCPRegInfo dbgregs[] = {
            { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
            { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
            { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
            { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
              .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
              .access = PL1_RW,
              .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, dbgregs);
    }
}
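
/* Note that it is safe for dbgregs[] above to be a stack-local array that
 * is rebuilt on each loop iteration: define_arm_cp_regs() copies each
 * reginfo (via g_memdup() in add_cpreg_to_hashtable() below), so nothing
 * keeps a pointer into the temporary array after registration returns.
 */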

void register_cp_regs_for_features(ARMCPU *cpu)
{
    /* Register all the coprocessor registers based on feature bits */
    CPUARMState *env = &cpu->env;
    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile has no coprocessor registers */
        return;
    }

    define_arm_cp_regs(cpu, cp_reginfo);
    if (arm_feature(env, ARM_FEATURE_V6)) {
        /* The ID registers all have impdef reset values */
        ARMCPRegInfo v6_idregs[] = {
            { .name = "ID_PFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr0 },
            { .name = "ID_PFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_pfr1 },
            { .name = "ID_DFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_dfr0 },
            { .name = "ID_AFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_afr0 },
            { .name = "ID_MMFR0", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr0 },
            { .name = "ID_MMFR1", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr1 },
            { .name = "ID_MMFR2", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr2 },
            { .name = "ID_MMFR3", .cp = 15, .crn = 0, .crm = 1,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_mmfr3 },
            { .name = "ID_ISAR0", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar0 },
            { .name = "ID_ISAR1", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar1 },
            { .name = "ID_ISAR2", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar2 },
            { .name = "ID_ISAR3", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar3 },
            { .name = "ID_ISAR4", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar4 },
            { .name = "ID_ISAR5", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_isar5 },
            /* 6..7 are as yet unallocated and must RAZ */
            { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
              .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v6_idregs);
        define_arm_cp_regs(cpu, v6_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v6_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        define_arm_cp_regs(cpu, v6k_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        /* v7 performance monitor control register: same implementor
         * field as main ID register, and we implement only the cycle
         * count register.
         */
#ifndef CONFIG_USER_ONLY
        ARMCPRegInfo pmcr = {
            .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
            .access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
            .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
            .accessfn = pmreg_access, .writefn = pmcr_write,
            .raw_writefn = raw_write,
        };
        define_one_arm_cp_reg(cpu, &pmcr);
#endif
        ARMCPRegInfo clidr = {
            .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
        };
        define_one_arm_cp_reg(cpu, &clidr);
        define_arm_cp_regs(cpu, v7_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, not_v7_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_V8)) {
        /* AArch64 ID registers, which all have impdef reset values */
        ARMCPRegInfo v8_idregs[] = {
            { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr0 },
            { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64pfr1 },
            { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr0 },
            { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64dfr1 },
            { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr0 },
            { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64afr1 },
            { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar0 },
            { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64isar1 },
            { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr0 },
            { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST,
              .resetvalue = cpu->id_aa64mmfr1 },
            REGINFO_SENTINEL
        };
        define_arm_cp_regs(cpu, v8_idregs);
        define_arm_cp_regs(cpu, v8_cp_reginfo);
        define_aarch64_debug_regs(cpu);
    }
    if (arm_feature(env, ARM_FEATURE_MPU)) {
        /* These are the MPU registers prior to PMSAv6. Any new
         * PMSA core later than the ARM946 will require that we
         * implement the PMSAv6 or PMSAv7 registers, which are
         * completely different.
         */
        assert(!arm_feature(env, ARM_FEATURE_V6));
        define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
    } else {
        define_arm_cp_regs(cpu, vmsa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
        define_arm_cp_regs(cpu, t2ee_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_VAPA)) {
        define_arm_cp_regs(cpu, vapa_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
        define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
        define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
        define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
        define_arm_cp_regs(cpu, omap_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
        define_arm_cp_regs(cpu, strongarm_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        define_arm_cp_regs(cpu, xscale_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
        define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        define_arm_cp_regs(cpu, lpae_cp_reginfo);
    }
    /* Slightly awkwardly, the OMAP and StrongARM cores need all of
     * cp15 crn=0 to be writes-ignored, whereas for other cores they should
     * be read-only (ie write causes UNDEF exception).
     */
    {
        ARMCPRegInfo id_cp_reginfo[] = {
            /* Note that the MIDR isn't a simple constant register because
             * of the TI925 behaviour where writes to another register can
             * cause the MIDR value to change.
             *
             * Unimplemented registers in the c15 0 0 0 space default to
             * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
             * and friends override accordingly.
             */
            { .name = "MIDR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .resetvalue = cpu->midr,
              .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
              .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
              .type = ARM_CP_OVERRIDE },
            { .name = "MIDR_EL1", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 0, .opc2 = 0, .crn = 0, .crm = 0,
              .access = PL1_R, .resetvalue = cpu->midr, .type = ARM_CP_CONST },
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "TLBTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            { .name = "DUMMY",
              .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo crn0_wi_reginfo = {
            .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
            .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
            .type = ARM_CP_NOP | ARM_CP_OVERRIDE
        };
        if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
            arm_feature(env, ARM_FEATURE_STRONGARM)) {
            ARMCPRegInfo *r;
            /* Register the blanket "writes ignored" value first to cover the
             * whole space. Then update the specific ID registers to allow write
             * access, so that they ignore writes rather than causing them to
             * UNDEF.
             */
            define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
            for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                r->access = PL1_RW;
            }
        }
        define_arm_cp_regs(cpu, id_cp_reginfo);
    }
    if (arm_feature(env, ARM_FEATURE_MPIDR)) {
        define_arm_cp_regs(cpu, mpidr_cp_reginfo);
    }

    if (arm_feature(env, ARM_FEATURE_AUXCR)) {
        ARMCPRegInfo auxcr = {
            .name = "AUXCR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW, .type = ARM_CP_CONST,
            .resetvalue = cpu->reset_auxcr
        };
        define_one_arm_cp_reg(cpu, &auxcr);
    }

    if (arm_feature(env, ARM_FEATURE_CBAR)) {
        ARMCPRegInfo cbar = {
            .name = "CBAR", .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
            .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
            .fieldoffset = offsetof(CPUARMState, cp15.c15_config_base_address)
        };
        define_one_arm_cp_reg(cpu, &cbar);
    }

    /* Generic registers whose values depend on the implementation */
    {
        ARMCPRegInfo sctlr = {
            .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
            .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
            .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
            .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
            .raw_writefn = raw_write,
        };
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            /* Normally we would always end the TB on an SCTLR write, but Linux
             * arch/arm/mach-pxa/sleep.S expects two instructions following
             * an MMU enable to execute from cache. Imitate this behaviour.
             */
            sctlr.type |= ARM_CP_SUPPRESS_TB_END;
        }
        define_one_arm_cp_reg(cpu, &sctlr);
    }
}

ARMCPU *cpu_arm_init(const char *cpu_model)
{
    return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
                                 aarch64_fpu_gdb_set_reg,
                                 34, "aarch64-fpu.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
}
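
/* The register counts passed above are expected to match the XML
 * descriptions: 19 for arm-vfp.xml (16 D regs plus three control regs),
 * 35 for arm-vfp3.xml (32 D regs plus the same three), 51 for
 * arm-neon.xml (which additionally describes 16 Q reg aliases), and 34
 * for aarch64-fpu.xml (32 V regs plus FPSR and FPCR).
 */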

/* Sort alphabetically by type name, except for "any". */
static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
        return 1;
    } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
        return -1;
    } else {
        return strcmp(name_a, name_b);
    }
}

static void arm_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CPUListState *s = user_data;
    const char *typename;
    char *name;

    typename = object_class_get_name(oc);
    name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
    (*s->cpu_fprintf)(s->file, "  %s\n", name);
    g_free(name);
}

void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    list = g_slist_sort(list, arm_cpu_list_compare);
    (*cpu_fprintf)(f, "Available CPUs:\n");
    g_slist_foreach(list, arm_cpu_list_entry, &s);
    g_slist_free(list);
#ifdef CONFIG_KVM
    /* The 'host' CPU type is dynamically registered only if KVM is
     * enabled, so we have to special-case it here:
     */
    (*cpu_fprintf)(f, "  host (only available in KVM mode)\n");
#endif
}

static void arm_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_ARM_CPU));

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_ARM_CPU, false);
    g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}

static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
                                   void *opaque, int state,
                                   int crm, int opc1, int opc2)
{
    /* Private utility function for define_one_arm_cp_reg_with_opaque():
     * add a single reginfo struct to the hash table.
     */
    uint32_t *key = g_new(uint32_t, 1);
    ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
    int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
    if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
        /* The AArch32 view of a shared register sees the lower 32 bits
         * of a 64 bit backing field. It is not migratable as the AArch64
         * view handles that. AArch64 also handles reset.
         * We assume it is a cp15 register.
         */
        r2->cp = 15;
        r2->type |= ARM_CP_NO_MIGRATE;
        r2->resetfn = arm_cp_reset_ignore;
#ifdef HOST_WORDS_BIGENDIAN
        if (r2->fieldoffset) {
            r2->fieldoffset += sizeof(uint32_t);
        }
#endif
    }
    if (state == ARM_CP_STATE_AA64) {
        /* To allow abbreviation of ARMCPRegInfo
         * definitions, we treat cp == 0 as equivalent to
         * the value for "standard guest-visible sysreg".
         */
        if (r->cp == 0) {
            r2->cp = CP_REG_ARM64_SYSREG_CP;
        }
        *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
                                  r2->opc0, opc1, opc2);
    } else {
        *key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
    }
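    /* Both encodings produce a dense 32-bit key, so a wildcarded
     * definition (CP_ANY in crm/opc1/opc2) ends up as many distinct
     * hashtable entries, one per concrete (crm, opc1, opc2) triple; the
     * expansion is done by the loops in define_one_arm_cp_reg_with_opaque()
     * below.
     */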
    if (opaque) {
        r2->opaque = opaque;
    }
    /* reginfo passed to helpers is correct for the actual access,
     * and is never ARM_CP_STATE_BOTH:
     */
    r2->state = state;
    /* Make sure reginfo passed to helpers for wildcarded regs
     * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
     */
    r2->crm = crm;
    r2->opc1 = opc1;
    r2->opc2 = opc2;
    /* By convention, for wildcarded registers only the first
     * entry is used for migration; the others are marked as
     * NO_MIGRATE so we don't try to transfer the register
     * multiple times. Special registers (ie NOP/WFI) are
     * never migratable.
     */
    if ((r->type & ARM_CP_SPECIAL) ||
        ((r->crm == CP_ANY) && crm != 0) ||
        ((r->opc1 == CP_ANY) && opc1 != 0) ||
        ((r->opc2 == CP_ANY) && opc2 != 0)) {
        r2->type |= ARM_CP_NO_MIGRATE;
    }

    /* Overriding of an existing definition must be explicitly
     * requested.
     */
    if (!(r->type & ARM_CP_OVERRIDE)) {
        ARMCPRegInfo *oldreg;
        oldreg = g_hash_table_lookup(cpu->cp_regs, key);
        if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
            fprintf(stderr, "Register redefined: cp=%d %d bit "
                    "crn=%d crm=%d opc1=%d opc2=%d, "
                    "was %s, now %s\n", r2->cp, 32 + 32 * is64,
                    r2->crn, r2->crm, r2->opc1, r2->opc2,
                    oldreg->name, r2->name);
            g_assert_not_reached();
        }
    }
    g_hash_table_insert(cpu->cp_regs, key, r2);
}

void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *r, void *opaque)
{
    /* Define implementations of coprocessor registers.
     * We store these in a hashtable because typically
     * there are less than 150 registers in a space which
     * is 16*16*16*8*8 = 262144 in size.
     * Wildcarding is supported for the crm, opc1 and opc2 fields.
     * If a register is defined twice then the second definition is
     * used, so this can be used to define some generic registers and
     * then override them with implementation specific variations.
     * At least one of the original and the second definition should
     * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
     * against accidental use.
     *
     * The state field defines whether the register is to be
     * visible in the AArch32 or AArch64 execution state. If the
     * state is set to ARM_CP_STATE_BOTH then we synthesise a
     * reginfo structure for the AArch32 view, which sees the lower
     * 32 bits of the 64 bit register.
     *
     * Only registers visible in AArch64 may set r->opc0; opc0 cannot
     * be wildcarded. AArch64 registers are always considered to be 64
     * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
     * the register, if any.
     */
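    /* For example (a hypothetical reginfo, not one QEMU defines), a
     * definition such as:
     *
     *   { .name = "RAZ_EXAMPLE", .cp = 15, .crn = 9, .crm = CP_ANY,
     *     .opc1 = CP_ANY, .opc2 = CP_ANY,
     *     .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }
     *
     * would be expanded by the loops at the end of this function into
     * 16 * 8 * 8 hashtable entries, one per concrete (crm, opc1, opc2)
     * combination.
     */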
    int crm, opc1, opc2, state;
    int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
    int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
    int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
    int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
    int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
    int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
    /* 64 bit registers have only CRm and Opc1 fields */
    assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
    /* op0 only exists in the AArch64 encodings */
    assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
    /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
    assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
    /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
     * encodes a minimum access level for the register. We roll this
     * runtime check into our general permission check code, so check
     * here that the reginfo's specified permissions are strict enough
     * to encompass the generic architectural permission check.
     */
    if (r->state != ARM_CP_STATE_AA32) {
        int mask = 0;
        switch (r->opc1) {
        case 0: case 1: case 2:
            /* min_EL EL1 */
            mask = PL1_RW;
            break;
        case 3:
            /* min_EL EL0 */
            mask = PL0_RW;
            break;
        case 4:
            /* min_EL EL2 */
            mask = PL2_RW;
            break;
        case 5:
            /* unallocated encoding, so not possible */
            assert(false);
            break;
        case 6:
            /* min_EL EL3 */
            mask = PL3_RW;
            break;
        case 7:
            /* min_EL EL1, secure mode only (we don't check the latter) */
            mask = PL1_RW;
            break;
        default:
            /* broken reginfo with out-of-range opc1 */
            assert(false);
            break;
        }
        /* assert our permissions are not too lax (stricter is fine) */
        assert((r->access & ~mask) == 0);
    }

    /* Check that the register definition has enough info to handle
     * reads and writes if they are permitted.
     */
    if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
        if (r->access & PL3_R) {
            assert(r->fieldoffset || r->readfn);
        }
        if (r->access & PL3_W) {
            assert(r->fieldoffset || r->writefn);
        }
    }
    /* Bad type field probably means missing sentinel at end of reg list */
    assert(cptype_valid(r->type));
    for (crm = crmmin; crm <= crmmax; crm++) {
        for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
            for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
                for (state = ARM_CP_STATE_AA32;
                     state <= ARM_CP_STATE_AA64; state++) {
                    if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
                        continue;
                    }
                    add_cpreg_to_hashtable(cpu, r, opaque, state,
                                           crm, opc1, opc2);
                }
            }
        }
    }
}

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque)
{
    /* Define a whole list of registers */
    const ARMCPRegInfo *r;
    for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
        define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
    }
}

const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
    return g_hash_table_lookup(cpregs, &encoded_cp);
}

void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Helper coprocessor write function for write-ignore registers */
}

uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Helper coprocessor read function for read-as-zero registers */
    return 0;
}

void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
    /* Helper coprocessor reset function for do-nothing-on-reset registers */
}

static int bad_mode_switch(CPUARMState *env, int mode)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        return 0;
    default:
        return 1;
    }
}

uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16) | (env->daif & CPSR_AIF);
}
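
/* The CPSR layout reassembled above: N is bit 31 (from NF's sign bit),
 * Z bit 30, C bit 29, V bit 28 (from VF's sign bit), Q bit 27,
 * IT[1:0] bits 26:25, GE[3:0] bits 19:16, IT[7:2] bits 15:10, the
 * A/I/F mask bits within CPSR_AIF, T bit 5, and the mode bits M[4:0]
 * held in uncached_cpsr.
 */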

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    env->daif &= ~(CPSR_AIF & mask);
    env->daif |= val & CPSR_AIF & mask;

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        if (bad_mode_switch(env, val & CPSR_M)) {
            /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
             * We choose to ignore the attempt and leave the CPSR M field
             * untouched.
             */
            mask &= ~CPSR_M;
        } else {
            switch_mode(env, val & CPSR_M);
        }
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(clz)(uint32_t x)
{
    return clz32(x);
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
        return 0;
    if (num == INT_MIN && den == -1)
        return INT_MIN;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
        return 0;
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
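
/* The three steps above reverse the word by swapping bytes, then nibbles
 * within bytes, then bits within nibbles: in the last step the four bits
 * of each nibble move by -3, -1, +1 and +3 positions respectively.
 */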

#if defined(CONFIG_USER_ONLY)

void arm_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                             int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (rw == 2) {
        cs->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        cs->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUARMState *env, int mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (mode != ARM_CPU_MODE_USR) {
        cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    cpu_abort(CPU(cpu), "banked r13 read\n");
    return 0;
}

#else

/* Map CPU modes onto saved register banks. */
int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
}

void switch_mode(CPUARMState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

static void v7m_push(CPUARMState *env, uint32_t val)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    env->regs[13] -= 4;
    stl_phys(cs->as, env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    uint32_t val;

    val = ldl_phys(cs->as, env->regs[13]);
    env->regs[13] += 4;
    return val;
}
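
/* The v7-M stack is full-descending: push pre-decrements SP and then
 * stores, pop loads and then post-increments, as implemented above.
 */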

/* Switch to V7M main or process stack pointer. */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);

    /* Switch to the target stack. */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers. */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment. */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode. However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch. */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer. */
}

/* Exception names for debug logging; note that not all of these
 * precisely correspond to architectural exceptions.
 */
static const char * const excnames[] = {
    [EXCP_UDEF] = "Undefined Instruction",
    [EXCP_SWI] = "SVC",
    [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
    [EXCP_DATA_ABORT] = "Data Abort",
    [EXCP_IRQ] = "IRQ",
    [EXCP_FIQ] = "FIQ",
    [EXCP_BKPT] = "Breakpoint",
    [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
    [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
    [EXCP_STREX] = "QEMU intercept of STREX",
};

static inline void arm_log_exception(int idx)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        const char *exc = NULL;

        if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
            exc = excnames[idx];
        }
        if (!exc) {
            exc = "unknown";
        }
        qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
    }
}

void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    arm_log_exception(cs->exception_index);

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it. */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising. */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        /* The PC already points to the next instruction. */
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }

    /* Align stack pointer. */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set. */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode. */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
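
/* The eight v7m_push() calls above build the standard v7-M exception
 * stack frame, which from lower to higher addresses is:
 *   r0, r1, r2, r3, r12, lr (r14), return address (r15), xPSR
 */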

/* Handle a CPU exception. */
void arm_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    assert(!IS_M(env));

    arm_log_exception(cs->exception_index);

    /* TODO: Vectored interrupt controller. */
    switch (cs->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt. */
            if (env->thumb) {
                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
                    & 0xff;
            } else {
                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
                    & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security. */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction. */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall. */
        if (env->thumb && semihosting_enabled) {
            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
                return;
            }
        }
        env->cp15.c5_insn = 2;
        /* Fall through to prefetch abort. */
    case EXCP_PREFETCH_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
                      env->cp15.c5_insn, env->cp15.c6_insn);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
                      env->cp15.c5_data, env->cp15.c6_data);
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts. */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
        return; /* Never happens. Keep compiler happy. */
    }
    /* High vectors. */
    if (env->cp15.c1_sys & SCTLR_V) {
        /* when enabled, base address cannot be remapped. */
        addr += 0xffff0000;
    } else {
        /* ARM v7 architectures provide a vector base address register to remap
         * the interrupt vector table.
         * This register is only followed in non-monitor mode, and has a secure
         * and un-secure copy. Since the cpu is always in a un-secure operation
         * and is never in monitor mode this feature is always active.
         * Note: only bits 31:5 are valid.
         */
        addr += env->cp15.c12_vbar;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits. */
    env->condexec_bits = 0;
    /* Switch to the new mode, and to the correct instruction set. */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->daif |= mask;
    /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
     * and we should just guard the thumb mode on V4 */
    if (arm_feature(env, ARM_FEATURE_V4T)) {
        env->thumb = (env->cp15.c1_sys & SCTLR_TE) != 0;
    }
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
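
/* The addr values chosen in the switch above are the classic ARM vector
 * table offsets: 0x04 Undefined Instruction, 0x08 Supervisor Call,
 * 0x0c Prefetch Abort, 0x10 Data Abort, 0x18 IRQ and 0x1c FIQ, relative
 * to either 0xffff0000 (high vectors) or the VBAR base.
 */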

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted. */
static inline int check_ap(CPUARMState *env, int ap, int domain_prot,
                           int access_type, int is_user)
{
    int prot_ro;

    if (domain_prot == 3) {
        return PAGE_READ | PAGE_WRITE;
    }

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (arm_feature(env, ARM_FEATURE_V7)) {
            return 0;
        }
        if (access_type == 1)
            return 0;
        switch (env->cp15.c1_sys & (SCTLR_S | SCTLR_R)) {
        case SCTLR_S:
            return is_user ? 0 : PAGE_READ;
        case SCTLR_R:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: /* Reserved. */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    case 7:
        if (!arm_feature (env, ARM_FEATURE_V6K))
            return 0;
        return prot_ro;
    default:
        abort();
    }
}

static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.ttbr1_el1 & 0xffffc000;
    else
        table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}

static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    domain = (desc >> 5) & 0x0f;
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (type == 0) {
        /* Section translation fault. */
        code = 5;
        goto do_fault;
    }
    if (domain_prot == 0 || domain_prot == 2) {
        if (type == 2)
            code = 9; /* Section domain fault. */
        else
            code = 11; /* Page domain fault. */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section. */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
        *page_size = 1024 * 1024;
    } else {
        /* Lookup l2 entry. */
        if (type == 1) {
            /* Coarse pagetable. */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable. */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(cs->as, table);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            *page_size = 0x10000;
            break;
        case 2: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
            *page_size = 0x1000;
            break;
        case 3: /* 1k page. */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault. */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            *page_size = 0x400;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain_prot, access_type, is_user);
    if (!*prot) {
        /* Access permission fault. */
        goto do_fault;
    }
    *prot |= PAGE_EXEC;
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
                            int is_user, hwaddr *phys_ptr,
                            int *prot, target_ulong *page_size)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    uint32_t pxn = 0;
    int type;
    int ap;
    int domain = 0;
    int domain_prot;
    hwaddr phys_addr;

    /* Pagetable walk. */
    /* Lookup l1 descriptor. */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(cs->as, table);
    type = (desc & 3);
    if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
        /* Section translation fault, or attempt to use the encoding
         * which is Reserved on implementations without PXN.
         */
        code = 5;
        goto do_fault;
    }
    if ((type == 1) || !(desc & (1 << 18))) {
        /* Page or Section. */
        domain = (desc >> 5) & 0x0f;
    }
    domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
    if (domain_prot == 0 || domain_prot == 2) {
        if (type != 1) {
            code = 9; /* Section domain fault. */
        } else {
            code = 11; /* Page domain fault. */
        }
        goto do_fault;
    }
    if (type != 1) {
        if (desc & (1 << 18)) {
            /* Supersection. */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
            *page_size = 0x1000000;
        } else {
            /* Section. */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            *page_size = 0x100000;
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        pxn = desc & 1;
        code = 13;
    } else {
        if (arm_feature(env, ARM_FEATURE_PXN)) {
            pxn = (desc >> 2) & 1;
        }
        /* Lookup l2 entry. */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(cs->as, table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault. */
            code = 7;
            goto do_fault;
        case 1: /* 64k page. */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            *page_size = 0x10000;
            break;
        case 2: case 3: /* 4k page. */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            *page_size = 0x1000;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell. */
            abort();
        }
        code = 15;
    }
    if (domain_prot == 3) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    } else {
        if (pxn && !is_user) {
            xn = 1;
        }
        if (xn && access_type == 2)
            goto do_fault;

        /* The simplified model uses AP[0] as an access control bit. */
        if ((env->cp15.c1_sys & SCTLR_AFE) && (ap & 1) == 0) {
            /* Access flag fault. */
            code = (code == 15) ? 6 : 3;
            goto do_fault;
        }
        *prot = check_ap(env, ap, domain_prot, access_type, is_user);
        if (!*prot) {
            /* Access permission fault. */
            goto do_fault;
        }
        if (!xn) {
            *prot |= PAGE_EXEC;
        }
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

/* Fault type for long-descriptor MMU fault reporting; this corresponds
 * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
 */
typedef enum {
    translation_fault = 1,
    access_fault = 2,
    permission_fault = 3,
} MMUFaultType;

static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
                              int access_type, int is_user,
                              hwaddr *phys_ptr, int *prot,
                              target_ulong *page_size_ptr)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    /* Read an LPAE long-descriptor translation table. */
    MMUFaultType fault_type = translation_fault;
    uint32_t level = 1;
    uint32_t epd;
    uint32_t tsz;
    uint64_t ttbr;
    int ttbr_select;
    int n;
    hwaddr descaddr;
    uint32_t tableattrs;
    target_ulong page_size;
    uint32_t attrs;

    /* Determine whether this address is in the region controlled by
     * TTBR0 or TTBR1 (or if it is in neither region and should fault).
     * This is a Non-secure PL0/1 stage 1 translation, so controlled by
     * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
     */
    uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
    uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
    if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
        /* there is a ttbr0 region and we are in it (high bits all zero) */
        ttbr_select = 0;
    } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
        /* there is a ttbr1 region and we are in it (high bits all one) */
        ttbr_select = 1;
    } else if (!t0sz) {
        /* ttbr0 region is "everything not in the ttbr1 region" */
        ttbr_select = 0;
    } else if (!t1sz) {
        /* ttbr1 region is "everything not in the ttbr0 region" */
        ttbr_select = 1;
    } else {
        /* in the gap between the two regions, this is a Translation fault */
        fault_type = translation_fault;
        goto do_fault;
    }
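
    /* As a worked example (values chosen for illustration): with t0sz = 2
     * the TTBR0 region covers virtual addresses 0x00000000..0x3fffffff
     * (the top two bits must be zero), and with t1sz = 2 the TTBR1 region
     * covers 0xc0000000..0xffffffff (the top two bits must be one);
     * anything in between takes the Translation fault path above.
     */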

    /* Note that QEMU ignores shareability and cacheability attributes,
     * so we don't need to do anything with the SH, ORGN, IRGN fields
     * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
     * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
     * implement any ASID-like capability so we can ignore it (instead
     * we will always flush the TLB any time the ASID is changed).
     */
    if (ttbr_select == 0) {
        ttbr = env->cp15.ttbr0_el1;
        epd = extract32(env->cp15.c2_control, 7, 1);
        tsz = t0sz;
    } else {
        ttbr = env->cp15.ttbr1_el1;
        epd = extract32(env->cp15.c2_control, 23, 1);
        tsz = t1sz;
    }

    if (epd) {
        /* Translation table walk disabled => Translation fault on TLB miss */
        goto do_fault;
    }

    /* If the region is small enough we will skip straight to a 2nd level
     * lookup. This affects the number of bits of the address used in
     * combination with the TTBR to find the first descriptor. ('n' here
     * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
     * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
     */
    if (tsz > 1) {
        level = 2;
        n = 14 - tsz;
    } else {
        n = 5 - tsz;
    }

    /* Clear the vaddr bits which aren't part of the within-region address,
     * so that we don't have to special case things when calculating the
     * first descriptor address.
     */
    address &= (0xffffffffU >> tsz);
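
    /* Example: for t0sz = 0 the region is the full 4GB and the walk
     * starts at level 1 with n = 5 (a 32-byte-aligned table of four
     * 8-byte entries, each mapping 1GB); for t0sz = 2 the 1GB region
     * skips straight to level 2 with n = 12 (a 4KB table of 512 entries,
     * each mapping 2MB).
     */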

    /* Now we can extract the actual base address from the TTBR */
    descaddr = extract64(ttbr, 0, 40);
    descaddr &= ~((1ULL << n) - 1);

    tableattrs = 0;
    for (;;) {
        uint64_t descriptor;

        descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
        descriptor = ldq_phys(cs->as, descaddr);
        if (!(descriptor & 1) ||
            (!(descriptor & 2) && (level == 3))) {
            /* Invalid, or the Reserved level 3 encoding */
            goto do_fault;
        }
        descaddr = descriptor & 0xfffffff000ULL;

        if ((descriptor & 2) && (level < 3)) {
            /* Table entry. The top five bits are attributes which may
             * propagate down through lower levels of the table (and
             * which are all arranged so that 0 means "no effect", so
             * we can gather them up by ORing in the bits at each level).
             */
            tableattrs |= extract64(descriptor, 59, 5);
            level++;
            continue;
        }
        /* Block entry at level 1 or 2, or page entry at level 3.
         * These are basically the same thing, although the number
         * of bits we pull in from the vaddr varies.
         */
        page_size = (1 << (39 - (9 * level)));
        descaddr |= (address & (page_size - 1));
        /* Extract attributes from the descriptor and merge with table attrs */
        attrs = extract64(descriptor, 2, 10)
            | (extract64(descriptor, 52, 12) << 10);
        attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
        attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
        /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
         * means "force PL1 access only", which means forcing AP[1] to 0.
         */
        if (extract32(tableattrs, 2, 1)) {
            attrs &= ~(1 << 4);
        }
        /* Since we're always in the Non-secure state, NSTable is ignored. */
        break;
    }
    /* Here descaddr is the final physical address, and attributes
     * are all in attrs.
     */
    fault_type = access_fault;
    if ((attrs & (1 << 8)) == 0) {
        /* Access flag */
        goto do_fault;
    }
    fault_type = permission_fault;
    if (is_user && !(attrs & (1 << 4))) {
        /* Unprivileged access not enabled */
        goto do_fault;
    }
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
        /* XN or PXN */
        if (access_type == 2) {
            goto do_fault;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (attrs & (1 << 5)) {
        /* Write access forbidden */
        if (access_type == 1) {
            goto do_fault;
        }
        *prot &= ~PAGE_WRITE;
    }

    *phys_ptr = descaddr;
    *page_size_ptr = page_size;
    return 0;

do_fault:
    /* Long-descriptor format IFSR/DFSR value */
    return (1 << 9) | (fault_type << 2) | level;
}

static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
                             int access_type, int is_user,
                             hwaddr *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32. */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission. */
        return 1;
    }
    *prot |= PAGE_EXEC;
    return 0;
}

/* get_phys_addr - get the physical address for this virtual address
 *
 * Find the physical address corresponding to the given virtual address,
 * by doing a translation table walk on MMU based systems or using the
 * MPU state on MPU based systems.
 *
 * Returns 0 if the translation was successful. Otherwise, phys_ptr,
 * prot and page_size are not filled in, and the return value provides
 * information on why the translation aborted, in the format of a
 * DFSR/IFSR fault register, with the following caveats:
 *  * we honour the short vs long DFSR format differences.
 *  * the WnR bit is never set (the caller must do this).
 *  * for MPU based systems we don't bother to return a full FSR format
 *    value.
 *
 * @env: CPUARMState
 * @address: virtual address to get physical address for
 * @access_type: 0 for read, 1 for write, 2 for execute
 * @is_user: 0 for privileged access, 1 for user
 * @phys_ptr: set to the physical address corresponding to the virtual address
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
 */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
                                int access_type, int is_user,
                                hwaddr *phys_ptr, int *prot,
                                target_ulong *page_size)
{
    /* Fast Context Switch Extension. */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & SCTLR_M) == 0) {
        /* MMU/MPU disabled. */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *page_size = TARGET_PAGE_SIZE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        *page_size = TARGET_PAGE_SIZE;
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (extended_addresses_enabled(env)) {
        return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
                                  prot, page_size);
    } else if (env->cp15.c1_sys & SCTLR_XP) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot, page_size);
    }
}

int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                             int access_type, int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
                        &page_size);
    if (ret == 0) {
        /* Map a single [sub]page. */
        phys_addr &= ~(hwaddr)0x3ff;
        address &= ~(uint32_t)0x3ff;
        tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
        return 0;
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        cs->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    hwaddr phys_addr;
    target_ulong page_size;
    int prot;
    int ret;

    ret = get_phys_addr(&cpu->env, addr, 0, 0, &phys_addr, &prot, &page_size);

    if (ret != 0) {
        return -1;
    }

    return phys_addr;
}
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->daif & PSTATE_I) != 0;
    case 17: /* BASEPRI */
    case 18: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 19: /* FAULTMASK */
        return (env->daif & PSTATE_F) != 0;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1) {
            env->daif |= PSTATE_I;
        } else {
            env->daif &= ~PSTATE_I;
        }
        break;
    case 17: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 19: /* FAULTMASK */
        if (val & 1) {
            env->daif |= PSTATE_F;
        } else {
            env->daif &= ~PSTATE_F;
        }
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
        return;
    }
}
/* Note that signed overflow is undefined in C.  The following routines are
   careful to use unsigned types where modulo arithmetic is required.
   Failure to do so _will_ break on newer gcc.  */

/* Signed saturating arithmetic.  */
/* Perform 16-bit signed saturating addition.  */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}
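/* Illustrative example: add16_sat(0x7000, 0x2000) computes res = 0x9000,
 * whose sign bit differs from the two positive operands, so the result
 * saturates to 0x7fff.  Two negative operands that underflow saturate to
 * 0x8000 instead.
 */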
/* Perform 8-bit signed saturating addition.  */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}
/* Perform 16-bit signed saturating subtraction.  */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}
/* Perform 8-bit signed saturating subtraction.  */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}
#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
#define PFX q

#include "op_addsub.h"
/* Unsigned saturating arithmetic.  */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}

static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}

static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}

static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}
#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
#define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
#define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
#define PFX uq

#include "op_addsub.h"
/* Signed modulo arithmetic.  */
#define SARITH16(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
    RESULT(sum, n, 16); \
    if (sum >= 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SARITH8(a, b, n, op) do { \
    int32_t sum; \
    sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
    RESULT(sum, n, 8); \
    if (sum >= 0) \
        ge |= 1 << n; \
    } while (0)
#define ADD16(a, b, n) SARITH16(a, b, n, +)
#define SUB16(a, b, n) SARITH16(a, b, n, -)
#define ADD8(a, b, n)  SARITH8(a, b, n, +)
#define SUB8(a, b, n)  SARITH8(a, b, n, -)
#define PFX s
#define ARITH_GE

#include "op_addsub.h"
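/* Illustrative example: for the signed parallel ops above, each halfword
 * or byte lane sets its GE bit(s) when the (unsaturated) result is >= 0.
 * A SADD16 lane computing 0x0001 + 0x0002 = 3 executes
 * ge |= 3 << (n * 2) for that lane; the SEL instruction later uses these
 * GE bits as a per-byte selection mask (see sel_flags below).
 */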
/* Unsigned modulo arithmetic.  */
#define ADD16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 1) \
        ge |= 3 << (n * 2); \
    } while (0)

#define ADD8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 1) \
        ge |= 1 << n; \
    } while (0)

#define SUB16(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
    RESULT(sum, n, 16); \
    if ((sum >> 16) == 0) \
        ge |= 3 << (n * 2); \
    } while (0)

#define SUB8(a, b, n) do { \
    uint32_t sum; \
    sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
    RESULT(sum, n, 8); \
    if ((sum >> 8) == 0) \
        ge |= 1 << n; \
    } while (0)

#define PFX u
#define ARITH_GE

#include "op_addsub.h"
/* Halved signed arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
#define PFX sh

#include "op_addsub.h"
/* Halved unsigned arithmetic.  */
#define ADD16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define SUB16(a, b, n) \
    RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
#define ADD8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define SUB8(a, b, n) \
    RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
#define PFX uh

#include "op_addsub.h"
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}

/* Unsigned sum of absolute byte differences.  */
uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
{
    uint32_t sum;
    sum = do_usad(a, b);
    sum += do_usad(a >> 8, b >> 8);
    sum += do_usad(a >> 16, b >> 16);
    sum += do_usad(a >> 24, b >> 24);
    return sum;
}
/* For ARMv6 SEL instruction.  */
uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
{
    uint32_t mask;

    mask = 0;
    if (flags & 1)
        mask |= 0xff;
    if (flags & 2)
        mask |= 0xff00;
    if (flags & 4)
        mask |= 0xff0000;
    if (flags & 8)
        mask |= 0xff000000;
    return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   Single precision routines have a "s" suffix, double precision a
   "d" suffix.  */

/* Convert host exception flags to vfp form.  */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & (float_flag_underflow | float_flag_output_denormal))
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    if (host_bits & float_flag_input_denormal)
        target_bits |= 0x80;
    return target_bits;
}
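/* The target bits above are the FPSCR cumulative exception flags:
 * IOC (invalid) = bit 0, DZC (divide by zero) = bit 1,
 * OFC (overflow) = bit 2, UFC (underflow) = bit 3,
 * IXC (inexact) = bit 4 and IDC (input denormal) = bit 7,
 * matching the values 1, 2, 4, 8, 0x10 and 0x80.
 */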
uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
{
    int i;
    uint32_t fpscr;

    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    i |= get_float_exception_flags(&env->vfp.standard_fp_status);
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}

uint32_t vfp_get_fpscr(CPUARMState *env)
{
    return HELPER(vfp_get_fpscr)(env);
}
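/* Illustrative decomposition: an FPSCR value of 0x00370010 read back here
 * means vec_len = 7 (bits [18:16]), vec_stride = 3 (bits [21:20]) and the
 * cumulative IXC (inexact) flag, bit 4, set.
 */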
/* Convert vfp exception flags to target form.  */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    if (target_bits & 0x80)
        host_bits |= float_flag_input_denormal;
    return host_bits;
}
void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        i = (val >> 22) & 3;
        switch (i) {
        case FPROUNDING_TIEEVEN:
            i = float_round_nearest_even;
            break;
        case FPROUNDING_POSINF:
            i = float_round_up;
            break;
        case FPROUNDING_NEGINF:
            i = float_round_down;
            break;
        case FPROUNDING_ZERO:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }
    if (changed & (1 << 24)) {
        set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
        set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
    }
    if (changed & (1 << 25))
        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);

    i = vfp_exceptbits_to_host(val);
    set_float_exception_flags(i, &env->vfp.fp_status);
    set_float_exception_flags(0, &env->vfp.standard_fp_status);
}

void vfp_set_fpscr(CPUARMState *env, uint32_t val)
{
    HELPER(vfp_set_fpscr)(env, val);
}
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float32_ ## name(a, b, fpst); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float64_ ## name(a, b, fpst); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}

float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}

float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}

float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}

float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env)  \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
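/* The flag nibbles written to FPSCR[31:28] above are the ARM NZCV
 * encodings for FP comparison results: equal -> 0110 (ZC),
 * less than -> 1000 (N), greater than -> 0010 (C),
 * unordered -> 0011 (CV).
 */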
/* Integer to float and float to integer conversions */

#define CONV_ITOF(name, fsz, sign) \
float##fsz HELPER(name)(uint32_t x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
}

#define CONV_FTOI(name, fsz, sign, round) \
uint32_t HELPER(name)(float##fsz x, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    return float##fsz##_to_##sign##int32##round(x, fpst); \
}

#define FLOAT_CONVS(name, p, fsz, sign) \
CONV_ITOF(vfp_##name##to##p, fsz, sign) \
CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)

FLOAT_CONVS(si, s, 32, )
FLOAT_CONVS(si, d, 64, )
FLOAT_CONVS(ui, s, 32, u)
FLOAT_CONVS(ui, d, 64, u)

#undef CONV_ITOF
#undef CONV_FTOI
#undef FLOAT_CONVS
/* floating point conversion */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
{
    float64 r = float32_to_float64(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float64_maybe_silence_nan(r);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
{
    float32 r = float64_to_float32(x, &env->vfp.fp_status);
    /* ARM requires that S<->D conversion of any kind of NaN generates
     * a quiet NaN by forcing the most significant frac bit to 1.
     */
    return float32_maybe_silence_nan(r);
}
/* VFP3 fixed point conversion.  */
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
                                     void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    float##fsz tmp; \
    tmp = itype##_to_##float##fsz(x, fpst); \
    return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
}

/* Notice that we want only input-denormal exception flags from the
 * scalbn operation: the other possible flags (overflow+inexact if
 * we overflow to infinity, output-denormal) aren't correct for the
 * complete scale-and-convert operation.
 */
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
                                             uint32_t shift, \
                                             void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    int old_exc_flags = get_float_exception_flags(fpst); \
    float##fsz tmp; \
    if (float##fsz##_is_any_nan(x)) { \
        float_raise(float_flag_invalid, fpst); \
        return 0; \
    } \
    tmp = float##fsz##_scalbn(x, shift, fpst); \
    old_exc_flags |= get_float_exception_flags(fpst) \
        & float_flag_input_denormal; \
    set_float_exception_flags(old_exc_flags, fpst); \
    return float##fsz##_to_##itype##round(tmp, fpst); \
}

#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )

#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)

#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
#undef VFP_CONV_FIX_A64
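/* Worked example (illustrative): vfp_sltod(0x00000100, 8, fpst), i.e. a
 * signed 32-bit fixed-point value with 8 fraction bits, computes
 * int32_to_float64(256) = 256.0 and then float64_scalbn(256.0, -8) = 1.0,
 * the real value of 0x100 interpreted in Q24.8 format.
 */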
/* Set the current fp rounding mode and return the old one.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
/* Set the current fp rounding mode in the standard fp status and return
 * the old one. This is for NEON instructions that need to change the
 * rounding mode but wish to use the standard FPSCR values for everything
 * else. Always set the rounding mode back to the correct value after
 * modifying it.
 * The argument is a softfloat float_round_ value.
 */
uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
{
    float_status *fp_status = &env->vfp.standard_fp_status;

    uint32_t prev_rmode = get_float_rounding_mode(fp_status);
    set_float_rounding_mode(rmode, fp_status);

    return prev_rmode;
}
/* Half precision conversions.  */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float32 r = float16_to_float32(make_float16(a), ieee, s);
    if (ieee) {
        return float32_maybe_silence_nan(r);
    }
    return r;
}
static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float32_to_float16(a, ieee, s);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
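/* FPSCR bit 26 tested above is the AHP (Alternative Half-Precision)
 * control: 'ieee' is true when AHP == 0.  In the alternative format the
 * exponent value 0x1f encodes ordinary numbers, so there are no
 * half-precision infinities or NaNs to silence.
 */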
float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
}

uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
}

float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
{
    return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
}

uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
{
    return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
    if (ieee) {
        return float64_maybe_silence_nan(r);
    }
    return r;
}

uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
    int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
    float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
    if (ieee) {
        r = float16_maybe_silence_nan(r);
    }
    return float16_val(r);
}
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(a, b, s), s);
}
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float32 product;
    if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
        (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
        if (!(float32_is_zero(a) || float32_is_zero(b))) {
            float_raise(float_flag_input_denormal, s);
        }
        return float32_one_point_five;
    }
    product = float32_mul(a, b, s);
    return float32_div(float32_sub(float32_three, product, s), float32_two, s);
}
/* Constants 256 and 512 are used in some helpers; we avoid relying on
 * int->float conversions at run-time.  */
#define float64_256 make_float64(0x4070000000000000LL)
#define float64_512 make_float64(0x4080000000000000LL)

/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    /* q = (int)(a * 512.0) */
    float64 q = float64_mul(float64_512, a, s);
    int64_t q_int = float64_to_int64_round_to_zero(q, s);

    /* r = 1.0 / (((double)q + 0.5) / 512.0) */
    q = int64_to_float64(q_int, s);
    q = float64_add(q, float64_half, s);
    q = float64_div(q, float64_512, s);
    q = float64_div(float64_one, q, s);

    /* s = (int)(256.0 * r + 0.5) */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0 */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
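/* Worked example (illustrative): for a = 0.5, the low end of the valid
 * input range [0.5, 1.0):
 *   q      = (int)(0.5 * 512.0)          = 256
 *   r      = 1.0 / ((256 + 0.5) / 512.0) = 1.99610...
 *   s      = (int)(256.0 * r + 0.5)      = 511
 *   result = 511 / 256.0                 = 1.99609375
 * which is the 8-bit-precision reciprocal estimate of 0.5; the callers
 * below then fix up the sign and exponent separately.
 */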
float32 HELPER(recpe_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    float64 f64;
    uint32_t val32 = float32_val(a);

    int result_exp;
    int a_exp = (val32 & 0x7f800000) >> 23;
    int sign = val32 & 0x80000000;

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_set_sign(float32_zero, float32_is_neg(a));
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (a_exp >= 253) {
        float_raise(float_flag_underflow, s);
        return float32_set_sign(float32_zero, float32_is_neg(a));
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(val32 & 0x7fffff) << 29));

    result_exp = 253 - a_exp;

    f64 = recip_estimate(f64, env);

    val32 = sign
        | ((result_exp & 0xff) << 23)
        | ((float64_val(f64) >> 29) & 0x7fffff);

    return make_float32(val32);
}
/* The algorithm that must be used to calculate the estimate
 * is specified by the ARM ARM.
 */
static float64 recip_sqrt_estimate(float64 a, CPUARMState *env)
{
    /* These calculations mustn't set any fp exception flags,
     * so we use a local copy of the fp_status.
     */
    float_status dummy_status = env->vfp.standard_fp_status;
    float_status *s = &dummy_status;
    float64 q;
    int64_t q_int;

    if (float64_lt(a, float64_half, s)) {
        /* range 0.25 <= a < 0.5 */

        /* a in units of 1/512 rounded down */
        /* q0 = (int)(a * 512.0);  */
        q = float64_mul(float64_512, a, s);
        q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_512, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    } else {
        /* range 0.5 <= a < 1.0 */

        /* a in units of 1/256 rounded down */
        /* q1 = (int)(a * 256.0); */
        q = float64_mul(float64_256, a, s);
        int64_t q_int = float64_to_int64_round_to_zero(q, s);

        /* reciprocal root r */
        /* r = 1.0 / sqrt(((double)q1 + 0.5) / 256.0);  */
        q = int64_to_float64(q_int, s);
        q = float64_add(q, float64_half, s);
        q = float64_div(q, float64_256, s);
        q = float64_sqrt(q, s);
        q = float64_div(float64_one, q, s);
    }
    /* r in units of 1/256 rounded to nearest */
    /* s = (int)(256.0 * r + 0.5); */
    q = float64_mul(q, float64_256, s);
    q = float64_add(q, float64_half, s);
    q_int = float64_to_int64_round_to_zero(q, s);

    /* return (double)s / 256.0; */
    return float64_div(int64_to_float64(q_int, s), float64_256, s);
}
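/* Worked example (illustrative): for a = 0.25, the low end of the valid
 * input range [0.25, 1.0):
 *   q0     = (int)(0.25 * 512.0)              = 128
 *   r      = 1.0 / sqrt((128 + 0.5) / 512.0)  = 1.99610...
 *   s      = (int)(256.0 * r + 0.5)           = 511
 *   result = 511 / 256.0                      = 1.99609375
 * i.e. an 8-bit estimate of 1/sqrt(0.25) = 2.0.
 */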
float32 HELPER(rsqrte_f32)(float32 a, CPUARMState *env)
{
    float_status *s = &env->vfp.standard_fp_status;
    int result_exp;
    float64 f64;
    uint32_t val;
    uint64_t val64;

    val = float32_val(a);

    if (float32_is_any_nan(a)) {
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, s);
        }
        return float32_default_nan;
    } else if (float32_is_zero_or_denormal(a)) {
        if (!float32_is_zero(a)) {
            float_raise(float_flag_input_denormal, s);
        }
        float_raise(float_flag_divbyzero, s);
        return float32_set_sign(float32_infinity, float32_is_neg(a));
    } else if (float32_is_neg(a)) {
        float_raise(float_flag_invalid, s);
        return float32_default_nan;
    } else if (float32_is_infinity(a)) {
        return float32_zero;
    }

    /* Normalize to a double-precision value between 0.25 and 1.0,
     * preserving the parity of the exponent.  */
    if ((val & 0x800000) == 0) {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3feULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    } else {
        f64 = make_float64(((uint64_t)(val & 0x80000000) << 32)
                           | (0x3fdULL << 52)
                           | ((uint64_t)(val & 0x7fffff) << 29));
    }

    result_exp = (380 - ((val & 0x7f800000) >> 23)) / 2;

    f64 = recip_sqrt_estimate(f64, env);

    val64 = float64_val(f64);

    val = ((result_exp & 0xff) << 23)
        | ((val64 >> 29) & 0x7fffff);
    return make_float32(val);
}
uint32_t HELPER(recpe_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0x80000000) == 0) {
        return 0xffffffff;
    }

    f64 = make_float64((0x3feULL << 52)
                       | ((int64_t)(a & 0x7fffffff) << 21));

    f64 = recip_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUARMState *env)
{
    float64 f64;

    if ((a & 0xc0000000) == 0) {
        return 0xffffffff;
    }

    if (a & 0x80000000) {
        f64 = make_float64((0x3feULL << 52)
                           | ((uint64_t)(a & 0x7fffffff) << 21));
    } else { /* bits 31-30 == '01' */
        f64 = make_float64((0x3fdULL << 52)
                           | ((uint64_t)(a & 0x3fffffff) << 22));
    }

    f64 = recip_sqrt_estimate(f64, env);

    return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
}
/* VFPv4 fused multiply-accumulate */
float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float32_muladd(a, b, c, 0, fpst);
}

float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
    return float32_round_to_int(x, fp_status);
}

float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
    return float64_round_to_int(x, fp_status);
}

float32 HELPER(rints)(float32 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float32 ret;

    ret = float32_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

float64 HELPER(rintd)(float64 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float64 ret;

    ret = float64_round_to_int(x, fp_status);

    new_flags = get_float_exception_flags(fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/* Convert ARM rounding mode to softfloat */
int arm_rmode_to_sf(int rmode)
{
    switch (rmode) {
    case FPROUNDING_TIEAWAY:
        rmode = float_round_ties_away;
        break;
    case FPROUNDING_ODD:
        /* FIXME: add support for TIEAWAY and ODD */
        qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
                      rmode);
        /* fall through for now */
    case FPROUNDING_TIEEVEN:
    default:
        rmode = float_round_nearest_even;
        break;
    case FPROUNDING_POSINF:
        rmode = float_round_up;
        break;
    case FPROUNDING_NEGINF:
        rmode = float_round_down;
        break;
    case FPROUNDING_ZERO:
        rmode = float_round_to_zero;
        break;
    }
    return rmode;
}
static void crc_init_buffer(uint8_t *buf, uint32_t val, uint32_t bytes)
{
    memset(buf, 0, 4);

    if (bytes == 1) {
        buf[0] = val & 0xff;
    } else if (bytes == 2) {
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
    } else {
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
        buf[2] = (val >> 16) & 0xff;
        buf[3] = (val >> 24) & 0xff;
    }
}
uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    crc_init_buffer(buf, val, bytes);

    /* zlib crc32 converts the accumulator and output to one's complement.  */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
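/* The double XOR above is needed because zlib's crc32() complements the
 * running CRC on entry and on exit, while the ARM CRC32 instructions
 * operate on the raw (uncomplemented) accumulator; the two inversions
 * cancel zlib's pre- and post-conditioning.
 */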
uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint8_t buf[4];

    crc_init_buffer(buf, val, bytes);

    /* Linux crc32c converts the output to one's complement.  */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}