4 #include "host-utils.h"
7 void cpu_state_reset(CPUARMState
*env
)
9 cpu_reset(ENV_GET_CPU(env
));
12 static int vfp_gdb_get_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
16 /* VFP data registers are always little-endian. */
17 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ? 32 : 16;
19 stfq_le_p(buf
, env
->vfp
.regs
[reg
]);
22 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
23 /* Aliases for Q regs. */
26 stfq_le_p(buf
, env
->vfp
.regs
[(reg
- 32) * 2]);
27 stfq_le_p(buf
+ 8, env
->vfp
.regs
[(reg
- 32) * 2 + 1]);
31 switch (reg
- nregs
) {
32 case 0: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSID
]); return 4;
33 case 1: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPSCR
]); return 4;
34 case 2: stl_p(buf
, env
->vfp
.xregs
[ARM_VFP_FPEXC
]); return 4;
39 static int vfp_gdb_set_reg(CPUARMState
*env
, uint8_t *buf
, int reg
)
43 nregs
= arm_feature(env
, ARM_FEATURE_VFP3
) ? 32 : 16;
45 env
->vfp
.regs
[reg
] = ldfq_le_p(buf
);
48 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
51 env
->vfp
.regs
[(reg
- 32) * 2] = ldfq_le_p(buf
);
52 env
->vfp
.regs
[(reg
- 32) * 2 + 1] = ldfq_le_p(buf
+ 8);
56 switch (reg
- nregs
) {
57 case 0: env
->vfp
.xregs
[ARM_VFP_FPSID
] = ldl_p(buf
); return 4;
58 case 1: env
->vfp
.xregs
[ARM_VFP_FPSCR
] = ldl_p(buf
); return 4;
59 case 2: env
->vfp
.xregs
[ARM_VFP_FPEXC
] = ldl_p(buf
) & (1 << 30); return 4;
/* Instantiate an ARMCPU from its QOM class name and register the
 * richest VFP/Neon GDB coprocessor register set the CPU supports:
 * Neon (51 regs) > VFP3 (35 regs) > VFP (19 regs).
 * NOTE(review): extraction dropped interior lines of this function
 * (cpu/env declarations, translator init, reset and return path);
 * the surviving fragments below are kept byte-identical.
 */
64 ARMCPU
*cpu_arm_init(const char *cpu_model
)
68 static int inited
= 0;
/* Unknown model name: presumably bails out (returns NULL) -- TODO confirm. */
70 if (!object_class_by_name(cpu_model
)) {
73 cpu
= ARM_CPU(object_new(cpu_model
));
75 env
->cpu_model_str
= cpu_model
;
/* One-shot TCG translator initialisation, guarded by the static flag. */
78 if (tcg_enabled() && !inited
) {
84 if (arm_feature(env
, ARM_FEATURE_NEON
)) {
85 gdb_register_coprocessor(env
, vfp_gdb_get_reg
, vfp_gdb_set_reg
,
86 51, "arm-neon.xml", 0);
87 } else if (arm_feature(env
, ARM_FEATURE_VFP3
)) {
88 gdb_register_coprocessor(env
, vfp_gdb_get_reg
, vfp_gdb_set_reg
,
89 35, "arm-vfp3.xml", 0);
90 } else if (arm_feature(env
, ARM_FEATURE_VFP
)) {
91 gdb_register_coprocessor(env
, vfp_gdb_get_reg
, vfp_gdb_set_reg
,
92 19, "arm-vfp.xml", 0);
98 typedef struct ARMCPUListState
{
99 fprintf_function cpu_fprintf
;
103 /* Sort alphabetically by type name, except for "any". */
104 static gint
arm_cpu_list_compare(gconstpointer a
, gconstpointer b
)
106 ObjectClass
*class_a
= (ObjectClass
*)a
;
107 ObjectClass
*class_b
= (ObjectClass
*)b
;
108 const char *name_a
, *name_b
;
110 name_a
= object_class_get_name(class_a
);
111 name_b
= object_class_get_name(class_b
);
112 if (strcmp(name_a
, "any") == 0) {
114 } else if (strcmp(name_b
, "any") == 0) {
117 return strcmp(name_a
, name_b
);
/* g_slist_foreach callback: print one CPU type name through the
 * fprintf-style callback and stream carried in 'user_data'
 * (an ARMCPUListState). */
121 static void arm_cpu_list_entry(gpointer data
, gpointer user_data
)
123 ObjectClass
*oc
= data
;
124 ARMCPUListState
*s
= user_data
;
126 (*s
->cpu_fprintf
)(s
->file
, " %s\n",
127 object_class_get_name(oc
));
/* List all registered ARM CPU types on stream 'f', sorted by
 * arm_cpu_list_compare (alphabetical, "any" last).
 * NOTE(review): extraction dropped some lines here (presumably the
 * '.file = f' initializer and the g_slist_free of 'list' -- TODO
 * confirm); surviving fragments kept byte-identical. */
130 void arm_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
132 ARMCPUListState s
= {
134 .cpu_fprintf
= cpu_fprintf
,
138 list
= object_class_get_list(TYPE_ARM_CPU
, false);
139 list
= g_slist_sort(list
, arm_cpu_list_compare
);
140 (*cpu_fprintf
)(f
, "Available CPUs:\n");
141 g_slist_foreach(list
, arm_cpu_list_entry
, &s
);
145 static int bad_mode_switch(CPUARMState
*env
, int mode
)
147 /* Return true if it is not valid for us to switch to
148 * this CPU mode (ie all the UNPREDICTABLE cases in
149 * the ARM ARM CPSRWriteByInstr pseudocode).
152 case ARM_CPU_MODE_USR
:
153 case ARM_CPU_MODE_SYS
:
154 case ARM_CPU_MODE_SVC
:
155 case ARM_CPU_MODE_ABT
:
156 case ARM_CPU_MODE_UND
:
157 case ARM_CPU_MODE_IRQ
:
158 case ARM_CPU_MODE_FIQ
:
165 uint32_t cpsr_read(CPUARMState
*env
)
169 return env
->uncached_cpsr
| (env
->NF
& 0x80000000) | (ZF
<< 30) |
170 (env
->CF
<< 29) | ((env
->VF
& 0x80000000) >> 3) | (env
->QF
<< 27)
171 | (env
->thumb
<< 5) | ((env
->condexec_bits
& 3) << 25)
172 | ((env
->condexec_bits
& 0xfc) << 8)
176 void cpsr_write(CPUARMState
*env
, uint32_t val
, uint32_t mask
)
178 if (mask
& CPSR_NZCV
) {
179 env
->ZF
= (~val
) & CPSR_Z
;
181 env
->CF
= (val
>> 29) & 1;
182 env
->VF
= (val
<< 3) & 0x80000000;
185 env
->QF
= ((val
& CPSR_Q
) != 0);
187 env
->thumb
= ((val
& CPSR_T
) != 0);
188 if (mask
& CPSR_IT_0_1
) {
189 env
->condexec_bits
&= ~3;
190 env
->condexec_bits
|= (val
>> 25) & 3;
192 if (mask
& CPSR_IT_2_7
) {
193 env
->condexec_bits
&= 3;
194 env
->condexec_bits
|= (val
>> 8) & 0xfc;
196 if (mask
& CPSR_GE
) {
197 env
->GE
= (val
>> 16) & 0xf;
200 if ((env
->uncached_cpsr
^ val
) & mask
& CPSR_M
) {
201 if (bad_mode_switch(env
, val
& CPSR_M
)) {
202 /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
203 * We choose to ignore the attempt and leave the CPSR M field
208 switch_mode(env
, val
& CPSR_M
);
211 mask
&= ~CACHED_CPSR_BITS
;
212 env
->uncached_cpsr
= (env
->uncached_cpsr
& ~mask
) | (val
& mask
);
215 /* Sign/zero extend */
216 uint32_t HELPER(sxtb16
)(uint32_t x
)
219 res
= (uint16_t)(int8_t)x
;
220 res
|= (uint32_t)(int8_t)(x
>> 16) << 16;
224 uint32_t HELPER(uxtb16
)(uint32_t x
)
227 res
= (uint16_t)(uint8_t)x
;
228 res
|= (uint32_t)(uint8_t)(x
>> 16) << 16;
/* Count-leading-zeros helper.
 * NOTE(review): the function body was lost in extraction; given the
 * "host-utils.h" include at the top of the file, this presumably
 * wraps clz32() -- TODO confirm against the original source. */
232 uint32_t HELPER(clz
)(uint32_t x
)
/* Signed 32-bit division helper with ARM SDIV semantics.
 * The visible guard covers the INT_MIN / -1 case, which would be
 * signed-overflow UB in C.
 * NOTE(review): the divide-by-zero guard and the division itself were
 * lost in extraction -- presumably return 0 and num / den; TODO confirm. */
237 int32_t HELPER(sdiv
)(int32_t num
, int32_t den
)
241 if (num
== INT_MIN
&& den
== -1)
/* Unsigned 32-bit division helper with ARM UDIV semantics.
 * NOTE(review): body lost in extraction; presumably guards den == 0
 * (returning 0) and otherwise returns num / den -- TODO confirm. */
246 uint32_t HELPER(udiv
)(uint32_t num
, uint32_t den
)
253 uint32_t HELPER(rbit
)(uint32_t x
)
255 x
= ((x
& 0xff000000) >> 24)
256 | ((x
& 0x00ff0000) >> 8)
257 | ((x
& 0x0000ff00) << 8)
258 | ((x
& 0x000000ff) << 24);
259 x
= ((x
& 0xf0f0f0f0) >> 4)
260 | ((x
& 0x0f0f0f0f) << 4);
261 x
= ((x
& 0x88888888) >> 3)
262 | ((x
& 0x44444444) >> 1)
263 | ((x
& 0x22222222) << 1)
264 | ((x
& 0x11111111) << 3);
268 uint32_t HELPER(abs
)(uint32_t x
)
270 return ((int32_t)x
< 0) ? -x
: x
;
273 #if defined(CONFIG_USER_ONLY)
275 void do_interrupt (CPUARMState
*env
)
277 env
->exception_index
= -1;
280 int cpu_arm_handle_mmu_fault (CPUARMState
*env
, target_ulong address
, int rw
,
284 env
->exception_index
= EXCP_PREFETCH_ABORT
;
285 env
->cp15
.c6_insn
= address
;
287 env
->exception_index
= EXCP_DATA_ABORT
;
288 env
->cp15
.c6_data
= address
;
293 /* These should probably raise undefined insn exceptions. */
294 void HELPER(set_cp
)(CPUARMState
*env
, uint32_t insn
, uint32_t val
)
296 int op1
= (insn
>> 8) & 0xf;
297 cpu_abort(env
, "cp%i insn %08x\n", op1
, insn
);
301 uint32_t HELPER(get_cp
)(CPUARMState
*env
, uint32_t insn
)
303 int op1
= (insn
>> 8) & 0xf;
304 cpu_abort(env
, "cp%i insn %08x\n", op1
, insn
);
308 void HELPER(set_cp15
)(CPUARMState
*env
, uint32_t insn
, uint32_t val
)
310 cpu_abort(env
, "cp15 insn %08x\n", insn
);
313 uint32_t HELPER(get_cp15
)(CPUARMState
*env
, uint32_t insn
)
315 cpu_abort(env
, "cp15 insn %08x\n", insn
);
318 /* These should probably raise undefined insn exceptions. */
319 void HELPER(v7m_msr
)(CPUARMState
*env
, uint32_t reg
, uint32_t val
)
321 cpu_abort(env
, "v7m_mrs %d\n", reg
);
324 uint32_t HELPER(v7m_mrs
)(CPUARMState
*env
, uint32_t reg
)
326 cpu_abort(env
, "v7m_mrs %d\n", reg
);
330 void switch_mode(CPUARMState
*env
, int mode
)
332 if (mode
!= ARM_CPU_MODE_USR
)
333 cpu_abort(env
, "Tried to switch out of user mode\n");
336 void HELPER(set_r13_banked
)(CPUARMState
*env
, uint32_t mode
, uint32_t val
)
338 cpu_abort(env
, "banked r13 write\n");
341 uint32_t HELPER(get_r13_banked
)(CPUARMState
*env
, uint32_t mode
)
343 cpu_abort(env
, "banked r13 read\n");
349 /* Map CPU modes onto saved register banks. */
350 static inline int bank_number(CPUARMState
*env
, int mode
)
353 case ARM_CPU_MODE_USR
:
354 case ARM_CPU_MODE_SYS
:
356 case ARM_CPU_MODE_SVC
:
358 case ARM_CPU_MODE_ABT
:
360 case ARM_CPU_MODE_UND
:
362 case ARM_CPU_MODE_IRQ
:
364 case ARM_CPU_MODE_FIQ
:
367 cpu_abort(env
, "Bad mode %x\n", mode
);
371 void switch_mode(CPUARMState
*env
, int mode
)
376 old_mode
= env
->uncached_cpsr
& CPSR_M
;
377 if (mode
== old_mode
)
380 if (old_mode
== ARM_CPU_MODE_FIQ
) {
381 memcpy (env
->fiq_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
382 memcpy (env
->regs
+ 8, env
->usr_regs
, 5 * sizeof(uint32_t));
383 } else if (mode
== ARM_CPU_MODE_FIQ
) {
384 memcpy (env
->usr_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
385 memcpy (env
->regs
+ 8, env
->fiq_regs
, 5 * sizeof(uint32_t));
388 i
= bank_number(env
, old_mode
);
389 env
->banked_r13
[i
] = env
->regs
[13];
390 env
->banked_r14
[i
] = env
->regs
[14];
391 env
->banked_spsr
[i
] = env
->spsr
;
393 i
= bank_number(env
, mode
);
394 env
->regs
[13] = env
->banked_r13
[i
];
395 env
->regs
[14] = env
->banked_r14
[i
];
396 env
->spsr
= env
->banked_spsr
[i
];
399 static void v7m_push(CPUARMState
*env
, uint32_t val
)
402 stl_phys(env
->regs
[13], val
);
405 static uint32_t v7m_pop(CPUARMState
*env
)
408 val
= ldl_phys(env
->regs
[13]);
413 /* Switch to V7M main or process stack pointer. */
414 static void switch_v7m_sp(CPUARMState
*env
, int process
)
417 if (env
->v7m
.current_sp
!= process
) {
418 tmp
= env
->v7m
.other_sp
;
419 env
->v7m
.other_sp
= env
->regs
[13];
421 env
->v7m
.current_sp
= process
;
425 static void do_v7m_exception_exit(CPUARMState
*env
)
430 type
= env
->regs
[15];
431 if (env
->v7m
.exception
!= 0)
432 armv7m_nvic_complete_irq(env
->nvic
, env
->v7m
.exception
);
434 /* Switch to the target stack. */
435 switch_v7m_sp(env
, (type
& 4) != 0);
437 env
->regs
[0] = v7m_pop(env
);
438 env
->regs
[1] = v7m_pop(env
);
439 env
->regs
[2] = v7m_pop(env
);
440 env
->regs
[3] = v7m_pop(env
);
441 env
->regs
[12] = v7m_pop(env
);
442 env
->regs
[14] = v7m_pop(env
);
443 env
->regs
[15] = v7m_pop(env
);
445 xpsr_write(env
, xpsr
, 0xfffffdff);
446 /* Undo stack alignment. */
449 /* ??? The exception return type specifies Thread/Handler mode. However
450 this is also implied by the xPSR value. Not sure what to do
451 if there is a mismatch. */
452 /* ??? Likewise for mismatches between the CONTROL register and the stack
456 static void do_interrupt_v7m(CPUARMState
*env
)
458 uint32_t xpsr
= xpsr_read(env
);
463 if (env
->v7m
.current_sp
)
465 if (env
->v7m
.exception
== 0)
468 /* For exceptions we just mark as pending on the NVIC, and let that
470 /* TODO: Need to escalate if the current priority is higher than the
471 one we're raising. */
472 switch (env
->exception_index
) {
474 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_USAGE
);
478 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_SVC
);
480 case EXCP_PREFETCH_ABORT
:
481 case EXCP_DATA_ABORT
:
482 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_MEM
);
485 if (semihosting_enabled
) {
487 nr
= arm_lduw_code(env
->regs
[15], env
->bswap_code
) & 0xff;
490 env
->regs
[0] = do_arm_semihosting(env
);
494 armv7m_nvic_set_pending(env
->nvic
, ARMV7M_EXCP_DEBUG
);
497 env
->v7m
.exception
= armv7m_nvic_acknowledge_irq(env
->nvic
);
499 case EXCP_EXCEPTION_EXIT
:
500 do_v7m_exception_exit(env
);
503 cpu_abort(env
, "Unhandled exception 0x%x\n", env
->exception_index
);
504 return; /* Never happens. Keep compiler happy. */
507 /* Align stack pointer. */
508 /* ??? Should only do this if Configuration Control Register
509 STACKALIGN bit is set. */
510 if (env
->regs
[13] & 4) {
514 /* Switch to the handler mode. */
516 v7m_push(env
, env
->regs
[15]);
517 v7m_push(env
, env
->regs
[14]);
518 v7m_push(env
, env
->regs
[12]);
519 v7m_push(env
, env
->regs
[3]);
520 v7m_push(env
, env
->regs
[2]);
521 v7m_push(env
, env
->regs
[1]);
522 v7m_push(env
, env
->regs
[0]);
523 switch_v7m_sp(env
, 0);
525 env
->condexec_bits
= 0;
527 addr
= ldl_phys(env
->v7m
.vecbase
+ env
->v7m
.exception
* 4);
528 env
->regs
[15] = addr
& 0xfffffffe;
529 env
->thumb
= addr
& 1;
532 /* Handle a CPU exception. */
533 void do_interrupt(CPUARMState
*env
)
541 do_interrupt_v7m(env
);
544 /* TODO: Vectored interrupt controller. */
545 switch (env
->exception_index
) {
547 new_mode
= ARM_CPU_MODE_UND
;
556 if (semihosting_enabled
) {
557 /* Check for semihosting interrupt. */
559 mask
= arm_lduw_code(env
->regs
[15] - 2, env
->bswap_code
) & 0xff;
561 mask
= arm_ldl_code(env
->regs
[15] - 4, env
->bswap_code
)
564 /* Only intercept calls from privileged modes, to provide some
565 semblance of security. */
566 if (((mask
== 0x123456 && !env
->thumb
)
567 || (mask
== 0xab && env
->thumb
))
568 && (env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
) {
569 env
->regs
[0] = do_arm_semihosting(env
);
573 new_mode
= ARM_CPU_MODE_SVC
;
576 /* The PC already points to the next instruction. */
580 /* See if this is a semihosting syscall. */
581 if (env
->thumb
&& semihosting_enabled
) {
582 mask
= arm_lduw_code(env
->regs
[15], env
->bswap_code
) & 0xff;
584 && (env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
) {
586 env
->regs
[0] = do_arm_semihosting(env
);
590 env
->cp15
.c5_insn
= 2;
591 /* Fall through to prefetch abort. */
592 case EXCP_PREFETCH_ABORT
:
593 new_mode
= ARM_CPU_MODE_ABT
;
595 mask
= CPSR_A
| CPSR_I
;
598 case EXCP_DATA_ABORT
:
599 new_mode
= ARM_CPU_MODE_ABT
;
601 mask
= CPSR_A
| CPSR_I
;
605 new_mode
= ARM_CPU_MODE_IRQ
;
607 /* Disable IRQ and imprecise data aborts. */
608 mask
= CPSR_A
| CPSR_I
;
612 new_mode
= ARM_CPU_MODE_FIQ
;
614 /* Disable FIQ, IRQ and imprecise data aborts. */
615 mask
= CPSR_A
| CPSR_I
| CPSR_F
;
619 cpu_abort(env
, "Unhandled exception 0x%x\n", env
->exception_index
);
620 return; /* Never happens. Keep compiler happy. */
623 if (env
->cp15
.c1_sys
& (1 << 13)) {
626 switch_mode (env
, new_mode
);
627 env
->spsr
= cpsr_read(env
);
629 env
->condexec_bits
= 0;
630 /* Switch to the new mode, and to the correct instruction set. */
631 env
->uncached_cpsr
= (env
->uncached_cpsr
& ~CPSR_M
) | new_mode
;
632 env
->uncached_cpsr
|= mask
;
633 /* this is a lie, as the was no c1_sys on V4T/V5, but who cares
634 * and we should just guard the thumb mode on V4 */
635 if (arm_feature(env
, ARM_FEATURE_V4T
)) {
636 env
->thumb
= (env
->cp15
.c1_sys
& (1 << 30)) != 0;
638 env
->regs
[14] = env
->regs
[15] + offset
;
639 env
->regs
[15] = addr
;
640 env
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
/* Check section/page access permissions for the v5/v6 table walkers.
 * 'ap' is the descriptor's access-permission field, 'domain_prot' the
 * domain access value (3 == manager, which bypasses permissions),
 * 'access_type' 1 for writes, and 'is_user' non-zero for user-mode
 * accesses. Returns PAGE_* protection flags, or 0 to deny.
 * NOTE(review): extraction dropped this function's switch(ap) case
 * labels and several branches; the surviving fragments are kept
 * byte-identical below. */
643 /* Check section/page access permissions.
644 Returns the page protection flags, or zero if the access is not
646 static inline int check_ap(CPUARMState
*env
, int ap
, int domain_prot
,
647 int access_type
, int is_user
)
/* Domain manager access: everything is allowed. */
651 if (domain_prot
== 3) {
652 return PAGE_READ
| PAGE_WRITE
;
655 if (access_type
== 1)
662 if (access_type
== 1)
/* S/R bits from the cp15 control register select legacy behaviour. */
664 switch ((env
->cp15
.c1_sys
>> 8) & 3) {
666 return is_user
? 0 : PAGE_READ
;
673 return is_user
? 0 : PAGE_READ
| PAGE_WRITE
;
678 return PAGE_READ
| PAGE_WRITE
;
680 return PAGE_READ
| PAGE_WRITE
;
681 case 4: /* Reserved. */
684 return is_user
? 0 : prot_ro
;
688 if (!arm_feature (env
, ARM_FEATURE_V6K
))
696 static uint32_t get_level1_table_address(CPUARMState
*env
, uint32_t address
)
700 if (address
& env
->cp15
.c2_mask
)
701 table
= env
->cp15
.c2_base1
& 0xffffc000;
703 table
= env
->cp15
.c2_base0
& env
->cp15
.c2_base_mask
;
705 table
|= (address
>> 18) & 0x3ffc;
709 static int get_phys_addr_v5(CPUARMState
*env
, uint32_t address
, int access_type
,
710 int is_user
, uint32_t *phys_ptr
, int *prot
,
711 target_ulong
*page_size
)
722 /* Pagetable walk. */
723 /* Lookup l1 descriptor. */
724 table
= get_level1_table_address(env
, address
);
725 desc
= ldl_phys(table
);
727 domain
= (desc
>> 5) & 0x0f;
728 domain_prot
= (env
->cp15
.c3
>> (domain
* 2)) & 3;
730 /* Section translation fault. */
734 if (domain_prot
== 0 || domain_prot
== 2) {
736 code
= 9; /* Section domain fault. */
738 code
= 11; /* Page domain fault. */
743 phys_addr
= (desc
& 0xfff00000) | (address
& 0x000fffff);
744 ap
= (desc
>> 10) & 3;
746 *page_size
= 1024 * 1024;
748 /* Lookup l2 entry. */
750 /* Coarse pagetable. */
751 table
= (desc
& 0xfffffc00) | ((address
>> 10) & 0x3fc);
753 /* Fine pagetable. */
754 table
= (desc
& 0xfffff000) | ((address
>> 8) & 0xffc);
756 desc
= ldl_phys(table
);
758 case 0: /* Page translation fault. */
761 case 1: /* 64k page. */
762 phys_addr
= (desc
& 0xffff0000) | (address
& 0xffff);
763 ap
= (desc
>> (4 + ((address
>> 13) & 6))) & 3;
764 *page_size
= 0x10000;
766 case 2: /* 4k page. */
767 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
768 ap
= (desc
>> (4 + ((address
>> 13) & 6))) & 3;
771 case 3: /* 1k page. */
773 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
774 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
776 /* Page translation fault. */
781 phys_addr
= (desc
& 0xfffffc00) | (address
& 0x3ff);
783 ap
= (desc
>> 4) & 3;
787 /* Never happens, but compiler isn't smart enough to tell. */
792 *prot
= check_ap(env
, ap
, domain_prot
, access_type
, is_user
);
794 /* Access permission fault. */
798 *phys_ptr
= phys_addr
;
801 return code
| (domain
<< 4);
804 static int get_phys_addr_v6(CPUARMState
*env
, uint32_t address
, int access_type
,
805 int is_user
, uint32_t *phys_ptr
, int *prot
,
806 target_ulong
*page_size
)
818 /* Pagetable walk. */
819 /* Lookup l1 descriptor. */
820 table
= get_level1_table_address(env
, address
);
821 desc
= ldl_phys(table
);
824 /* Section translation fault. */
828 } else if (type
== 2 && (desc
& (1 << 18))) {
832 /* Section or page. */
833 domain
= (desc
>> 5) & 0x0f;
835 domain_prot
= (env
->cp15
.c3
>> (domain
* 2)) & 3;
836 if (domain_prot
== 0 || domain_prot
== 2) {
838 code
= 9; /* Section domain fault. */
840 code
= 11; /* Page domain fault. */
844 if (desc
& (1 << 18)) {
846 phys_addr
= (desc
& 0xff000000) | (address
& 0x00ffffff);
847 *page_size
= 0x1000000;
850 phys_addr
= (desc
& 0xfff00000) | (address
& 0x000fffff);
851 *page_size
= 0x100000;
853 ap
= ((desc
>> 10) & 3) | ((desc
>> 13) & 4);
854 xn
= desc
& (1 << 4);
857 /* Lookup l2 entry. */
858 table
= (desc
& 0xfffffc00) | ((address
>> 10) & 0x3fc);
859 desc
= ldl_phys(table
);
860 ap
= ((desc
>> 4) & 3) | ((desc
>> 7) & 4);
862 case 0: /* Page translation fault. */
865 case 1: /* 64k page. */
866 phys_addr
= (desc
& 0xffff0000) | (address
& 0xffff);
867 xn
= desc
& (1 << 15);
868 *page_size
= 0x10000;
870 case 2: case 3: /* 4k page. */
871 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
876 /* Never happens, but compiler isn't smart enough to tell. */
881 if (domain_prot
== 3) {
882 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
884 if (xn
&& access_type
== 2)
887 /* The simplified model uses AP[0] as an access control bit. */
888 if ((env
->cp15
.c1_sys
& (1 << 29)) && (ap
& 1) == 0) {
889 /* Access flag fault. */
890 code
= (code
== 15) ? 6 : 3;
893 *prot
= check_ap(env
, ap
, domain_prot
, access_type
, is_user
);
895 /* Access permission fault. */
902 *phys_ptr
= phys_addr
;
905 return code
| (domain
<< 4);
908 static int get_phys_addr_mpu(CPUARMState
*env
, uint32_t address
, int access_type
,
909 int is_user
, uint32_t *phys_ptr
, int *prot
)
916 for (n
= 7; n
>= 0; n
--) {
917 base
= env
->cp15
.c6_region
[n
];
920 mask
= 1 << ((base
>> 1) & 0x1f);
921 /* Keep this shift separate from the above to avoid an
922 (undefined) << 32. */
923 mask
= (mask
<< 1) - 1;
924 if (((base
^ address
) & ~mask
) == 0)
930 if (access_type
== 2) {
931 mask
= env
->cp15
.c5_insn
;
933 mask
= env
->cp15
.c5_data
;
935 mask
= (mask
>> (n
* 4)) & 0xf;
942 *prot
= PAGE_READ
| PAGE_WRITE
;
950 *prot
= PAGE_READ
| PAGE_WRITE
;
961 /* Bad permission. */
968 static inline int get_phys_addr(CPUARMState
*env
, uint32_t address
,
969 int access_type
, int is_user
,
970 uint32_t *phys_ptr
, int *prot
,
971 target_ulong
*page_size
)
973 /* Fast Context Switch Extension. */
974 if (address
< 0x02000000)
975 address
+= env
->cp15
.c13_fcse
;
977 if ((env
->cp15
.c1_sys
& 1) == 0) {
978 /* MMU/MPU disabled. */
980 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
981 *page_size
= TARGET_PAGE_SIZE
;
983 } else if (arm_feature(env
, ARM_FEATURE_MPU
)) {
984 *page_size
= TARGET_PAGE_SIZE
;
985 return get_phys_addr_mpu(env
, address
, access_type
, is_user
, phys_ptr
,
987 } else if (env
->cp15
.c1_sys
& (1 << 23)) {
988 return get_phys_addr_v6(env
, address
, access_type
, is_user
, phys_ptr
,
991 return get_phys_addr_v5(env
, address
, access_type
, is_user
, phys_ptr
,
996 int cpu_arm_handle_mmu_fault (CPUARMState
*env
, target_ulong address
,
997 int access_type
, int mmu_idx
)
1000 target_ulong page_size
;
1004 is_user
= mmu_idx
== MMU_USER_IDX
;
1005 ret
= get_phys_addr(env
, address
, access_type
, is_user
, &phys_addr
, &prot
,
1008 /* Map a single [sub]page. */
1009 phys_addr
&= ~(uint32_t)0x3ff;
1010 address
&= ~(uint32_t)0x3ff;
1011 tlb_set_page (env
, address
, phys_addr
, prot
, mmu_idx
, page_size
);
1015 if (access_type
== 2) {
1016 env
->cp15
.c5_insn
= ret
;
1017 env
->cp15
.c6_insn
= address
;
1018 env
->exception_index
= EXCP_PREFETCH_ABORT
;
1020 env
->cp15
.c5_data
= ret
;
1021 if (access_type
== 1 && arm_feature(env
, ARM_FEATURE_V6
))
1022 env
->cp15
.c5_data
|= (1 << 11);
1023 env
->cp15
.c6_data
= address
;
1024 env
->exception_index
= EXCP_DATA_ABORT
;
1029 target_phys_addr_t
cpu_get_phys_page_debug(CPUARMState
*env
, target_ulong addr
)
1032 target_ulong page_size
;
1036 ret
= get_phys_addr(env
, addr
, 0, 0, &phys_addr
, &prot
, &page_size
);
1044 void HELPER(set_cp
)(CPUARMState
*env
, uint32_t insn
, uint32_t val
)
1046 int cp_num
= (insn
>> 8) & 0xf;
1047 int cp_info
= (insn
>> 5) & 7;
1048 int src
= (insn
>> 16) & 0xf;
1049 int operand
= insn
& 0xf;
1051 if (env
->cp
[cp_num
].cp_write
)
1052 env
->cp
[cp_num
].cp_write(env
->cp
[cp_num
].opaque
,
1053 cp_info
, src
, operand
, val
);
1056 uint32_t HELPER(get_cp
)(CPUARMState
*env
, uint32_t insn
)
1058 int cp_num
= (insn
>> 8) & 0xf;
1059 int cp_info
= (insn
>> 5) & 7;
1060 int dest
= (insn
>> 16) & 0xf;
1061 int operand
= insn
& 0xf;
1063 if (env
->cp
[cp_num
].cp_read
)
1064 return env
->cp
[cp_num
].cp_read(env
->cp
[cp_num
].opaque
,
1065 cp_info
, dest
, operand
);
/* Return basic MPU access permission bits: compress the extended
 * format (4 bits per region, low 2 significant) down to 2 bits per
 * region across the 8 regions. Inverse of extended_mpu_ap_bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t packed = 0;
    uint32_t field = 3;
    int shift;

    for (shift = 0; shift < 16; shift += 2) {
        packed |= (val >> shift) & field;
        field <<= 2;
    }
    return packed;
}
/* Pad basic MPU access permission bits to extended format: spread
 * each region's 2-bit field into the low half of a 4-bit field.
 * Inverse of simple_mpu_ap_bits. */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t spread = 0;
    uint32_t field = 3;
    int shift;

    for (shift = 0; shift < 16; shift += 2) {
        spread |= (val & field) << shift;
        field <<= 2;
    }
    return spread;
}
1099 void HELPER(set_cp15
)(CPUARMState
*env
, uint32_t insn
, uint32_t val
)
1105 op1
= (insn
>> 21) & 7;
1106 op2
= (insn
>> 5) & 7;
1108 switch ((insn
>> 16) & 0xf) {
1111 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
1113 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1115 if (arm_feature(env
, ARM_FEATURE_V7
)
1116 && op1
== 2 && crm
== 0 && op2
== 0) {
1117 env
->cp15
.c0_cssel
= val
& 0xf;
1121 case 1: /* System configuration. */
1122 if (arm_feature(env
, ARM_FEATURE_V7
)
1123 && op1
== 0 && crm
== 1 && op2
== 0) {
1124 env
->cp15
.c1_scr
= val
;
1127 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1131 if (!arm_feature(env
, ARM_FEATURE_XSCALE
) || crm
== 0)
1132 env
->cp15
.c1_sys
= val
;
1133 /* ??? Lots of these bits are not implemented. */
1134 /* This may enable/disable the MMU, so do a TLB flush. */
1137 case 1: /* Auxiliary control register. */
1138 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
1139 env
->cp15
.c1_xscaleauxcr
= val
;
1142 /* Not implemented. */
1145 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
1147 if (env
->cp15
.c1_coproc
!= val
) {
1148 env
->cp15
.c1_coproc
= val
;
1149 /* ??? Is this safe when called from within a TB? */
1157 case 2: /* MMU Page table control / MPU cache control. */
1158 if (arm_feature(env
, ARM_FEATURE_MPU
)) {
1161 env
->cp15
.c2_data
= val
;
1164 env
->cp15
.c2_insn
= val
;
1172 env
->cp15
.c2_base0
= val
;
1175 env
->cp15
.c2_base1
= val
;
1179 env
->cp15
.c2_control
= val
;
1180 env
->cp15
.c2_mask
= ~(((uint32_t)0xffffffffu
) >> val
);
1181 env
->cp15
.c2_base_mask
= ~((uint32_t)0x3fffu
>> val
);
1188 case 3: /* MMU Domain access control / MPU write buffer control. */
1190 tlb_flush(env
, 1); /* Flush TLB as domain not tracked in TLB */
1192 case 4: /* Reserved. */
1194 case 5: /* MMU Fault status / MPU access permission. */
1195 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1199 if (arm_feature(env
, ARM_FEATURE_MPU
))
1200 val
= extended_mpu_ap_bits(val
);
1201 env
->cp15
.c5_data
= val
;
1204 if (arm_feature(env
, ARM_FEATURE_MPU
))
1205 val
= extended_mpu_ap_bits(val
);
1206 env
->cp15
.c5_insn
= val
;
1209 if (!arm_feature(env
, ARM_FEATURE_MPU
))
1211 env
->cp15
.c5_data
= val
;
1214 if (!arm_feature(env
, ARM_FEATURE_MPU
))
1216 env
->cp15
.c5_insn
= val
;
1222 case 6: /* MMU Fault address / MPU base/size. */
1223 if (arm_feature(env
, ARM_FEATURE_MPU
)) {
1226 env
->cp15
.c6_region
[crm
] = val
;
1228 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1232 env
->cp15
.c6_data
= val
;
1234 case 1: /* ??? This is WFAR on armv6 */
1236 env
->cp15
.c6_insn
= val
;
1243 case 7: /* Cache control. */
1244 env
->cp15
.c15_i_max
= 0x000;
1245 env
->cp15
.c15_i_min
= 0xff0;
1249 /* No cache, so nothing to do except VA->PA translations. */
1250 if (arm_feature(env
, ARM_FEATURE_VAPA
)) {
1253 if (arm_feature(env
, ARM_FEATURE_V7
)) {
1254 env
->cp15
.c7_par
= val
& 0xfffff6ff;
1256 env
->cp15
.c7_par
= val
& 0xfffff1ff;
1261 target_ulong page_size
;
1263 int ret
, is_user
= op2
& 2;
1264 int access_type
= op2
& 1;
1267 /* Other states are only available with TrustZone */
1270 ret
= get_phys_addr(env
, val
, access_type
, is_user
,
1271 &phys_addr
, &prot
, &page_size
);
1273 /* We do not set any attribute bits in the PAR */
1274 if (page_size
== (1 << 24)
1275 && arm_feature(env
, ARM_FEATURE_V7
)) {
1276 env
->cp15
.c7_par
= (phys_addr
& 0xff000000) | 1 << 1;
1278 env
->cp15
.c7_par
= phys_addr
& 0xfffff000;
1281 env
->cp15
.c7_par
= ((ret
& (10 << 1)) >> 5) |
1282 ((ret
& (12 << 1)) >> 6) |
1283 ((ret
& 0xf) << 1) | 1;
1290 case 8: /* MMU TLB control. */
1292 case 0: /* Invalidate all (TLBIALL) */
1295 case 1: /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
1296 tlb_flush_page(env
, val
& TARGET_PAGE_MASK
);
1298 case 2: /* Invalidate by ASID (TLBIASID) */
1299 tlb_flush(env
, val
== 0);
1301 case 3: /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
1302 tlb_flush_page(env
, val
& TARGET_PAGE_MASK
);
1309 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1311 if (arm_feature(env
, ARM_FEATURE_STRONGARM
))
1312 break; /* Ignore ReadBuffer access */
1314 case 0: /* Cache lockdown. */
1316 case 0: /* L1 cache. */
1319 env
->cp15
.c9_data
= val
;
1322 env
->cp15
.c9_insn
= val
;
1328 case 1: /* L2 cache. */
1329 /* Ignore writes to L2 lockdown/auxiliary registers. */
1335 case 1: /* TCM memory region registers. */
1336 /* Not implemented. */
1338 case 12: /* Performance monitor control */
1339 /* Performance monitors are implementation defined in v7,
1340 * but with an ARM recommended set of registers, which we
1341 * follow (although we don't actually implement any counters)
1343 if (!arm_feature(env
, ARM_FEATURE_V7
)) {
1347 case 0: /* performance monitor control register */
1348 /* only the DP, X, D and E bits are writable */
1349 env
->cp15
.c9_pmcr
&= ~0x39;
1350 env
->cp15
.c9_pmcr
|= (val
& 0x39);
1352 case 1: /* Count enable set register */
1354 env
->cp15
.c9_pmcnten
|= val
;
1356 case 2: /* Count enable clear */
1358 env
->cp15
.c9_pmcnten
&= ~val
;
1360 case 3: /* Overflow flag status */
1361 env
->cp15
.c9_pmovsr
&= ~val
;
1363 case 4: /* Software increment */
1364 /* RAZ/WI since we don't implement the software-count event */
1366 case 5: /* Event counter selection register */
1367 /* Since we don't implement any events, writing to this register
1368 * is actually UNPREDICTABLE. So we choose to RAZ/WI.
1375 case 13: /* Performance counters */
1376 if (!arm_feature(env
, ARM_FEATURE_V7
)) {
1380 case 0: /* Cycle count register: not implemented, so RAZ/WI */
1382 case 1: /* Event type select */
1383 env
->cp15
.c9_pmxevtyper
= val
& 0xff;
1385 case 2: /* Event count register */
1386 /* Unimplemented (we have no events), RAZ/WI */
1392 case 14: /* Performance monitor control */
1393 if (!arm_feature(env
, ARM_FEATURE_V7
)) {
1397 case 0: /* user enable */
1398 env
->cp15
.c9_pmuserenr
= val
& 1;
1399 /* changes access rights for cp registers, so flush tbs */
1402 case 1: /* interrupt enable set */
1403 /* We have no event counters so only the C bit can be changed */
1405 env
->cp15
.c9_pminten
|= val
;
1407 case 2: /* interrupt enable clear */
1409 env
->cp15
.c9_pminten
&= ~val
;
1417 case 10: /* MMU TLB lockdown. */
1418 /* ??? TLB lockdown not implemented. */
1420 case 12: /* Reserved. */
1422 case 13: /* Process ID. */
1425 /* Unlike real hardware the qemu TLB uses virtual addresses,
1426 not modified virtual addresses, so this causes a TLB flush.
1428 if (env
->cp15
.c13_fcse
!= val
)
1430 env
->cp15
.c13_fcse
= val
;
1433 /* This changes the ASID, so do a TLB flush. */
1434 if (env
->cp15
.c13_context
!= val
1435 && !arm_feature(env
, ARM_FEATURE_MPU
))
1437 env
->cp15
.c13_context
= val
;
1443 case 14: /* Generic timer */
1444 if (arm_feature(env
, ARM_FEATURE_GENERIC_TIMER
)) {
1445 /* Dummy implementation: RAZ/WI for all */
1449 case 15: /* Implementation specific. */
1450 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
1451 if (op2
== 0 && crm
== 1) {
1452 if (env
->cp15
.c15_cpar
!= (val
& 0x3fff)) {
1453 /* Changes cp0 to cp13 behavior, so needs a TB flush. */
1455 env
->cp15
.c15_cpar
= val
& 0x3fff;
1461 if (arm_feature(env
, ARM_FEATURE_OMAPCP
)) {
1465 case 1: /* Set TI925T configuration. */
1466 env
->cp15
.c15_ticonfig
= val
& 0xe7;
1467 env
->cp15
.c0_cpuid
= (val
& (1 << 5)) ? /* OS_TYPE bit */
1468 ARM_CPUID_TI915T
: ARM_CPUID_TI925T
;
1470 case 2: /* Set I_max. */
1471 env
->cp15
.c15_i_max
= val
;
1473 case 3: /* Set I_min. */
1474 env
->cp15
.c15_i_min
= val
;
1476 case 4: /* Set thread-ID. */
1477 env
->cp15
.c15_threadid
= val
& 0xffff;
1479 case 8: /* Wait-for-interrupt (deprecated). */
1480 cpu_interrupt(env
, CPU_INTERRUPT_HALT
);
1486 if (ARM_CPUID(env
) == ARM_CPUID_CORTEXA9
) {
1489 if ((op1
== 0) && (op2
== 0)) {
1490 env
->cp15
.c15_power_control
= val
;
1491 } else if ((op1
== 0) && (op2
== 1)) {
1492 env
->cp15
.c15_diagnostic
= val
;
1493 } else if ((op1
== 0) && (op2
== 2)) {
1494 env
->cp15
.c15_power_diagnostic
= val
;
1504 /* ??? For debugging only. Should raise illegal instruction exception. */
1505 cpu_abort(env
, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
1506 (insn
>> 16) & 0xf, crm
, op1
, op2
);
1509 uint32_t HELPER(get_cp15
)(CPUARMState
*env
, uint32_t insn
)
1515 op1
= (insn
>> 21) & 7;
1516 op2
= (insn
>> 5) & 7;
1518 switch ((insn
>> 16) & 0xf) {
1519 case 0: /* ID codes. */
1525 case 0: /* Device ID. */
1526 return env
->cp15
.c0_cpuid
;
1527 case 1: /* Cache Type. */
1528 return env
->cp15
.c0_cachetype
;
1529 case 2: /* TCM status. */
1531 case 3: /* TLB type register. */
1532 return 0; /* No lockable TLB entries. */
1534 /* The MPIDR was standardised in v7; prior to
1535 * this it was implemented only in the 11MPCore.
1536 * For all other pre-v7 cores it does not exist.
1538 if (arm_feature(env
, ARM_FEATURE_V7
) ||
1539 ARM_CPUID(env
) == ARM_CPUID_ARM11MPCORE
) {
1540 int mpidr
= env
->cpu_index
;
1541 /* We don't support setting cluster ID ([8..11])
1542 * so these bits always RAZ.
1544 if (arm_feature(env
, ARM_FEATURE_V7MP
)) {
1546 /* Cores which are uniprocessor (non-coherent)
1547 * but still implement the MP extensions set
1548 * bit 30. (For instance, A9UP.) However we do
1549 * not currently model any of those cores.
1554 /* otherwise fall through to the unimplemented-reg case */
1559 if (!arm_feature(env
, ARM_FEATURE_V6
))
1561 return env
->cp15
.c0_c1
[op2
];
1563 if (!arm_feature(env
, ARM_FEATURE_V6
))
1565 return env
->cp15
.c0_c2
[op2
];
1566 case 3: case 4: case 5: case 6: case 7:
1572 /* These registers aren't documented on arm11 cores. However
1573 Linux looks at them anyway. */
1574 if (!arm_feature(env
, ARM_FEATURE_V6
))
1578 if (!arm_feature(env
, ARM_FEATURE_V7
))
1583 return env
->cp15
.c0_ccsid
[env
->cp15
.c0_cssel
];
1585 return env
->cp15
.c0_clid
;
1591 if (op2
!= 0 || crm
!= 0)
1593 return env
->cp15
.c0_cssel
;
1597 case 1: /* System configuration. */
1598 if (arm_feature(env
, ARM_FEATURE_V7
)
1599 && op1
== 0 && crm
== 1 && op2
== 0) {
1600 return env
->cp15
.c1_scr
;
1602 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1605 case 0: /* Control register. */
1606 return env
->cp15
.c1_sys
;
1607 case 1: /* Auxiliary control register. */
1608 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
1609 return env
->cp15
.c1_xscaleauxcr
;
1610 if (!arm_feature(env
, ARM_FEATURE_AUXCR
))
1612 switch (ARM_CPUID(env
)) {
1613 case ARM_CPUID_ARM1026
:
1615 case ARM_CPUID_ARM1136
:
1616 case ARM_CPUID_ARM1136_R2
:
1617 case ARM_CPUID_ARM1176
:
1619 case ARM_CPUID_ARM11MPCORE
:
1621 case ARM_CPUID_CORTEXA8
:
1623 case ARM_CPUID_CORTEXA9
:
1624 case ARM_CPUID_CORTEXA15
:
1629 case 2: /* Coprocessor access register. */
1630 if (arm_feature(env
, ARM_FEATURE_XSCALE
))
1632 return env
->cp15
.c1_coproc
;
1636 case 2: /* MMU Page table control / MPU cache control. */
1637 if (arm_feature(env
, ARM_FEATURE_MPU
)) {
1640 return env
->cp15
.c2_data
;
1643 return env
->cp15
.c2_insn
;
1651 return env
->cp15
.c2_base0
;
1653 return env
->cp15
.c2_base1
;
1655 return env
->cp15
.c2_control
;
1660 case 3: /* MMU Domain access control / MPU write buffer control. */
1661 return env
->cp15
.c3
;
1662 case 4: /* Reserved. */
1664 case 5: /* MMU Fault status / MPU access permission. */
1665 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1669 if (arm_feature(env
, ARM_FEATURE_MPU
))
1670 return simple_mpu_ap_bits(env
->cp15
.c5_data
);
1671 return env
->cp15
.c5_data
;
1673 if (arm_feature(env
, ARM_FEATURE_MPU
))
1674 return simple_mpu_ap_bits(env
->cp15
.c5_insn
);
1675 return env
->cp15
.c5_insn
;
1677 if (!arm_feature(env
, ARM_FEATURE_MPU
))
1679 return env
->cp15
.c5_data
;
1681 if (!arm_feature(env
, ARM_FEATURE_MPU
))
1683 return env
->cp15
.c5_insn
;
1687 case 6: /* MMU Fault address. */
1688 if (arm_feature(env
, ARM_FEATURE_MPU
)) {
1691 return env
->cp15
.c6_region
[crm
];
1693 if (arm_feature(env
, ARM_FEATURE_OMAPCP
))
1697 return env
->cp15
.c6_data
;
1699 if (arm_feature(env
, ARM_FEATURE_V6
)) {
1700 /* Watchpoint Fault Adrress. */
1701 return 0; /* Not implemented. */
1703 /* Instruction Fault Adrress. */
1704 /* Arm9 doesn't have an IFAR, but implementing it anyway
1705 shouldn't do any harm. */
1706 return env
->cp15
.c6_insn
;
1709 if (arm_feature(env
, ARM_FEATURE_V6
)) {
1710 /* Instruction Fault Adrress. */
1711 return env
->cp15
.c6_insn
;
1719 case 7: /* Cache control. */
1720 if (crm
== 4 && op1
== 0 && op2
== 0) {
1721 return env
->cp15
.c7_par
;
1723 /* FIXME: Should only clear Z flag if destination is r15. */
1726 case 8: /* MMU TLB control. */
1730 case 0: /* Cache lockdown */
1732 case 0: /* L1 cache. */
1733 if (arm_feature(env
, ARM_FEATURE_OMAPCP
)) {
1738 return env
->cp15
.c9_data
;
1740 return env
->cp15
.c9_insn
;
1744 case 1: /* L2 cache */
1745 /* L2 Lockdown and Auxiliary control. */
1748 /* L2 cache lockdown (A8 only) */
1751 /* L2 cache auxiliary control (A8) or control (A15) */
1752 if (ARM_CPUID(env
) == ARM_CPUID_CORTEXA15
) {
1753 /* Linux wants the number of processors from here.
1754 * Might as well set the interrupt-controller bit too.
1756 return ((smp_cpus
- 1) << 24) | (1 << 23);
1760 /* L2 cache extended control (A15) */
1769 case 12: /* Performance monitor control */
1770 if (!arm_feature(env
, ARM_FEATURE_V7
)) {
1774 case 0: /* performance monitor control register */
1775 return env
->cp15
.c9_pmcr
;
1776 case 1: /* count enable set */
1777 case 2: /* count enable clear */
1778 return env
->cp15
.c9_pmcnten
;
1779 case 3: /* overflow flag status */
1780 return env
->cp15
.c9_pmovsr
;
1781 case 4: /* software increment */
1782 case 5: /* event counter selection register */
1783 return 0; /* Unimplemented, RAZ/WI */
1787 case 13: /* Performance counters */
1788 if (!arm_feature(env
, ARM_FEATURE_V7
)) {
1792 case 1: /* Event type select */
1793 return env
->cp15
.c9_pmxevtyper
;
1794 case 0: /* Cycle count register */
1795 case 2: /* Event count register */
1796 /* Unimplemented, so RAZ/WI */
1801 case 14: /* Performance monitor control */
1802 if (!arm_feature(env
, ARM_FEATURE_V7
)) {
1806 case 0: /* user enable */
1807 return env
->cp15
.c9_pmuserenr
;
1808 case 1: /* interrupt enable set */
1809 case 2: /* interrupt enable clear */
1810 return env
->cp15
.c9_pminten
;
1818 case 10: /* MMU TLB lockdown. */
1819 /* ??? TLB lockdown not implemented. */
1821 case 11: /* TCM DMA control. */
1822 case 12: /* Reserved. */
1824 case 13: /* Process ID. */
1827 return env
->cp15
.c13_fcse
;
1829 return env
->cp15
.c13_context
;
1833 case 14: /* Generic timer */
1834 if (arm_feature(env
, ARM_FEATURE_GENERIC_TIMER
)) {
1835 /* Dummy implementation: RAZ/WI for all */
1839 case 15: /* Implementation specific. */
1840 if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
1841 if (op2
== 0 && crm
== 1)
1842 return env
->cp15
.c15_cpar
;
1846 if (arm_feature(env
, ARM_FEATURE_OMAPCP
)) {
1850 case 1: /* Read TI925T configuration. */
1851 return env
->cp15
.c15_ticonfig
;
1852 case 2: /* Read I_max. */
1853 return env
->cp15
.c15_i_max
;
1854 case 3: /* Read I_min. */
1855 return env
->cp15
.c15_i_min
;
1856 case 4: /* Read thread-ID. */
1857 return env
->cp15
.c15_threadid
;
1858 case 8: /* TI925T_status */
1861 /* TODO: Peripheral port remap register:
1862 * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
1863 * controller base address at $rn & ~0xfff and map size of
1864 * 0x200 << ($rn & 0xfff), when MMU is off. */
1867 if (ARM_CPUID(env
) == ARM_CPUID_CORTEXA9
) {
1870 if ((op1
== 4) && (op2
== 0)) {
1871 /* The config_base_address should hold the value of
1872 * the peripheral base. ARM should get this from a CPU
1873 * object property, but that support isn't available in
1874 * December 2011. Default to 0 for now and board models
1875 * that care can set it by a private hook */
1876 return env
->cp15
.c15_config_base_address
;
1877 } else if ((op1
== 0) && (op2
== 0)) {
1878 /* power_control should be set to maximum latency. Again,
1879 default to 0 and set by private hook */
1880 return env
->cp15
.c15_power_control
;
1881 } else if ((op1
== 0) && (op2
== 1)) {
1882 return env
->cp15
.c15_diagnostic
;
1883 } else if ((op1
== 0) && (op2
== 2)) {
1884 return env
->cp15
.c15_power_diagnostic
;
1887 case 1: /* NEON Busy */
1889 case 5: /* tlb lockdown */
1892 if ((op1
== 5) && (op2
== 2)) {
1904 /* ??? For debugging only. Should raise illegal instruction exception. */
1905 cpu_abort(env
, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
1906 (insn
>> 16) & 0xf, crm
, op1
, op2
);
1910 void HELPER(set_r13_banked
)(CPUARMState
*env
, uint32_t mode
, uint32_t val
)
1912 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
1913 env
->regs
[13] = val
;
1915 env
->banked_r13
[bank_number(env
, mode
)] = val
;
1919 uint32_t HELPER(get_r13_banked
)(CPUARMState
*env
, uint32_t mode
)
1921 if ((env
->uncached_cpsr
& CPSR_M
) == mode
) {
1922 return env
->regs
[13];
1924 return env
->banked_r13
[bank_number(env
, mode
)];
1928 uint32_t HELPER(v7m_mrs
)(CPUARMState
*env
, uint32_t reg
)
1932 return xpsr_read(env
) & 0xf8000000;
1934 return xpsr_read(env
) & 0xf80001ff;
1936 return xpsr_read(env
) & 0xff00fc00;
1938 return xpsr_read(env
) & 0xff00fdff;
1940 return xpsr_read(env
) & 0x000001ff;
1942 return xpsr_read(env
) & 0x0700fc00;
1944 return xpsr_read(env
) & 0x0700edff;
1946 return env
->v7m
.current_sp
? env
->v7m
.other_sp
: env
->regs
[13];
1948 return env
->v7m
.current_sp
? env
->regs
[13] : env
->v7m
.other_sp
;
1949 case 16: /* PRIMASK */
1950 return (env
->uncached_cpsr
& CPSR_I
) != 0;
1951 case 17: /* BASEPRI */
1952 case 18: /* BASEPRI_MAX */
1953 return env
->v7m
.basepri
;
1954 case 19: /* FAULTMASK */
1955 return (env
->uncached_cpsr
& CPSR_F
) != 0;
1956 case 20: /* CONTROL */
1957 return env
->v7m
.control
;
1959 /* ??? For debugging only. */
1960 cpu_abort(env
, "Unimplemented system register read (%d)\n", reg
);
1965 void HELPER(v7m_msr
)(CPUARMState
*env
, uint32_t reg
, uint32_t val
)
1969 xpsr_write(env
, val
, 0xf8000000);
1972 xpsr_write(env
, val
, 0xf8000000);
1975 xpsr_write(env
, val
, 0xfe00fc00);
1978 xpsr_write(env
, val
, 0xfe00fc00);
1981 /* IPSR bits are readonly. */
1984 xpsr_write(env
, val
, 0x0600fc00);
1987 xpsr_write(env
, val
, 0x0600fc00);
1990 if (env
->v7m
.current_sp
)
1991 env
->v7m
.other_sp
= val
;
1993 env
->regs
[13] = val
;
1996 if (env
->v7m
.current_sp
)
1997 env
->regs
[13] = val
;
1999 env
->v7m
.other_sp
= val
;
2001 case 16: /* PRIMASK */
2003 env
->uncached_cpsr
|= CPSR_I
;
2005 env
->uncached_cpsr
&= ~CPSR_I
;
2007 case 17: /* BASEPRI */
2008 env
->v7m
.basepri
= val
& 0xff;
2010 case 18: /* BASEPRI_MAX */
2012 if (val
!= 0 && (val
< env
->v7m
.basepri
|| env
->v7m
.basepri
== 0))
2013 env
->v7m
.basepri
= val
;
2015 case 19: /* FAULTMASK */
2017 env
->uncached_cpsr
|= CPSR_F
;
2019 env
->uncached_cpsr
&= ~CPSR_F
;
2021 case 20: /* CONTROL */
2022 env
->v7m
.control
= val
& 3;
2023 switch_v7m_sp(env
, (val
& 2) != 0);
2026 /* ??? For debugging only. */
2027 cpu_abort(env
, "Unimplemented system register write (%d)\n", reg
);
2032 void cpu_arm_set_cp_io(CPUARMState
*env
, int cpnum
,
2033 ARMReadCPFunc
*cp_read
, ARMWriteCPFunc
*cp_write
,
2036 if (cpnum
< 0 || cpnum
> 14) {
2037 cpu_abort(env
, "Bad coprocessor number: %i\n", cpnum
);
2041 env
->cp
[cpnum
].cp_read
= cp_read
;
2042 env
->cp
[cpnum
].cp_write
= cp_write
;
2043 env
->cp
[cpnum
].opaque
= opaque
;
2048 /* Note that signed overflow is undefined in C. The following routines are
2049 careful to use unsigned types where modulo arithmetic is required.
2050 Failure to do so _will_ break on newer gcc. */
2052 /* Signed saturating arithmetic. */
/* Perform 16-bit signed saturating addition.  Overflow occurred iff the
 * operands have the same sign and the result's sign differs from them;
 * in that case clamp to INT16_MIN/INT16_MAX.
 */
static inline uint16_t add16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a + b;
    if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}
/* Perform 8-bit signed saturating addition (clamp to INT8_MIN/INT8_MAX
 * on signed overflow).
 */
static inline uint8_t add8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a + b;
    if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}
/* Perform 16-bit signed saturating subtraction.  Overflow occurred iff
 * the operands have different signs and the result's sign differs from
 * the minuend's; clamp to INT16_MIN/INT16_MAX.
 */
static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
{
    uint16_t res;

    res = a - b;
    if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
        if (a & 0x8000)
            res = 0x8000;
        else
            res = 0x7fff;
    }
    return res;
}
/* Perform 8-bit signed saturating subtraction (clamp to
 * INT8_MIN/INT8_MAX on signed overflow).
 */
static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
{
    uint8_t res;

    res = a - b;
    if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
        if (a & 0x80)
            res = 0x80;
        else
            res = 0x7f;
    }
    return res;
}
2114 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2115 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2116 #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
2117 #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
2120 #include "op_addsub.h"
2122 /* Unsigned saturating arithmetic. */
/* Perform 16-bit unsigned saturating addition (clamp to 0xffff on
 * wraparound).  NOTE(review): the body of this function was lost in
 * extraction and has been reconstructed -- verify against upstream.
 */
static inline uint16_t add16_usat(uint16_t a, uint16_t b)
{
    uint16_t res;
    res = a + b;
    if (res < a)
        res = 0xffff;
    return res;
}
/* Perform 16-bit unsigned saturating subtraction (clamp to 0 when the
 * subtrahend exceeds the minuend).  NOTE(review): body reconstructed --
 * lost in extraction; verify against upstream.
 */
static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}
/* Perform 8-bit unsigned saturating addition (clamp to 0xff on
 * wraparound).  NOTE(review): body reconstructed -- lost in extraction;
 * verify against upstream.
 */
static inline uint8_t add8_usat(uint8_t a, uint8_t b)
{
    uint8_t res;
    res = a + b;
    if (res < a)
        res = 0xff;
    return res;
}
/* Perform 8-bit unsigned saturating subtraction (clamp to 0).
 * NOTE(review): body reconstructed -- lost in extraction; verify
 * against upstream.
 */
static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return 0;
}
2157 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2158 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2159 #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
2160 #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
2163 #include "op_addsub.h"
2165 /* Signed modulo arithmetic. */
2166 #define SARITH16(a, b, n, op) do { \
2168 sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
2169 RESULT(sum, n, 16); \
2171 ge |= 3 << (n * 2); \
2174 #define SARITH8(a, b, n, op) do { \
2176 sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
2177 RESULT(sum, n, 8); \
2183 #define ADD16(a, b, n) SARITH16(a, b, n, +)
2184 #define SUB16(a, b, n) SARITH16(a, b, n, -)
2185 #define ADD8(a, b, n) SARITH8(a, b, n, +)
2186 #define SUB8(a, b, n) SARITH8(a, b, n, -)
2190 #include "op_addsub.h"
2192 /* Unsigned modulo arithmetic. */
2193 #define ADD16(a, b, n) do { \
2195 sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2196 RESULT(sum, n, 16); \
2197 if ((sum >> 16) == 1) \
2198 ge |= 3 << (n * 2); \
2201 #define ADD8(a, b, n) do { \
2203 sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2204 RESULT(sum, n, 8); \
2205 if ((sum >> 8) == 1) \
2209 #define SUB16(a, b, n) do { \
2211 sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2212 RESULT(sum, n, 16); \
2213 if ((sum >> 16) == 0) \
2214 ge |= 3 << (n * 2); \
2217 #define SUB8(a, b, n) do { \
2219 sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2220 RESULT(sum, n, 8); \
2221 if ((sum >> 8) == 0) \
2228 #include "op_addsub.h"
2230 /* Halved signed arithmetic. */
2231 #define ADD16(a, b, n) \
2232 RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2233 #define SUB16(a, b, n) \
2234 RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2235 #define ADD8(a, b, n) \
2236 RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2237 #define SUB8(a, b, n) \
2238 RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2241 #include "op_addsub.h"
2243 /* Halved unsigned arithmetic. */
2244 #define ADD16(a, b, n) \
2245 RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2246 #define SUB16(a, b, n) \
2247 RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2248 #define ADD8(a, b, n) \
2249 RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2250 #define SUB8(a, b, n) \
2251 RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2254 #include "op_addsub.h"
/* Absolute difference of two unsigned bytes, |a - b|.
 * NOTE(review): body reconstructed -- lost in extraction; verify
 * against upstream.
 */
static inline uint8_t do_usad(uint8_t a, uint8_t b)
{
    if (a > b)
        return a - b;
    else
        return b - a;
}
2264 /* Unsigned sum of absolute byte differences. */
2265 uint32_t HELPER(usad8
)(uint32_t a
, uint32_t b
)
2268 sum
= do_usad(a
, b
);
2269 sum
+= do_usad(a
>> 8, b
>> 8);
2270 sum
+= do_usad(a
>> 16, b
>>16);
2271 sum
+= do_usad(a
>> 24, b
>> 24);
2275 /* For ARMv6 SEL instruction. */
2276 uint32_t HELPER(sel_flags
)(uint32_t flags
, uint32_t a
, uint32_t b
)
2289 return (a
& mask
) | (b
& ~mask
);
2292 uint32_t HELPER(logicq_cc
)(uint64_t val
)
2294 return (val
>> 32) | (val
!= 0);
2297 /* VFP support. We follow the convention used for VFP instrunctions:
2298 Single precition routines have a "s" suffix, double precision a
2301 /* Convert host exception flags to vfp form. */
2302 static inline int vfp_exceptbits_from_host(int host_bits
)
2304 int target_bits
= 0;
2306 if (host_bits
& float_flag_invalid
)
2308 if (host_bits
& float_flag_divbyzero
)
2310 if (host_bits
& float_flag_overflow
)
2312 if (host_bits
& (float_flag_underflow
| float_flag_output_denormal
))
2314 if (host_bits
& float_flag_inexact
)
2315 target_bits
|= 0x10;
2316 if (host_bits
& float_flag_input_denormal
)
2317 target_bits
|= 0x80;
2321 uint32_t HELPER(vfp_get_fpscr
)(CPUARMState
*env
)
2326 fpscr
= (env
->vfp
.xregs
[ARM_VFP_FPSCR
] & 0xffc8ffff)
2327 | (env
->vfp
.vec_len
<< 16)
2328 | (env
->vfp
.vec_stride
<< 20);
2329 i
= get_float_exception_flags(&env
->vfp
.fp_status
);
2330 i
|= get_float_exception_flags(&env
->vfp
.standard_fp_status
);
2331 fpscr
|= vfp_exceptbits_from_host(i
);
2335 uint32_t vfp_get_fpscr(CPUARMState
*env
)
2337 return HELPER(vfp_get_fpscr
)(env
);
2340 /* Convert vfp exception flags to target form. */
2341 static inline int vfp_exceptbits_to_host(int target_bits
)
2345 if (target_bits
& 1)
2346 host_bits
|= float_flag_invalid
;
2347 if (target_bits
& 2)
2348 host_bits
|= float_flag_divbyzero
;
2349 if (target_bits
& 4)
2350 host_bits
|= float_flag_overflow
;
2351 if (target_bits
& 8)
2352 host_bits
|= float_flag_underflow
;
2353 if (target_bits
& 0x10)
2354 host_bits
|= float_flag_inexact
;
2355 if (target_bits
& 0x80)
2356 host_bits
|= float_flag_input_denormal
;
2360 void HELPER(vfp_set_fpscr
)(CPUARMState
*env
, uint32_t val
)
2365 changed
= env
->vfp
.xregs
[ARM_VFP_FPSCR
];
2366 env
->vfp
.xregs
[ARM_VFP_FPSCR
] = (val
& 0xffc8ffff);
2367 env
->vfp
.vec_len
= (val
>> 16) & 7;
2368 env
->vfp
.vec_stride
= (val
>> 20) & 3;
2371 if (changed
& (3 << 22)) {
2372 i
= (val
>> 22) & 3;
2375 i
= float_round_nearest_even
;
2381 i
= float_round_down
;
2384 i
= float_round_to_zero
;
2387 set_float_rounding_mode(i
, &env
->vfp
.fp_status
);
2389 if (changed
& (1 << 24)) {
2390 set_flush_to_zero((val
& (1 << 24)) != 0, &env
->vfp
.fp_status
);
2391 set_flush_inputs_to_zero((val
& (1 << 24)) != 0, &env
->vfp
.fp_status
);
2393 if (changed
& (1 << 25))
2394 set_default_nan_mode((val
& (1 << 25)) != 0, &env
->vfp
.fp_status
);
2396 i
= vfp_exceptbits_to_host(val
);
2397 set_float_exception_flags(i
, &env
->vfp
.fp_status
);
2398 set_float_exception_flags(0, &env
->vfp
.standard_fp_status
);
2401 void vfp_set_fpscr(CPUARMState
*env
, uint32_t val
)
2403 HELPER(vfp_set_fpscr
)(env
, val
);
2406 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2408 #define VFP_BINOP(name) \
2409 float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
2411 float_status *fpst = fpstp; \
2412 return float32_ ## name(a, b, fpst); \
2414 float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
2416 float_status *fpst = fpstp; \
2417 return float64_ ## name(a, b, fpst); \
2425 float32
VFP_HELPER(neg
, s
)(float32 a
)
2427 return float32_chs(a
);
2430 float64
VFP_HELPER(neg
, d
)(float64 a
)
2432 return float64_chs(a
);
2435 float32
VFP_HELPER(abs
, s
)(float32 a
)
2437 return float32_abs(a
);
2440 float64
VFP_HELPER(abs
, d
)(float64 a
)
2442 return float64_abs(a
);
2445 float32
VFP_HELPER(sqrt
, s
)(float32 a
, CPUARMState
*env
)
2447 return float32_sqrt(a
, &env
->vfp
.fp_status
);
2450 float64
VFP_HELPER(sqrt
, d
)(float64 a
, CPUARMState
*env
)
2452 return float64_sqrt(a
, &env
->vfp
.fp_status
);
2455 /* XXX: check quiet/signaling case */
2456 #define DO_VFP_cmp(p, type) \
2457 void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
2460 switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2461 case 0: flags = 0x6; break; \
2462 case -1: flags = 0x8; break; \
2463 case 1: flags = 0x2; break; \
2464 default: case 2: flags = 0x3; break; \
2466 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2467 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2469 void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
2472 switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2473 case 0: flags = 0x6; break; \
2474 case -1: flags = 0x8; break; \
2475 case 1: flags = 0x2; break; \
2476 default: case 2: flags = 0x3; break; \
2478 env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2479 | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2481 DO_VFP_cmp(s
, float32
)
2482 DO_VFP_cmp(d
, float64
)
2485 /* Integer to float and float to integer conversions */
2487 #define CONV_ITOF(name, fsz, sign) \
2488 float##fsz HELPER(name)(uint32_t x, void *fpstp) \
2490 float_status *fpst = fpstp; \
2491 return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
2494 #define CONV_FTOI(name, fsz, sign, round) \
2495 uint32_t HELPER(name)(float##fsz x, void *fpstp) \
2497 float_status *fpst = fpstp; \
2498 if (float##fsz##_is_any_nan(x)) { \
2499 float_raise(float_flag_invalid, fpst); \
2502 return float##fsz##_to_##sign##int32##round(x, fpst); \
2505 #define FLOAT_CONVS(name, p, fsz, sign) \
2506 CONV_ITOF(vfp_##name##to##p, fsz, sign) \
2507 CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
2508 CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
2510 FLOAT_CONVS(si
, s
, 32, )
2511 FLOAT_CONVS(si
, d
, 64, )
2512 FLOAT_CONVS(ui
, s
, 32, u
)
2513 FLOAT_CONVS(ui
, d
, 64, u
)
2519 /* floating point conversion */
2520 float64
VFP_HELPER(fcvtd
, s
)(float32 x
, CPUARMState
*env
)
2522 float64 r
= float32_to_float64(x
, &env
->vfp
.fp_status
);
2523 /* ARM requires that S<->D conversion of any kind of NaN generates
2524 * a quiet NaN by forcing the most significant frac bit to 1.
2526 return float64_maybe_silence_nan(r
);
2529 float32
VFP_HELPER(fcvts
, d
)(float64 x
, CPUARMState
*env
)
2531 float32 r
= float64_to_float32(x
, &env
->vfp
.fp_status
);
2532 /* ARM requires that S<->D conversion of any kind of NaN generates
2533 * a quiet NaN by forcing the most significant frac bit to 1.
2535 return float32_maybe_silence_nan(r
);
2538 /* VFP3 fixed point conversion. */
2539 #define VFP_CONV_FIX(name, p, fsz, itype, sign) \
2540 float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
2543 float_status *fpst = fpstp; \
2545 tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
2546 return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
2548 uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
2551 float_status *fpst = fpstp; \
2553 if (float##fsz##_is_any_nan(x)) { \
2554 float_raise(float_flag_invalid, fpst); \
2557 tmp = float##fsz##_scalbn(x, shift, fpst); \
2558 return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
2561 VFP_CONV_FIX(sh
, d
, 64, int16
, )
2562 VFP_CONV_FIX(sl
, d
, 64, int32
, )
2563 VFP_CONV_FIX(uh
, d
, 64, uint16
, u
)
2564 VFP_CONV_FIX(ul
, d
, 64, uint32
, u
)
2565 VFP_CONV_FIX(sh
, s
, 32, int16
, )
2566 VFP_CONV_FIX(sl
, s
, 32, int32
, )
2567 VFP_CONV_FIX(uh
, s
, 32, uint16
, u
)
2568 VFP_CONV_FIX(ul
, s
, 32, uint32
, u
)
2571 /* Half precision conversions. */
2572 static float32
do_fcvt_f16_to_f32(uint32_t a
, CPUARMState
*env
, float_status
*s
)
2574 int ieee
= (env
->vfp
.xregs
[ARM_VFP_FPSCR
] & (1 << 26)) == 0;
2575 float32 r
= float16_to_float32(make_float16(a
), ieee
, s
);
2577 return float32_maybe_silence_nan(r
);
2582 static uint32_t do_fcvt_f32_to_f16(float32 a
, CPUARMState
*env
, float_status
*s
)
2584 int ieee
= (env
->vfp
.xregs
[ARM_VFP_FPSCR
] & (1 << 26)) == 0;
2585 float16 r
= float32_to_float16(a
, ieee
, s
);
2587 r
= float16_maybe_silence_nan(r
);
2589 return float16_val(r
);
2592 float32
HELPER(neon_fcvt_f16_to_f32
)(uint32_t a
, CPUARMState
*env
)
2594 return do_fcvt_f16_to_f32(a
, env
, &env
->vfp
.standard_fp_status
);
2597 uint32_t HELPER(neon_fcvt_f32_to_f16
)(float32 a
, CPUARMState
*env
)
2599 return do_fcvt_f32_to_f16(a
, env
, &env
->vfp
.standard_fp_status
);
2602 float32
HELPER(vfp_fcvt_f16_to_f32
)(uint32_t a
, CPUARMState
*env
)
2604 return do_fcvt_f16_to_f32(a
, env
, &env
->vfp
.fp_status
);
2607 uint32_t HELPER(vfp_fcvt_f32_to_f16
)(float32 a
, CPUARMState
*env
)
2609 return do_fcvt_f32_to_f16(a
, env
, &env
->vfp
.fp_status
);
2612 #define float32_two make_float32(0x40000000)
2613 #define float32_three make_float32(0x40400000)
2614 #define float32_one_point_five make_float32(0x3fc00000)
2616 float32
HELPER(recps_f32
)(float32 a
, float32 b
, CPUARMState
*env
)
2618 float_status
*s
= &env
->vfp
.standard_fp_status
;
2619 if ((float32_is_infinity(a
) && float32_is_zero_or_denormal(b
)) ||
2620 (float32_is_infinity(b
) && float32_is_zero_or_denormal(a
))) {
2621 if (!(float32_is_zero(a
) || float32_is_zero(b
))) {
2622 float_raise(float_flag_input_denormal
, s
);
2626 return float32_sub(float32_two
, float32_mul(a
, b
, s
), s
);
2629 float32
HELPER(rsqrts_f32
)(float32 a
, float32 b
, CPUARMState
*env
)
2631 float_status
*s
= &env
->vfp
.standard_fp_status
;
2633 if ((float32_is_infinity(a
) && float32_is_zero_or_denormal(b
)) ||
2634 (float32_is_infinity(b
) && float32_is_zero_or_denormal(a
))) {
2635 if (!(float32_is_zero(a
) || float32_is_zero(b
))) {
2636 float_raise(float_flag_input_denormal
, s
);
2638 return float32_one_point_five
;
2640 product
= float32_mul(a
, b
, s
);
2641 return float32_div(float32_sub(float32_three
, product
, s
), float32_two
, s
);
2646 /* Constants 256 and 512 are used in some helpers; we avoid relying on
2647 * int->float conversions at run-time. */
2648 #define float64_256 make_float64(0x4070000000000000LL)
2649 #define float64_512 make_float64(0x4080000000000000LL)
2651 /* The algorithm that must be used to calculate the estimate
2652 * is specified by the ARM ARM.
2654 static float64
recip_estimate(float64 a
, CPUARMState
*env
)
2656 /* These calculations mustn't set any fp exception flags,
2657 * so we use a local copy of the fp_status.
2659 float_status dummy_status
= env
->vfp
.standard_fp_status
;
2660 float_status
*s
= &dummy_status
;
2661 /* q = (int)(a * 512.0) */
2662 float64 q
= float64_mul(float64_512
, a
, s
);
2663 int64_t q_int
= float64_to_int64_round_to_zero(q
, s
);
2665 /* r = 1.0 / (((double)q + 0.5) / 512.0) */
2666 q
= int64_to_float64(q_int
, s
);
2667 q
= float64_add(q
, float64_half
, s
);
2668 q
= float64_div(q
, float64_512
, s
);
2669 q
= float64_div(float64_one
, q
, s
);
2671 /* s = (int)(256.0 * r + 0.5) */
2672 q
= float64_mul(q
, float64_256
, s
);
2673 q
= float64_add(q
, float64_half
, s
);
2674 q_int
= float64_to_int64_round_to_zero(q
, s
);
2676 /* return (double)s / 256.0 */
2677 return float64_div(int64_to_float64(q_int
, s
), float64_256
, s
);
2680 float32
HELPER(recpe_f32
)(float32 a
, CPUARMState
*env
)
2682 float_status
*s
= &env
->vfp
.standard_fp_status
;
2684 uint32_t val32
= float32_val(a
);
2687 int a_exp
= (val32
& 0x7f800000) >> 23;
2688 int sign
= val32
& 0x80000000;
2690 if (float32_is_any_nan(a
)) {
2691 if (float32_is_signaling_nan(a
)) {
2692 float_raise(float_flag_invalid
, s
);
2694 return float32_default_nan
;
2695 } else if (float32_is_infinity(a
)) {
2696 return float32_set_sign(float32_zero
, float32_is_neg(a
));
2697 } else if (float32_is_zero_or_denormal(a
)) {
2698 if (!float32_is_zero(a
)) {
2699 float_raise(float_flag_input_denormal
, s
);
2701 float_raise(float_flag_divbyzero
, s
);
2702 return float32_set_sign(float32_infinity
, float32_is_neg(a
));
2703 } else if (a_exp
>= 253) {
2704 float_raise(float_flag_underflow
, s
);
2705 return float32_set_sign(float32_zero
, float32_is_neg(a
));
2708 f64
= make_float64((0x3feULL
<< 52)
2709 | ((int64_t)(val32
& 0x7fffff) << 29));
2711 result_exp
= 253 - a_exp
;
2713 f64
= recip_estimate(f64
, env
);
2716 | ((result_exp
& 0xff) << 23)
2717 | ((float64_val(f64
) >> 29) & 0x7fffff);
2718 return make_float32(val32
);
2721 /* The algorithm that must be used to calculate the estimate
2722 * is specified by the ARM ARM.
2724 static float64
recip_sqrt_estimate(float64 a
, CPUARMState
*env
)
2726 /* These calculations mustn't set any fp exception flags,
2727 * so we use a local copy of the fp_status.
2729 float_status dummy_status
= env
->vfp
.standard_fp_status
;
2730 float_status
*s
= &dummy_status
;
2734 if (float64_lt(a
, float64_half
, s
)) {
2735 /* range 0.25 <= a < 0.5 */
2737 /* a in units of 1/512 rounded down */
2738 /* q0 = (int)(a * 512.0); */
2739 q
= float64_mul(float64_512
, a
, s
);
2740 q_int
= float64_to_int64_round_to_zero(q
, s
);
2742 /* reciprocal root r */
2743 /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
2744 q
= int64_to_float64(q_int
, s
);
2745 q
= float64_add(q
, float64_half
, s
);
2746 q
= float64_div(q
, float64_512
, s
);
2747 q
= float64_sqrt(q
, s
);
2748 q
= float64_div(float64_one
, q
, s
);
2750 /* range 0.5 <= a < 1.0 */
2752 /* a in units of 1/256 rounded down */
2753 /* q1 = (int)(a * 256.0); */
2754 q
= float64_mul(float64_256
, a
, s
);
2755 int64_t q_int
= float64_to_int64_round_to_zero(q
, s
);
2757 /* reciprocal root r */
2758 /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
2759 q
= int64_to_float64(q_int
, s
);
2760 q
= float64_add(q
, float64_half
, s
);
2761 q
= float64_div(q
, float64_256
, s
);
2762 q
= float64_sqrt(q
, s
);
2763 q
= float64_div(float64_one
, q
, s
);
2765 /* r in units of 1/256 rounded to nearest */
2766 /* s = (int)(256.0 * r + 0.5); */
2768 q
= float64_mul(q
, float64_256
,s
);
2769 q
= float64_add(q
, float64_half
, s
);
2770 q_int
= float64_to_int64_round_to_zero(q
, s
);
2772 /* return (double)s / 256.0;*/
2773 return float64_div(int64_to_float64(q_int
, s
), float64_256
, s
);
2776 float32
HELPER(rsqrte_f32
)(float32 a
, CPUARMState
*env
)
2778 float_status
*s
= &env
->vfp
.standard_fp_status
;
2784 val
= float32_val(a
);
2786 if (float32_is_any_nan(a
)) {
2787 if (float32_is_signaling_nan(a
)) {
2788 float_raise(float_flag_invalid
, s
);
2790 return float32_default_nan
;
2791 } else if (float32_is_zero_or_denormal(a
)) {
2792 if (!float32_is_zero(a
)) {
2793 float_raise(float_flag_input_denormal
, s
);
2795 float_raise(float_flag_divbyzero
, s
);
2796 return float32_set_sign(float32_infinity
, float32_is_neg(a
));
2797 } else if (float32_is_neg(a
)) {
2798 float_raise(float_flag_invalid
, s
);
2799 return float32_default_nan
;
2800 } else if (float32_is_infinity(a
)) {
2801 return float32_zero
;
2804 /* Normalize to a double-precision value between 0.25 and 1.0,
2805 * preserving the parity of the exponent. */
2806 if ((val
& 0x800000) == 0) {
2807 f64
= make_float64(((uint64_t)(val
& 0x80000000) << 32)
2809 | ((uint64_t)(val
& 0x7fffff) << 29));
2811 f64
= make_float64(((uint64_t)(val
& 0x80000000) << 32)
2813 | ((uint64_t)(val
& 0x7fffff) << 29));
2816 result_exp
= (380 - ((val
& 0x7f800000) >> 23)) / 2;
2818 f64
= recip_sqrt_estimate(f64
, env
);
2820 val64
= float64_val(f64
);
2822 val
= ((result_exp
& 0xff) << 23)
2823 | ((val64
>> 29) & 0x7fffff);
2824 return make_float32(val
);
2827 uint32_t HELPER(recpe_u32
)(uint32_t a
, CPUARMState
*env
)
2831 if ((a
& 0x80000000) == 0) {
2835 f64
= make_float64((0x3feULL
<< 52)
2836 | ((int64_t)(a
& 0x7fffffff) << 21));
2838 f64
= recip_estimate (f64
, env
);
2840 return 0x80000000 | ((float64_val(f64
) >> 21) & 0x7fffffff);
2843 uint32_t HELPER(rsqrte_u32
)(uint32_t a
, CPUARMState
*env
)
2847 if ((a
& 0xc0000000) == 0) {
2851 if (a
& 0x80000000) {
2852 f64
= make_float64((0x3feULL
<< 52)
2853 | ((uint64_t)(a
& 0x7fffffff) << 21));
2854 } else { /* bits 31-30 == '01' */
2855 f64
= make_float64((0x3fdULL
<< 52)
2856 | ((uint64_t)(a
& 0x3fffffff) << 22));
2859 f64
= recip_sqrt_estimate(f64
, env
);
2861 return 0x80000000 | ((float64_val(f64
) >> 21) & 0x7fffffff);
2864 /* VFPv4 fused multiply-accumulate */
2865 float32
VFP_HELPER(muladd
, s
)(float32 a
, float32 b
, float32 c
, void *fpstp
)
2867 float_status
*fpst
= fpstp
;
2868 return float32_muladd(a
, b
, c
, 0, fpst
);
2871 float64
VFP_HELPER(muladd
, d
)(float64 a
, float64 b
, float64 c
, void *fpstp
)
2873 float_status
*fpst
= fpstp
;
2874 return float64_muladd(a
, b
, c
, 0, fpst
);
2877 void HELPER(set_teecr
)(CPUARMState
*env
, uint32_t val
)
2880 if (env
->teecr
!= val
) {