/* Reset the CPU state.  Under CONFIG_USER_ONLY the CPU starts in USR mode
   with VFP enabled; otherwise it starts in SVC mode with the A/F/I
   interrupt masks set and VFP disabled.
   NOTE(review): the #else/#endif lines and any trailing statements of this
   function are not visible in this chunk -- do not assume they are absent. */
8 void cpu_reset(CPUARMState
*env
)
10 #if defined (CONFIG_USER_ONLY)
/* User-mode emulation: unprivileged mode from the start. */
11 env
->uncached_cpsr
= ARM_CPU_MODE_USR
;
/* Set FPEXC bit 30 so guest user code can use VFP immediately. */
12 env
->vfp
.xregs
[ARM_VFP_FPEXC
] = 1 << 30;
14 /* SVC mode with interrupts disabled. */
15 env
->uncached_cpsr
= ARM_CPU_MODE_SVC
| CPSR_A
| CPSR_F
| CPSR_I
;
/* VFP stays disabled until the guest OS enables it. */
16 env
->vfp
.xregs
[ARM_VFP_FPEXC
] = 0;
/* Allocate and zero-initialize a fresh CPUARMState.
   NOTE(review): the allocation-failure check and any further initialization
   (e.g. cpu_exec_init / cpu_reset calls) are not visible in this chunk. */
21 CPUARMState
*cpu_arm_init(void)
25 env
= qemu_mallocz(sizeof(CPUARMState
));
34 static inline void set_feature(CPUARMState
*env
, int feature
)
36 env
->features
|= 1u << feature
;
/* Configure ENV for the CPU model identified by ID: store the CPUID into
   cp15 c0 and set the model's feature bits and VFP FPSID value.
   NOTE(review): the switch(id) header, break statements and enclosing
   braces are not visible in this chunk. */
39 void cpu_arm_set_model(CPUARMState
*env
, uint32_t id
)
/* cp15 c0: main ID register, returned by helper_get_cp15 for "Device ID". */
41 env
->cp15
.c0_cpuid
= id
;
43 case ARM_CPUID_ARM926
:
44 set_feature(env
, ARM_FEATURE_VFP
);
/* VFP FPSID value identifying the ARM926's VFP implementation. */
45 env
->vfp
.xregs
[ARM_VFP_FPSID
] = 0x41011090;
47 case ARM_CPUID_ARM1026
:
48 set_feature(env
, ARM_FEATURE_VFP
);
/* ARM1026 additionally implements the auxiliary control register. */
49 set_feature(env
, ARM_FEATURE_AUXCR
);
50 env
->vfp
.xregs
[ARM_VFP_FPSID
] = 0x410110a0;
/* Unknown model ID: fatal emulation error. */
53 cpu_abort(env
, "Bad CPU ID: %x\n", id
);
/* Tear down a CPU state previously created by cpu_arm_init().
   NOTE(review): the function body (presumably freeing ENV) is not visible
   in this chunk. */
58 void cpu_arm_close(CPUARMState
*env
)
63 #if defined(CONFIG_USER_ONLY)
65 void do_interrupt (CPUState
*env
)
67 env
->exception_index
= -1;
/* User-mode MMU fault handler: with no MMU emulated, a fault is converted
   directly into a prefetch abort (instruction access) or data abort (data
   access) exception, recording the faulting address in cp15 c6.
   NOTE(review): the rw-based condition, the else branch and the return
   statement are not visible in this chunk. */
70 int cpu_arm_handle_mmu_fault (CPUState
*env
, target_ulong address
, int rw
,
71 int is_user
, int is_softmmu
)
/* Instruction-fetch fault path: record fault address in cp15 c6_insn. */
74 env
->exception_index
= EXCP_PREFETCH_ABORT
;
75 env
->cp15
.c6_insn
= address
;
/* Data-access fault path: record fault address in cp15 c6_data. */
77 env
->exception_index
= EXCP_DATA_ABORT
;
78 env
->cp15
.c6_data
= address
;
/* Debug physical-address lookup for user-mode emulation.
   NOTE(review): the body is not visible here; presumably it returns ADDR
   unchanged since there is no address translation -- verify. */
83 target_ulong
cpu_get_phys_page_debug(CPUState
*env
, target_ulong addr
)
88 /* These should probably raise undefined insn exceptions. */
89 void helper_set_cp15(CPUState
*env
, uint32_t insn
, uint32_t val
)
91 cpu_abort(env
, "cp15 insn %08x\n", insn
);
/* cp15 reads are privileged; user-mode emulation aborts instead.
   NOTE(review): a trailing "return 0;" to satisfy the non-void return type
   is likely present in the original but not visible in this chunk. */
94 uint32_t helper_get_cp15(CPUState
*env
, uint32_t insn
)
96 cpu_abort(env
, "cp15 insn %08x\n", insn
);
100 void switch_mode(CPUState
*env
, int mode
)
102 if (mode
!= ARM_CPU_MODE_USR
)
103 cpu_abort(env
, "Tried to switch out of user mode\n");
108 extern int semihosting_enabled
;
110 /* Map CPU modes onto saved register banks. */
/* Returns the index into the banked_r13/banked_r14/banked_spsr arrays for
   MODE; invalid modes abort.
   NOTE(review): the switch header, the per-case return values and the
   enclosing braces are not visible in this chunk. */
111 static inline int bank_number (int mode
)
114 case ARM_CPU_MODE_USR
:
115 case ARM_CPU_MODE_SYS
:
117 case ARM_CPU_MODE_SVC
:
119 case ARM_CPU_MODE_ABT
:
121 case ARM_CPU_MODE_UND
:
123 case ARM_CPU_MODE_IRQ
:
125 case ARM_CPU_MODE_FIQ
:
/* No env pointer is passed in, hence the global cpu_single_env here. */
128 cpu_abort(cpu_single_env
, "Bad mode %x\n", mode
);
/* System-mode register banking: spill the outgoing mode's banked registers
   (r13, r14, SPSR -- plus r8-r12 when FIQ is involved) and load the
   incoming mode's bank.
   NOTE(review): the local declarations, the early return when the mode is
   unchanged, and the enclosing braces are not visible in this chunk. */
132 void switch_mode(CPUState
*env
, int mode
)
137 old_mode
= env
->uncached_cpsr
& CPSR_M
;
138 if (mode
== old_mode
)
/* FIQ additionally banks r8-r12: swap the five high registers whenever
   we enter or leave FIQ mode. */
141 if (old_mode
== ARM_CPU_MODE_FIQ
) {
142 memcpy (env
->fiq_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
143 memcpy (env
->regs
+ 8, env
->usr_regs
, 5 * sizeof(uint32_t));
144 } else if (mode
== ARM_CPU_MODE_FIQ
) {
145 memcpy (env
->usr_regs
, env
->regs
+ 8, 5 * sizeof(uint32_t));
146 memcpy (env
->regs
+ 8, env
->fiq_regs
, 5 * sizeof(uint32_t));
/* Save the outgoing mode's r13/r14/SPSR into its register bank. */
149 i
= bank_number(old_mode
);
150 env
->banked_r13
[i
] = env
->regs
[13];
151 env
->banked_r14
[i
] = env
->regs
[14];
152 env
->banked_spsr
[i
] = env
->spsr
;
/* Load the incoming mode's r13/r14/SPSR from its register bank. */
154 i
= bank_number(mode
);
155 env
->regs
[13] = env
->banked_r13
[i
];
156 env
->regs
[14] = env
->banked_r14
[i
];
157 env
->spsr
= env
->banked_spsr
[i
];
160 /* Handle a CPU exception. */
/* System-mode exception delivery: choose the target mode and interrupt
   mask for the pending exception, optionally intercept semihosting calls,
   then switch mode, save CPSR into SPSR, update LR/PC and request a TB
   exit.
   NOTE(review): local declarations, the case labels for the undefined-insn
   / SWI / IRQ / FIQ exceptions, the vector address and LR-offset
   computations, and several braces are not visible in this chunk. */
161 void do_interrupt(CPUARMState
*env
)
168 /* TODO: Vectored interrupt controller. */
169 switch (env
->exception_index
) {
171 new_mode
= ARM_CPU_MODE_UND
;
180 if (semihosting_enabled
) {
181 /* Check for semihosting interrupt. */
/* Fetch the immediate of the trapping SWI: 8-bit for Thumb (halfword
   before PC), 24-bit for ARM (word before PC). */
183 mask
= lduw_code(env
->regs
[15] - 2) & 0xff;
185 mask
= ldl_code(env
->regs
[15] - 4) & 0xffffff;
187 /* Only intercept calls from privileged modes, to provide some
188 semblance of security. */
189 if (((mask
== 0x123456 && !env
->thumb
)
190 || (mask
== 0xab && env
->thumb
))
191 && (env
->uncached_cpsr
& CPSR_M
) != ARM_CPU_MODE_USR
) {
192 env
->regs
[0] = do_arm_semihosting(env
);
196 new_mode
= ARM_CPU_MODE_SVC
;
199 /* The PC already points to the next instructon. */
202 case EXCP_PREFETCH_ABORT
:
204 new_mode
= ARM_CPU_MODE_ABT
;
206 mask
= CPSR_A
| CPSR_I
;
209 case EXCP_DATA_ABORT
:
210 new_mode
= ARM_CPU_MODE_ABT
;
212 mask
= CPSR_A
| CPSR_I
;
216 new_mode
= ARM_CPU_MODE_IRQ
;
218 /* Disable IRQ and imprecise data aborts. */
219 mask
= CPSR_A
| CPSR_I
;
223 new_mode
= ARM_CPU_MODE_FIQ
;
225 /* Disable FIQ, IRQ and imprecise data aborts. */
226 mask
= CPSR_A
| CPSR_I
| CPSR_F
;
230 cpu_abort(env
, "Unhandled exception 0x%x\n", env
->exception_index
);
231 return; /* Never happens. Keep compiler happy. */
/* cp15 c1 bit 13 (V): high exception vectors at 0xffff0000. */
234 if (env
->cp15
.c1_sys
& (1 << 13)) {
237 switch_mode (env
, new_mode
);
/* The new mode's SPSR receives the pre-exception CPSR. */
238 env
->spsr
= cpsr_read(env
);
239 /* Switch to the new mode, and switch to Arm mode. */
240 /* ??? Thumb interrupt handlers not implemented. */
241 env
->uncached_cpsr
= (env
->uncached_cpsr
& ~CPSR_M
) | new_mode
;
242 env
->uncached_cpsr
|= mask
;
/* LR = return address, PC = exception vector. */
244 env
->regs
[14] = env
->regs
[15] + offset
;
245 env
->regs
[15] = addr
;
/* Force a translation-block exit so the new mode takes effect. */
246 env
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
249 /* Check section/page access permissions.
250 Returns the page protection flags, or zero if the access is not
/* Evaluates the AP bits of a descriptor against the access type, privilege
   level, and the cp15 c1 S/R bits (bits 8-9).
   NOTE(review): the domain-manager shortcut condition, several case labels
   of the AP switch, and enclosing braces are not visible in this chunk. */
252 static inline int check_ap(CPUState
*env
, int ap
, int domain
, int access_type
,
256 return PAGE_READ
| PAGE_WRITE
;
/* access_type == 1 appears to denote a write access here. */
260 if (access_type
== 1)
/* AP == 0: permission depends on the cp15 c1 S and R bits. */
262 switch ((env
->cp15
.c1_sys
>> 8) & 3) {
264 return is_user
? 0 : PAGE_READ
;
271 return is_user
? 0 : PAGE_READ
| PAGE_WRITE
;
274 return (access_type
== 1) ? 0 : PAGE_READ
;
276 return PAGE_READ
| PAGE_WRITE
;
278 return PAGE_READ
| PAGE_WRITE
;
/* Translate a virtual ADDRESS to a physical address by walking the ARM
   short-descriptor page tables (sections, 64k/4k/1k pages), applying the
   FCSE PID remapping and domain/AP permission checks.  On success stores
   the physical address in *PHYS_PTR and protection in *PROT; on failure
   returns a fault status code combined with the domain.
   NOTE(review): many lines -- descriptor-type switch headers, fault
   labels, success return, and braces -- are not visible in this chunk. */
284 static int get_phys_addr(CPUState
*env
, uint32_t address
, int access_type
,
285 int is_user
, uint32_t *phys_ptr
, int *prot
)
295 /* Fast Context Switch Extension. */
296 if (address
< 0x02000000)
297 address
+= env
->cp15
.c13_fcse
;
/* cp15 c1 bit 0 (M): MMU disabled -> flat mapping, full access. */
299 if ((env
->cp15
.c1_sys
& 1) == 0) {
302 *prot
= PAGE_READ
| PAGE_WRITE
;
304 /* Pagetable walk. */
305 /* Lookup l1 descriptor. */
306 table
= (env
->cp15
.c2
& 0xffffc000) | ((address
>> 18) & 0x3ffc);
307 desc
= ldl_phys(table
);
/* Domain access-control value for this descriptor (2 bits from c3). */
309 domain
= (env
->cp15
.c3
>> ((desc
>> 4) & 0x1e)) & 3;
311 /* Secton translation fault. */
315 if (domain
== 0 || domain
== 2) {
317 code
= 9; /* Section domain fault. */
319 code
= 11; /* Page domain fault. */
/* 1MB section mapping. */
324 phys_addr
= (desc
& 0xfff00000) | (address
& 0x000fffff);
325 ap
= (desc
>> 10) & 3;
328 /* Lookup l2 entry. */
329 table
= (desc
& 0xfffffc00) | ((address
>> 10) & 0x3fc);
330 desc
= ldl_phys(table
);
332 case 0: /* Page translation fault. */
335 case 1: /* 64k page. */
336 phys_addr
= (desc
& 0xffff0000) | (address
& 0xffff);
337 ap
= (desc
>> (4 + ((address
>> 13) & 6))) & 3;
339 case 2: /* 4k page. */
340 phys_addr
= (desc
& 0xfffff000) | (address
& 0xfff);
341 ap
= (desc
>> (4 + ((address
>> 13) & 6))) & 3;
343 case 3: /* 1k page. */
345 /* Page translation fault. */
349 phys_addr
= (desc
& 0xfffffc00) | (address
& 0x3ff);
350 ap
= (desc
>> 4) & 3;
353 /* Never happens, but compiler isn't smart enough to tell. */
358 *prot
= check_ap(env
, ap
, domain
, access_type
, is_user
);
360 /* Access permission fault. */
363 *phys_ptr
= phys_addr
;
/* Fault: combine fault-status code with the domain, FSR-style. */
367 return code
| (domain
<< 4);
/* System-mode TLB fill: translate the faulting ADDRESS via get_phys_addr;
   on success install a (sub)page mapping into the QEMU TLB, otherwise
   record the fault status/address in cp15 c5/c6 and raise a prefetch or
   data abort.
   NOTE(review): local declarations, the success/failure condition on ret,
   the tail of the tlb_set_page call, the else branch and the final return
   are not visible in this chunk. */
370 int cpu_arm_handle_mmu_fault (CPUState
*env
, target_ulong address
,
371 int access_type
, int is_user
, int is_softmmu
)
377 ret
= get_phys_addr(env
, address
, access_type
, is_user
, &phys_addr
, &prot
);
379 /* Map a single [sub]page. */
/* Align to the 1k subpage granularity used by the translation above. */
380 phys_addr
&= ~(uint32_t)0x3ff;
381 address
&= ~(uint32_t)0x3ff;
382 return tlb_set_page (env
, address
, phys_addr
, prot
, is_user
,
/* access_type == 2 appears to denote an instruction fetch here. */
386 if (access_type
== 2) {
387 env
->cp15
.c5_insn
= ret
;
388 env
->cp15
.c6_insn
= address
;
389 env
->exception_index
= EXCP_PREFETCH_ABORT
;
391 env
->cp15
.c5_data
= ret
;
392 env
->cp15
.c6_data
= address
;
393 env
->exception_index
= EXCP_DATA_ABORT
;
/* Debugger physical-address lookup: translate ADDR with a read access at
   privileged level (access_type 0, is_user 0) without touching the TLB.
   NOTE(review): the handling of ret and the return statement are not
   visible in this chunk. */
398 target_ulong
cpu_get_phys_page_debug(CPUState
*env
, target_ulong addr
)
404 ret
= get_phys_addr(env
, addr
, 0, 0, &phys_addr
, &prot
);
/* System-mode cp15 write helper: decode the coprocessor register from the
   instruction encoding (CRn in bits 16-19, opcode2 in bits 5-7) and update
   the corresponding cp15 state, flushing the TLB where required.
   NOTE(review): break statements, inner switch headers, tlb_flush calls
   and several braces are not visible in this chunk. */
412 void helper_set_cp15(CPUState
*env
, uint32_t insn
, uint32_t val
)
416 op2
= (insn
>> 5) & 7;
417 switch ((insn
>> 16) & 0xf) {
418 case 0: /* ID codes. */
420 case 1: /* System configuration. */
423 env
->cp15
.c1_sys
= val
;
424 /* ??? Lots of these bits are not implemented. */
425 /* This may enable/disable the MMU, so do a TLB flush. */
429 env
->cp15
.c1_coproc
= val
;
430 /* ??? Is this safe when called from within a TB? */
436 case 2: /* MMU Page table control. */
439 case 3: /* MMU Domain access control. */
442 case 4: /* Reserved. */
444 case 5: /* MMU Fault status. */
447 env
->cp15
.c5_data
= val
;
450 env
->cp15
.c5_insn
= val
;
456 case 6: /* MMU Fault address. */
459 env
->cp15
.c6_data
= val
;
462 env
->cp15
.c6_insn
= val
;
468 case 7: /* Cache control. */
469 /* No cache, so nothing to do. */
471 case 8: /* MMU TLB control. */
473 case 0: /* Invalidate all. */
476 case 1: /* Invalidate single TLB entry. */
478 /* ??? This is wrong for large pages and sections. */
479 /* As an ugly hack to make linux work we always flush a 4K
/* Flush all four 1k subpages of the 4k page containing val. */
482 tlb_flush_page(env
, val
);
483 tlb_flush_page(env
, val
+ 0x400);
484 tlb_flush_page(env
, val
+ 0x800);
485 tlb_flush_page(env
, val
+ 0xc00);
494 case 9: /* Cache lockdown. */
497 env
->cp15
.c9_data
= val
;
500 env
->cp15
.c9_insn
= val
;
506 case 10: /* MMU TLB lockdown. */
507 /* ??? TLB lockdown not implemented. */
509 case 11: /* TCM DMA control. */
510 case 12: /* Reserved. */
512 case 13: /* Process ID. */
515 /* Unlike real hardware the qemu TLB uses virtual addresses,
516 not modified virtual addresses, so this causes a TLB flush.
518 if (env
->cp15
.c13_fcse
!= val
)
520 env
->cp15
.c13_fcse
= val
;
523 /* This changes the ASID, so do a TLB flush. */
524 if (env
->cp15
.c13_context
!= val
)
526 env
->cp15
.c13_context
= val
;
532 case 14: /* Reserved. */
534 case 15: /* Implementation specific. */
535 /* ??? Internal registers not implemented. */
540 /* ??? For debugging only. Should raise illegal instruction exception. */
/* NOTE(review): bug -- this is the cp15 *write* helper, yet the abort
   message says "register read".  It should say "register write"; cannot
   be fixed here without reconstructing the full (partially invisible)
   function body. */
541 cpu_abort(env
, "Unimplemented cp15 register read\n");
/* System-mode cp15 read helper: decode the coprocessor register from the
   instruction encoding (CRn in bits 16-19, opcode2 in bits 5-7) and return
   the corresponding cp15 state value.
   NOTE(review): inner switch headers, several case labels/returns and
   braces are not visible in this chunk. */
544 uint32_t helper_get_cp15(CPUState
*env
, uint32_t insn
)
548 op2
= (insn
>> 5) & 7;
549 switch ((insn
>> 16) & 0xf) {
550 case 0: /* ID codes. */
552 default: /* Device ID. */
553 return env
->cp15
.c0_cpuid
;
554 case 1: /* Cache Type. */
556 case 2: /* TCM status. */
559 case 1: /* System configuration. */
561 case 0: /* Control register. */
562 return env
->cp15
.c1_sys
;
563 case 1: /* Auxiliary control register. */
/* Only models with ARM_FEATURE_AUXCR implement this register. */
564 if (arm_feature(env
, ARM_FEATURE_AUXCR
))
567 case 2: /* Coprocessor access register. */
568 return env
->cp15
.c1_coproc
;
572 case 2: /* MMU Page table control. */
574 case 3: /* MMU Domain access control. */
576 case 4: /* Reserved. */
578 case 5: /* MMU Fault status. */
581 return env
->cp15
.c5_data
;
583 return env
->cp15
.c5_insn
;
587 case 6: /* MMU Fault address. */
590 return env
->cp15
.c6_data
;
592 /* Arm9 doesn't have an IFAR, but implementing it anyway shouldn't
594 return env
->cp15
.c6_insn
;
598 case 7: /* Cache control. */
599 /* ??? This is for test, clean and invaidate operations that set the
600 Z flag. We can't represent N = Z = 1, so it also clears clears
601 the N flag. Oh well. */
604 case 8: /* MMU TLB control. */
606 case 9: /* Cache lockdown. */
609 return env
->cp15
.c9_data
;
611 return env
->cp15
.c9_insn
;
615 case 10: /* MMU TLB lockdown. */
616 /* ??? TLB lockdown not implemented. */
618 case 11: /* TCM DMA control. */
619 case 12: /* Reserved. */
621 case 13: /* Process ID. */
624 return env
->cp15
.c13_fcse
;
626 return env
->cp15
.c13_context
;
630 case 14: /* Reserved. */
632 case 15: /* Implementation specific. */
633 /* ??? Internal registers not implemented. */
637 /* ??? For debugging only. Should raise illegal instruction exception. */
638 cpu_abort(env
, "Unimplemented cp15 register read\n");