/* target-arm/helper.c (qemu-kvm/fedora.git): ARM CPU helper routines. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "exec-all.h"
#include "gdbstub.h"

static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t cpu_arm_find_by_name(const char *name);

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1u << feature;
}
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }
}
void cpu_reset(CPUARMState *env)
{
    uint32_t id;
    id = env->cp15.c0_cpuid;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  */
    if (IS_M(env))
        env->uncached_cpsr &= ~CPSR_I;
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
    env->regs[15] = 0;
    tlb_flush(env, 1);
}

CPUARMState *cpu_arm_init(const char *cpu_model)
{
    CPUARMState *env;
    uint32_t id;

    id = cpu_arm_find_by_name(cpu_model);
    if (id == 0)
        return NULL;
    env = qemu_mallocz(sizeof(CPUARMState));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;
    env->cp15.c0_cpuid = id;
    cpu_reset(env);
    return env;
}
struct arm_cpu_t {
    uint32_t id;
    const char *name;
};

static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_ARM1136, "arm1136"},
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
    { ARM_CPUID_TI925T, "ti925t" },
    { ARM_CPUID_PXA250, "pxa250" },
    { ARM_CPUID_PXA255, "pxa255" },
    { ARM_CPUID_PXA260, "pxa260" },
    { ARM_CPUID_PXA261, "pxa261" },
    { ARM_CPUID_PXA262, "pxa262" },
    { ARM_CPUID_PXA270, "pxa270" },
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
    { ARM_CPUID_ANY, "any"},
    { 0, NULL}
};

void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i;

    (*cpu_fprintf)(f, "Available CPUs:\n");
    for (i = 0; arm_cpu_names[i].name; i++) {
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
    }
}

/* return 0 if not found */
static uint32_t cpu_arm_find_by_name(const char *name)
{
    int i;
    uint32_t id;

    id = 0;
    for (i = 0; arm_cpu_names[i].name; i++) {
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
            id = arm_cpu_names[i].id;
            break;
        }
    }
    return id;
}
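/* Illustrative sketch only (not taken from the original file): a front end
   would typically resolve a model name via cpu_arm_init() and fall back to
   listing the supported names, e.g.

       CPUARMState *env = cpu_arm_init("arm926");
       if (!env) {
           arm_cpu_list(stderr, fprintf);
           exit(1);
       }
 */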
void cpu_arm_close(CPUARMState *env)
{
    free(env);
}

/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added.  */
uint32_t helper_neon_mul_p8(uint32_t op1, uint32_t op2)
{
    uint32_t mask;
    uint32_t result;
    result = 0;
    while (op1) {
        mask = 0;
        if (op1 & 1)
            mask |= 0xff;
        if (op1 & (1 << 8))
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        result ^= op2 & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2 = (op2 << 1) & 0xfefefefe;
    }
    return result;
}
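/* Worked example: for op1 = 0x03 and op2 = 0x03 in one byte lane, the loop
   XORs the partial products 0x03 and 0x06, giving 0x05 (x^2 + 1), whereas an
   integer multiply would give 9.  */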
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->NZF == 0);
    return env->uncached_cpsr | (env->NZF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    /* NOTE: N = 1 and Z = 1 cannot be stored currently */
    if (mask & CPSR_NZCV) {
        env->NZF = (val & 0xc0000000) ^ 0x40000000;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        switch_mode(env, val & CPSR_M);
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}
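/* For reference, the CPSR layout assembled by cpsr_read() and split up again
   by cpsr_write():
     bit 31     N       (cached in NZF bit 31)
     bit 30     Z       (cached as NZF == 0)
     bit 29     C       (CF)
     bit 28     V       (cached in VF bit 31)
     bit 27     Q       (QF)
     bits 26-25 IT[1:0] (condexec_bits & 3)
     bits 19-16 GE
     bits 15-10 IT[7:2] (condexec_bits & 0xfc)
     bit 5      T       (thumb)
     bits 4-0   mode    (kept in uncached_cpsr)  */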
#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

/* Structure used to record exclusive memory locations.  */
typedef struct mmon_state {
    struct mmon_state *next;
    CPUARMState *cpu_env;
    uint32_t addr;
} mmon_state;

/* Chain of current locks.  */
static mmon_state* mmon_head = NULL;

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx, int is_softmmu)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

static void allocate_mmon_state(CPUState *env)
{
    env->mmon_entry = malloc(sizeof (mmon_state));
    if (!env->mmon_entry)
        abort();
    memset (env->mmon_entry, 0, sizeof (mmon_state));
    env->mmon_entry->cpu_env = env;
    mmon_head = env->mmon_entry;
}

/* Flush any monitor locks for the specified address.  */
static void flush_mmon(uint32_t addr)
{
    mmon_state *mon;

    for (mon = mmon_head; mon; mon = mon->next)
    {
        if (mon->addr != addr)
            continue;

        mon->addr = 0;
        break;
    }
}

/* Mark an address for exclusive access.  */
void helper_mark_exclusive(CPUState *env, uint32_t addr)
{
    if (!env->mmon_entry)
        allocate_mmon_state(env);
    /* Clear any previous locks.  */
    flush_mmon(addr);
    env->mmon_entry->addr = addr;
}

/* Test if an exclusive address is still exclusive.  Returns zero
   if the address is still exclusive.  */
int helper_test_exclusive(CPUState *env, uint32_t addr)
{
    int res;

    if (!env->mmon_entry)
        return 1;
    if (env->mmon_entry->addr == addr)
        res = 0;
    else
        res = 1;
    flush_mmon(addr);
    return res;
}

void helper_clrex(CPUState *env)
{
    if (!(env->mmon_entry && env->mmon_entry->addr))
        return;
    flush_mmon(env->mmon_entry->addr);
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
/* These should probably raise undefined insn exceptions.  */
void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return;
}

uint32_t helper_get_cp(CPUState *env, uint32_t insn)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return 0;
}

void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
    return 0;
}

/* These should probably raise undefined insn exceptions.  */
void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t helper_v7m_mrs(CPUState *env, int reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void helper_set_r13_banked(CPUState *env, int mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t helper_get_r13_banked(CPUState *env, int mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}
#else

extern int semihosting_enabled;

/* Map CPU modes onto saved register banks.  */
static inline int bank_number (int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
    return -1;
}

void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value.  Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}
void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = lduw_code(env->regs[15]) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] += 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    env->uncached_cpsr &= ~CPSR_IT;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}
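/* For reference, the eight-word exception frame built by the pushes above
   (and unwound by do_v7m_exception_exit), from the lowest stacked address
   upwards: r0, r1, r2, r3, r12, lr (r14), return address (r15), xPSR.  */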
/* Handle a CPU exception.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                 || (mask == 0xab && env->thumb))
                && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = lduw_code(env->regs[15]) & 0xff;
            if (mask == 0xab
                && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and switch to Arm mode.  */
    /* ??? Thumb interrupt handlers not implemented.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    env->thumb = 0;
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
                           int is_user)
{
    int prot_ro;

    if (domain == 3)
        return PAGE_READ | PAGE_WRITE;

    if (access_type == 1)
        prot_ro = 0;
    else
        prot_ro = PAGE_READ;

    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return prot_ro;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    case 4: case 7: /* Reserved.  */
        return 0;
    case 5:
        return is_user ? 0 : prot_ro;
    case 6:
        return prot_ro;
    default:
        abort();
    }
}
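/* Summary of the AP encodings handled above (a manager domain, domain == 3,
   always gets read/write):
       AP   privileged   user
       1    read/write   no access
       2    read/write   read-only
       3    read/write   read/write
       5    read-only    no access
       6    read-only    read-only
       4,7  reserved (no access)
   AP 0 is never writable; the S/R bits in c1_sys select privileged-only read
   (S), read for everyone (R), or no access at all.  */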
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1;
    else
        table = env->cp15.c2_base0;
    table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}
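/* For reference, the ARMv5 walk above: the level-1 index is address bits
   [31:20] (4-byte entries, hence "(address >> 18) & 0x3ffc").  A level-1 type
   of 2 is a 1MB section, 1 is a coarse table (level-2 index = bits [19:12])
   and 3 is a fine table (level-2 index = bits [19:10]).  Level-2 types 1, 2
   and 3 map 64K, 4K and 1K pages respectively.  */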
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1;
    else
        table = env->cp15.c2_base0;
    table = (table & 0xffffc000) | ((address >> 18) & 0x3ffc);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 4) & 0x1e;
    }
    domain = (env->cp15.c3 >> domain) & 3;
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (xn && access_type == 2)
        goto do_fault;

    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    return 0;
}

static inline int get_phys_addr(CPUState *env, uint32_t address,
                                int access_type, int is_user,
                                uint32_t *phys_ptr, int *prot)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot);
    }
}
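/* Note on the FCSE relocation above: virtual addresses below 0x02000000
   (32MB) have c13_fcse added before the table walk, so for example with
   c13_fcse = 0x0a000000 an access to 0x1000 is translated as 0x0a001000.  */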
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int mmu_idx, int is_softmmu)
{
    uint32_t phys_addr;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
                             is_softmmu);
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
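/* The nonzero value returned by get_phys_addr() doubles as the fault status
   written to c5_data/c5_insn above: the low four bits are the FSR code
   (e.g. 5 = section translation fault, 13 = permission fault), bits [7:4]
   carry the domain, and bit 11 marks a faulting write on ARMv6.  */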
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);

    if (ret != 0)
        return -1;

    return phys_addr;
}

/* Not really implemented.  Need to figure out a sane way of doing this.
   Maybe add generic watchpoint support and use that.  */

void helper_mark_exclusive(CPUState *env, uint32_t addr)
{
    env->mmon_addr = addr;
}

int helper_test_exclusive(CPUState *env, uint32_t addr)
{
    return (env->mmon_addr != addr);
}

void helper_clrex(CPUState *env)
{
    env->mmon_addr = -1;
}

void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_write)
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                                 cp_info, src, operand, val);
}

uint32_t helper_get_cp(CPUState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_read)
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                       cp_info, dest, operand);
    return 0;
}

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
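/* Example: an extended (4 bits per region) value of 0x31, i.e. region 0
   AP = 1 and region 1 AP = 3, packs down to 0xd via simple_mpu_ap_bits(),
   and extended_mpu_ap_bits(0xd) reconstructs 0x31.  */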
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        if (((insn >> 21) & 7) == 2) {
            /* ??? Select cache level.  Ignore.  */
            return;
        }
        /* ID codes.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        goto bad_reg;
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented.  */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            env->cp15.c1_coproc = val;
            /* ??? Is this safe when called from within a TB?  */
            tb_flush(env);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        /* No cache, so nothing to do.  */
        /* ??? MPCore has VA to PA translation functions.  */
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
#if 0
            /* ??? This is wrong for large pages and sections.  */
            /* As an ugly hack to make linux work we always flush a whole
               4K page.  */
            val &= 0xfffff000;
            tlb_flush_page(env, val);
            tlb_flush_page(env, val + 0x400);
            tlb_flush_page(env, val + 0x800);
            tlb_flush_page(env, val + 0xc00);
#else
            tlb_flush(env, 1);
#endif
            break;
        case 2: /* Invalidate on ASID.  */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry on MVA.  */
            /* ??? This is like case 1, but ignores ASID.  */
            tlb_flush(env, 1);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
             */
            if (env->cp15.c13_fcse != val)
                tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
                tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        case 2:
            env->cp15.c13_tls1 = val;
            break;
        case 3:
            env->cp15.c13_tls2 = val;
            break;
        case 4:
            env->cp15.c13_tls3 = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
                    tb_flush(env);
                    env->cp15.c15_cpar = val & 0x3fff;
                }
                break;
            }
            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                break;
            case 1: /* Set TI925T configuration.  */
                env->cp15.c15_ticonfig = val & 0xe7;
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
                                     ARM_CPUID_TI915T : ARM_CPUID_TI925T;
                break;
            case 2: /* Set I_max.  */
                env->cp15.c15_i_max = val;
                break;
            case 3: /* Set I_min.  */
                env->cp15.c15_i_min = val;
                break;
            case 4: /* Set thread-ID.  */
                env->cp15.c15_threadid = val & 0xffff;
                break;
            case 8: /* Wait-for-interrupt (deprecated).  */
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
}
uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        switch (op1) {
        case 0:
            switch (crm) {
            case 0:
                switch (op2) {
                case 0: /* Device ID.  */
                    return env->cp15.c0_cpuid;
                case 1: /* Cache Type.  */
                    return env->cp15.c0_cachetype;
                case 2: /* TCM status.  */
                    return 0;
                case 3: /* TLB type register.  */
                    return 0; /* No lockable TLB entries.  */
                case 5: /* CPU ID */
                    return env->cpu_index;
                default:
                    goto bad_reg;
                }
            case 1:
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c1[op2];
            case 2:
                if (!arm_feature(env, ARM_FEATURE_V6))
                    goto bad_reg;
                return env->cp15.c0_c2[op2];
            case 3: case 4: case 5: case 6: case 7:
                return 0;
            default:
                goto bad_reg;
            }
        case 1:
            /* These registers aren't documented on arm11 cores.  However
               Linux looks at them anyway.  */
            if (!arm_feature(env, ARM_FEATURE_V6))
                goto bad_reg;
            if (crm != 0)
                goto bad_reg;
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            return 0;
        default:
            goto bad_reg;
        }
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0: /* Control register.  */
            return env->cp15.c1_sys;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                return env->cp15.c1_xscaleauxcr;
            if (!arm_feature(env, ARM_FEATURE_AUXCR))
                goto bad_reg;
            switch (ARM_CPUID(env)) {
            case ARM_CPUID_ARM1026:
                return 1;
            case ARM_CPUID_ARM1136:
                return 7;
            case ARM_CPUID_ARM11MPCORE:
                return 1;
            case ARM_CPUID_CORTEXA8:
                return 0;
            default:
                goto bad_reg;
            }
        case 2: /* Coprocessor access register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            return env->cp15.c1_coproc;
        default:
            goto bad_reg;
        }
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                return env->cp15.c2_data;
                break;
            case 1:
                return env->cp15.c2_insn;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                return env->cp15.c2_base0;
            case 1:
                return env->cp15.c2_base1;
            case 2:
                {
                    int n;
                    uint32_t mask;
                    n = 0;
                    mask = env->cp15.c2_mask;
                    while (mask) {
                        n++;
                        mask <<= 1;
                    }
                    return n;
                }
            default:
                goto bad_reg;
            }
        }
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        return env->cp15.c3;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_data);
            return env->cp15.c5_data;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_insn);
            return env->cp15.c5_insn;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_data;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_insn;
        default:
            goto bad_reg;
        }
    case 6: /* MMU Fault address.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            return env->cp15.c6_region[crm];
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                return env->cp15.c6_data;
            case 1:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Watchpoint Fault Address.  */
                    return 0; /* Not implemented.  */
                } else {
                    /* Instruction Fault Address.  */
                    /* Arm9 doesn't have an IFAR, but implementing it anyway
                       shouldn't do any harm.  */
                    return env->cp15.c6_insn;
                }
            case 2:
                if (arm_feature(env, ARM_FEATURE_V6)) {
                    /* Instruction Fault Address.  */
                    return env->cp15.c6_insn;
                } else {
                    goto bad_reg;
                }
            default:
                goto bad_reg;
            }
        }
    case 7: /* Cache control.  */
        /* ??? This is for test, clean and invalidate operations that set the
           Z flag.  We can't represent N = Z = 1, so it also clears
           the N flag.  Oh well.  */
        env->NZF = 0;
        return 0;
    case 8: /* MMU TLB control.  */
        goto bad_reg;
    case 9: /* Cache lockdown.  */
        switch (op1) {
        case 0: /* L1 cache.  */
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                return 0;
            switch (op2) {
            case 0:
                return env->cp15.c9_data;
            case 1:
                return env->cp15.c9_insn;
            default:
                goto bad_reg;
            }
        case 1: /* L2 cache */
            if (crm != 0)
                goto bad_reg;
            /* L2 Lockdown and Auxiliary control.  */
            return 0;
        default:
            goto bad_reg;
        }
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        return 0;
    case 11: /* TCM DMA control.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            return env->cp15.c13_fcse;
        case 1:
            return env->cp15.c13_context;
        case 2:
            return env->cp15.c13_tls1;
        case 3:
            return env->cp15.c13_tls2;
        case 4:
            return env->cp15.c13_tls3;
        default:
            goto bad_reg;
        }
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1)
                return env->cp15.c15_cpar;

            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                return 0;
            case 1: /* Read TI925T configuration.  */
                return env->cp15.c15_ticonfig;
            case 2: /* Read I_max.  */
                return env->cp15.c15_i_max;
            case 3: /* Read I_min.  */
                return env->cp15.c15_i_min;
            case 4: /* Read thread-ID.  */
                return env->cp15.c15_threadid;
            case 8: /* TI925T_status */
                return 0;
            }
            goto bad_reg;
        }
        return 0;
    }
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
    return 0;
}
void helper_set_r13_banked(CPUState *env, int mode, uint32_t val)
{
    env->banked_r13[bank_number(mode)] = val;
}

uint32_t helper_get_r13_banked(CPUState *env, int mode)
{
    return env->banked_r13[bank_number(mode)];
}

uint32_t helper_v7m_mrs(CPUState *env, int reg)
{
    switch (reg) {
    case 0: /* APSR */
        return xpsr_read(env) & 0xf8000000;
    case 1: /* IAPSR */
        return xpsr_read(env) & 0xf80001ff;
    case 2: /* EAPSR */
        return xpsr_read(env) & 0xff00fc00;
    case 3: /* xPSR */
        return xpsr_read(env) & 0xff00fdff;
    case 5: /* IPSR */
        return xpsr_read(env) & 0x000001ff;
    case 6: /* EPSR */
        return xpsr_read(env) & 0x0700fc00;
    case 7: /* IEPSR */
        return xpsr_read(env) & 0x0700edff;
    case 8: /* MSP */
        return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
    case 9: /* PSP */
        return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
    case 16: /* PRIMASK */
        return (env->uncached_cpsr & CPSR_I) != 0;
    case 17: /* FAULTMASK */
        return (env->uncached_cpsr & CPSR_F) != 0;
    case 18: /* BASEPRI */
    case 19: /* BASEPRI_MAX */
        return env->v7m.basepri;
    case 20: /* CONTROL */
        return env->v7m.control;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
        return 0;
    }
}

void helper_v7m_msr(CPUState *env, int reg, uint32_t val)
{
    switch (reg) {
    case 0: /* APSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 1: /* IAPSR */
        xpsr_write(env, val, 0xf8000000);
        break;
    case 2: /* EAPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 3: /* xPSR */
        xpsr_write(env, val, 0xfe00fc00);
        break;
    case 5: /* IPSR */
        /* IPSR bits are readonly.  */
        break;
    case 6: /* EPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 7: /* IEPSR */
        xpsr_write(env, val, 0x0600fc00);
        break;
    case 8: /* MSP */
        if (env->v7m.current_sp)
            env->v7m.other_sp = val;
        else
            env->regs[13] = val;
        break;
    case 9: /* PSP */
        if (env->v7m.current_sp)
            env->regs[13] = val;
        else
            env->v7m.other_sp = val;
        break;
    case 16: /* PRIMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_I;
        else
            env->uncached_cpsr &= ~CPSR_I;
        break;
    case 17: /* FAULTMASK */
        if (val & 1)
            env->uncached_cpsr |= CPSR_F;
        else
            env->uncached_cpsr &= ~CPSR_F;
        break;
    case 18: /* BASEPRI */
        env->v7m.basepri = val & 0xff;
        break;
    case 19: /* BASEPRI_MAX */
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
            env->v7m.basepri = val;
        break;
    case 20: /* CONTROL */
        env->v7m.control = val & 3;
        switch_v7m_sp(env, (val & 2) != 0);
        break;
    default:
        /* ??? For debugging only.  */
        cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
        return;
    }
}

void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
                       ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
                       void *opaque)
{
    if (cpnum < 0 || cpnum > 14) {
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
        return;
    }

    env->cp[cpnum].cp_read = cp_read;
    env->cp[cpnum].cp_write = cp_write;
    env->cp[cpnum].opaque = opaque;
}

#endif