/* target-arm/helper.c (qemu/mini2440.git, blob e8a216975908a8586f0222f7275a317d800f038f) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "exec-all.h"

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1u << feature;
}
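/* The feature bits set in cpu_reset_model_id() below are tested throughout
   this file with arm_feature(env, ARM_FEATURE_xxx) to select MMU vs. MPU
   behaviour, XScale quirks, and so on.  */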
static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }
}
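/* Note: cpu_reset() below clears only the fields of CPUARMState that precede
   'breakpoints', so host-side bookkeeping survives a reset; the CPU ID is
   saved first and re-applied so the feature bits and ID registers set above
   are restored.  */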
void cpu_reset(CPUARMState *env)
{
    uint32_t id;

    id = env->cp15.c0_cpuid;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif
    env->regs[15] = 0;
    tlb_flush(env, 1);
}
CPUARMState *cpu_arm_init(void)
{
    CPUARMState *env;

    env = qemu_mallocz(sizeof(CPUARMState));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    cpu_reset(env);
    return env;
}
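/* Illustrative board-level usage (a sketch, not code from this file): machine
   init code would typically allocate the CPU and pick a model, e.g.

       CPUARMState *env = cpu_arm_init();
       cpu_arm_set_model(env, "arm926");

   and then continue with board setup and reset.  */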
struct arm_cpu_t {
    uint32_t id;
    const char *name;
};

static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_PXA250, "pxa250" },
    { ARM_CPUID_PXA255, "pxa255" },
    { ARM_CPUID_PXA260, "pxa260" },
    { ARM_CPUID_PXA261, "pxa261" },
    { ARM_CPUID_PXA262, "pxa262" },
    { ARM_CPUID_PXA270, "pxa270" },
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
    { 0, NULL}
};
void arm_cpu_list(void)
{
    int i;

    printf ("Available CPUs:\n");
    for (i = 0; arm_cpu_names[i].name; i++) {
        printf(" %s\n", arm_cpu_names[i].name);
    }
}
void cpu_arm_set_model(CPUARMState *env, const char *name)
{
    int i;
    uint32_t id;

    id = 0;
    for (i = 0; arm_cpu_names[i].name; i++) {
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
            id = arm_cpu_names[i].id;
            break;
        }
    }
    if (!id) {
        cpu_abort(env, "Unknown CPU '%s'", name);
        return;
    }
    cpu_reset_model_id(env, id);
}
void cpu_arm_close(CPUARMState *env)
{
    free(env);
}
#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int is_user, int is_softmmu)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

/* These should probably raise undefined insn exceptions.  */
void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return;
}

uint32_t helper_get_cp(CPUState *env, uint32_t insn)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return 0;
}

void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
    return 0;
}

void switch_mode(CPUState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}
#else

extern int semihosting_enabled;

/* Map CPU modes onto saved register banks.  */
static inline int bank_number (int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
    return -1;
}
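/* switch_mode() swaps the core-visible r13/r14 and SPSR with the banked
   copies for the old and new modes.  FIQ additionally banks r8-r12, which is
   why entering or leaving FIQ mode copies five extra registers below.  */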
void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}
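/* Exception entry below: the current CPSR is saved into the new mode's SPSR,
   r14 is set to the return address (PC plus a per-exception offset), the
   requested interrupt mask bits are set, and execution continues at the
   vector address, relocated to 0xffff0000 when high vectors are enabled.  */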
/* Handle a CPU exception.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                 || (mask == 0xab && env->thumb))
                && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_PREFETCH_ABORT:
    case EXCP_BKPT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Switch to the new mode, and switch to Arm mode.  */
    /* ??? Thumb interrupt handlers not implemented.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    env->thumb = 0;
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
                           int is_user)
{
    if (domain == 3)
        return PAGE_READ | PAGE_WRITE;

    switch (ap) {
    case 0:
        if (access_type == 1)
            return 0;
        switch ((env->cp15.c1_sys >> 8) & 3) {
        case 1:
            return is_user ? 0 : PAGE_READ;
        case 2:
            return PAGE_READ;
        default:
            return 0;
        }
    case 1:
        return is_user ? 0 : PAGE_READ | PAGE_WRITE;
    case 2:
        if (is_user)
            return (access_type == 1) ? 0 : PAGE_READ;
        else
            return PAGE_READ | PAGE_WRITE;
    case 3:
        return PAGE_READ | PAGE_WRITE;
    default:
        abort();
    }
}
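/* Translate a virtual address.  The flow below is: apply the FCSE PID remap
   for low addresses, pass the address straight through if the MMU/MPU is
   disabled, match an MPU region on MPU-based cores, or walk the one- or
   two-level page tables.  Returns 0 on success, otherwise a fault status
   code with the domain number in bits [7:4] (see the do_fault exit).  */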
static int get_phys_addr(CPUState *env, uint32_t address, int access_type,
                         int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        int n;
        uint32_t mask;
        uint32_t base;

        *phys_ptr = address;
        for (n = 7; n >= 0; n--) {
            base = env->cp15.c6_region[n];
            if ((base & 1) == 0)
                continue;
            mask = 1 << ((base >> 1) & 0x1f);
            /* Keep this shift separate from the above to avoid an
               (undefined) << 32.  */
            mask = (mask << 1) - 1;
            if (((base ^ address) & ~mask) == 0)
                break;
        }
        if (n < 0)
            return 2;

        if (access_type == 2) {
            mask = env->cp15.c5_insn;
        } else {
            mask = env->cp15.c5_data;
        }
        mask = (mask >> (n * 4)) & 0xf;
        switch (mask) {
        case 0:
            return 1;
        case 1:
            if (is_user)
                return 1;
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        case 2:
            *prot = PAGE_READ;
            if (!is_user)
                *prot |= PAGE_WRITE;
            break;
        case 3:
            *prot = PAGE_READ | PAGE_WRITE;
            break;
        case 5:
            if (is_user)
                return 1;
            *prot = PAGE_READ;
            break;
        case 6:
            *prot = PAGE_READ;
            break;
        default:
            /* Bad permission.  */
            return 1;
        }
    } else {
        /* Pagetable walk.  */
        /* Lookup l1 descriptor.  */
        table = (env->cp15.c2_base & 0xffffc000) | ((address >> 18) & 0x3ffc);
        desc = ldl_phys(table);
        type = (desc & 3);
        domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
        if (type == 0) {
            /* Section translation fault.  */
            code = 5;
            goto do_fault;
        }
        if (domain == 0 || domain == 2) {
            if (type == 2)
                code = 9; /* Section domain fault.  */
            else
                code = 11; /* Page domain fault.  */
            goto do_fault;
        }
        if (type == 2) {
            /* 1Mb section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
            ap = (desc >> 10) & 3;
            code = 13;
        } else {
            /* Lookup l2 entry.  */
            if (type == 1) {
                /* Coarse pagetable.  */
                table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
            } else {
                /* Fine pagetable.  */
                table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
            }
            desc = ldl_phys(table);
            switch (desc & 3) {
            case 0: /* Page translation fault.  */
                code = 7;
                goto do_fault;
            case 1: /* 64k page.  */
                phys_addr = (desc & 0xffff0000) | (address & 0xffff);
                ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
                break;
            case 2: /* 4k page.  */
                phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
                break;
            case 3: /* 1k page.  */
                if (arm_feature(env, ARM_FEATURE_XSCALE))
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                else {
                    if (type == 1) {
                        /* Page translation fault.  */
                        code = 7;
                        goto do_fault;
                    }
                    phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
                }
                ap = (desc >> 4) & 3;
                break;
            default:
                /* Never happens, but compiler isn't smart enough to tell.  */
                abort();
            }
            code = 15;
        }
        *prot = check_ap(env, ap, domain, access_type, is_user);
        if (!*prot) {
            /* Access permission fault.  */
            goto do_fault;
        }
        *phys_ptr = phys_addr;
    }
    return 0;
do_fault:
    return code | (domain << 4);
}
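/* Softmmu fault hook: on a TLB miss, translate the address and install the
   mapping with tlb_set_page(), or record the fault status and address in the
   c5/c6 registers and return 1 so the caller raises a prefetch or data
   abort.  */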
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int is_user, int is_softmmu)
{
    uint32_t phys_addr;
    int prot;
    int ret;

    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        return tlb_set_page (env, address, phys_addr, prot, is_user,
                             is_softmmu);
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);

    if (ret != 0)
        return -1;

    return phys_addr;
}
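/* helper_set_cp()/helper_get_cp() dispatch coprocessor accesses to the
   per-coprocessor callbacks registered with cpu_arm_set_cp_io() at the end of
   this file; if no callback is registered, writes are ignored and reads
   return 0.  */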
void helper_set_cp(CPUState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_write)
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                                 cp_info, src, operand, val);
}

uint32_t helper_get_cp(CPUState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_read)
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                       cp_info, dest, operand);
    return 0;
}
/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;

    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;

    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}
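/* For example, a "simple" AP value of 0x000c (AP=3 for region 1) expands to
   0x00000030 in the extended format, and simple_mpu_ap_bits() reverses the
   packing on reads.  */

/* The cp15 helpers below decode CRn from bits [19:16], opc2 from bits [7:5]
   and CRm from bits [3:0] of the value passed in 'insn', matching the MCR/MRC
   field layout.  */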
void helper_set_cp15(CPUState *env, uint32_t insn, uint32_t val)
{
    uint32_t op2;
    uint32_t crm;

    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        goto bad_reg;
    case 1: /* System configuration.  */
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1:
            /* XScale doesn't implement AUX CR (P-Bit) but allows
             * writing with zero and reading.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                break;
            goto bad_reg;
        case 2:
            env->cp15.c1_coproc = val;
            /* ??? Is this safe when called from within a TB?  */
            tb_flush(env);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            env->cp15.c2_base = val;
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        /* No cache, so nothing to do.  */
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
#if 0
            /* ??? This is wrong for large pages and sections.  */
            /* As an ugly hack to make linux work we always flush
               4K pages.  */
            val &= 0xfffff000;
            tlb_flush_page(env, val);
            tlb_flush_page(env, val + 0x400);
            tlb_flush_page(env, val + 0x800);
            tlb_flush_page(env, val + 0xc00);
#else
            tlb_flush(env, 1);
#endif
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op2) {
            case 0:
                env->cp15.c9_data = val;
                break;
            case 1:
                env->cp15.c9_insn = val;
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.  */
            if (env->cp15.c13_fcse != val)
                tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
                tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
                tb_flush(env);
                env->cp15.c15_cpar = (val & 0x3fff) | 2;
                break;
            }
            goto bad_reg;
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write\n");
}
uint32_t helper_get_cp15(CPUState *env, uint32_t insn)
{
    uint32_t op2;

    op2 = (insn >> 5) & 7;
    switch ((insn >> 16) & 0xf) {
    case 0: /* ID codes.  */
        switch (op2) {
        default: /* Device ID.  */
            return env->cp15.c0_cpuid;
        case 1: /* Cache Type.  */
            return env->cp15.c0_cachetype;
        case 2: /* TCM status.  */
            return 0;
        }
    case 1: /* System configuration.  */
        switch (op2) {
        case 0: /* Control register.  */
            return env->cp15.c1_sys;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_AUXCR))
                return 1;
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                return 0;
            goto bad_reg;
        case 2: /* Coprocessor access register.  */
            return env->cp15.c1_coproc;
        default:
            goto bad_reg;
        }
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                return env->cp15.c2_data;
            case 1:
                return env->cp15.c2_insn;
            default:
                goto bad_reg;
            }
        } else {
            return env->cp15.c2_base;
        }
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        return env->cp15.c3;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_data);
            return env->cp15.c5_data;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                return simple_mpu_ap_bits(env->cp15.c5_insn);
            return env->cp15.c5_insn;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_data;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            return env->cp15.c5_insn;
        default:
            goto bad_reg;
        }
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            int n;
            n = (insn & 0xf);
            if (n >= 8)
                goto bad_reg;
            return env->cp15.c6_region[n];
        } else {
            switch (op2) {
            case 0:
                return env->cp15.c6_data;
            case 1:
                /* Arm9 doesn't have an IFAR, but implementing it anyway
                   shouldn't do any harm.  */
                return env->cp15.c6_insn;
            default:
                goto bad_reg;
            }
        }
    case 7: /* Cache control.  */
        /* ??? This is for test, clean and invalidate operations that set the
           Z flag.  We can't represent N = Z = 1, so it also clears
           the N flag.  Oh well.  */
        env->NZF = 0;
        return 0;
    case 8: /* MMU TLB control.  */
        goto bad_reg;
    case 9: /* Cache lockdown.  */
        switch (op2) {
        case 0:
            return env->cp15.c9_data;
        case 1:
            return env->cp15.c9_insn;
        default:
            goto bad_reg;
        }
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        return 0;
    case 11: /* TCM DMA control.  */
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            return env->cp15.c13_fcse;
        case 1:
            return env->cp15.c13_context;
        default:
            goto bad_reg;
        }
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && (insn & 0xf) == 1)
                return env->cp15.c15_cpar;
        }
        goto bad_reg;
    }
    return 0;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register read\n");
    return 0;
}
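/* Boards register handlers for implementation-specific coprocessors with
   cpu_arm_set_cp_io().  A hypothetical sketch (names made up for
   illustration; the callback parameter lists are inferred from the calls in
   helper_get_cp()/helper_set_cp() above):

       static uint32_t my_cp_read(void *opaque, int cp_info,
                                  int dstreg, int operand)
       {
           return 0;
       }

       static void my_cp_write(void *opaque, int cp_info,
                               int srcreg, int operand, uint32_t value)
       {
       }

       cpu_arm_set_cp_io(env, 6, my_cp_read, my_cp_write, opaque_state);
   */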
void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
                       ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
                       void *opaque)
{
    if (cpnum < 0 || cpnum > 14) {
        cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
        return;
    }

    env->cp[cpnum].cp_read = cp_read;
    env->cp[cpnum].cp_write = cp_write;
    env->cp[cpnum].opaque = opaque;
}

#endif