trace: add "-trace enable=..."
[qemu/ar7.git] / target-mips / helper.c
blobf9c4c11eb93e526ea1dbf8eba2b998f85390ce86
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "sysemu/kvm.h"
23 #include "exec/cpu_ldst.h"
/* Result codes for the MMU map_address handlers and get_physical_address():
 * TLBRET_MATCH (0) means a successful translation; negative values identify
 * the failure and are mapped to CPU exceptions by raise_mmu_exception(). */
enum {
    TLBRET_XI = -6,      /* TLB hit, but execute-inhibit bit set on the page */
    TLBRET_RI = -5,      /* TLB hit, but read-inhibit bit set on the page */
    TLBRET_DIRTY = -4,   /* TLB hit on a store, but dirty (D) bit clear */
    TLBRET_INVALID = -3, /* TLB hit, but valid (V) bit clear */
    TLBRET_NOMATCH = -2, /* no TLB entry matched a mapped address */
    TLBRET_BADADDR = -1, /* address not accessible in the current mode */
    TLBRET_MATCH = 0     /* translation succeeded */
};
35 #if !defined(CONFIG_USER_ONLY)
37 /* no MMU emulation */
38 int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
39 target_ulong address, int rw, int access_type)
41 *physical = address;
42 *prot = PAGE_READ | PAGE_WRITE;
43 return TLBRET_MATCH;
46 /* fixed mapping MMU emulation */
47 int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
48 target_ulong address, int rw, int access_type)
50 if (address <= (int32_t)0x7FFFFFFFUL) {
51 if (!(env->CP0_Status & (1 << CP0St_ERL)))
52 *physical = address + 0x40000000UL;
53 else
54 *physical = address;
55 } else if (address <= (int32_t)0xBFFFFFFFUL)
56 *physical = address & 0x1FFFFFFF;
57 else
58 *physical = address;
60 *prot = PAGE_READ | PAGE_WRITE;
61 return TLBRET_MATCH;
/* MIPS32/MIPS64 R4000-style MMU emulation */
/* Walk the software TLB (including shadow entries beyond tlb->nb_tlb, up to
 * tlb_in_use) looking for an entry matching `address` under the current ASID.
 * On a hit, checks the V/XI/RI/D bits for the requested access type `rw`
 * and fills *physical / *prot; otherwise returns the matching TLBRET_* error. */
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type)
{
    uint8_t ASID = env->CP0_EntryHi & 0xFF;
    int i;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        /* mask covers the offset bits within this entry's (even+odd) page
           pair; at minimum two target pages. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            /* n selects the odd (1) or even (0) page of the pair. */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                /* PFN already holds the page frame base; low bits come
                   from the in-page offset. */
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0)
                    *prot |= PAGE_WRITE;
                return TLBRET_MATCH;
            }
            /* Store to a clean (D == 0) page. */
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}
/* Decode `real_address` into its MIPS memory segment and translate it:
 * unmapped segments (kseg0/kseg1, xkphys, ERL useg) are translated directly,
 * mapped segments go through env->tlb->map_address(), and segments not
 * accessible in the current privilege mode yield TLBRET_BADADDR. */
static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong real_address,
                                 int rw, int access_type)
{
    /* User mode can only access useg/xuseg */
    int user_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM;
    int supervisor_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
#if defined(TARGET_MIPS64)
    /* 64-bit segment enables from CP0 Status. */
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      0x7FFFFFFFUL
#define KSEG0_BASE      0x80000000UL
#define KSEG1_BASE      0xA0000000UL
#define KSEG2_BASE      0xC0000000UL
#define KSEG3_BASE      0xE0000000UL

#define KVM_KSEG0_BASE  0x40000000UL
#define KVM_KSEG2_BASE  0x60000000UL

    if (kvm_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        if (env->CP0_Status & (1 << CP0St_ERL)) {
            /* ERL: useg is unmapped and uncached, identity translation. */
            *physical = address & 0xFFFFFFFF;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        }
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        /* Unmapped window onto physical memory; only valid within PAMask. */
        if (kernel_mode && KX &&
            (address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            *physical = address & env->PAMask;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < (int32_t)KSEG1_BASE) {
        /* kseg0 */
        if (kernel_mode) {
            *physical = address - (int32_t)KSEG0_BASE;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < (int32_t)KSEG2_BASE) {
        /* kseg1 */
        if (kernel_mode) {
            *physical = address - (int32_t)KSEG1_BASE;
            *prot = PAGE_READ | PAGE_WRITE;
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < (int32_t)KSEG3_BASE) {
        /* sseg (kseg2) */
        if (supervisor_mode || kernel_mode) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else {
        /* kseg3 */
        /* XXX: debug segment is not emulated */
        if (kernel_mode) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    }
    return ret;
}
223 #endif
/* Convert a TLBRET_* translation failure into a pending CPU exception:
 * selects the exception number from the error code and access direction,
 * then updates the CP0 fault registers (BadVAddr, Context, EntryHi, and on
 * MIPS64 XContext) that the guest's handler will read. */
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                int rw, int tlb_error)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));
    int exception = 0, error_code = 0;

    if (rw == MMU_INST_FETCH) {
        /* Remember that the fault was on a fetch, for do_interrupt(). */
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    env->CP0_BadVAddr = address;
    /* Context.BadVPN2 = VA bits 31..13, in bits 22..4. */
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi =
        (env->CP0_EntryHi & 0xFF) | (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        /* PTEBase */   (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
        /* R */         (extract64(address, 62, 2) << (env->SEGBITS - 9)) |
        /* BadVPN2 */   (extract64(address, 13, env->SEGBITS - 13) << 4);
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}
301 #if !defined(CONFIG_USER_ONLY)
302 hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
304 MIPSCPU *cpu = MIPS_CPU(cs);
305 hwaddr phys_addr;
306 int prot;
308 if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0,
309 ACCESS_INT) != 0) {
310 return -1;
312 return phys_addr;
314 #endif
/* TLB-fill entry point called on a softmmu miss: translate `address` for
 * access `rw`, install the mapping with tlb_set_page() on success (returns 0),
 * or record a guest exception via raise_mmu_exception() (returns 1).
 * In user-only builds every fault takes the exception path. */
int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                              int mmu_idx)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
    int access_type;
#endif
    int ret = 0;

#if 0
    log_cpu_state(cs, 0);
#endif
    qemu_log_mask(CPU_LOG_MMU,
              "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
              __func__, env->active_tc.PC, address, rw, mmu_idx);

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state()
       correctly */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot,
                               address, rw, access_type);
    qemu_log_mask(CPU_LOG_MMU,
             "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
             " prot %d\n",
             __func__, address, ret, physical, prot);
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                     mmu_idx, TARGET_PAGE_SIZE);
        ret = 0;
    } else if (ret < 0)
#endif
    /* NOTE: in user-only builds the #if above removes the translation, so
       this brace block is the unconditional fall-through path there. */
    {
        raise_mmu_exception(env, address, rw, ret);
        ret = 1;
    }

    return ret;
}
361 #if !defined(CONFIG_USER_ONLY)
362 hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
364 hwaddr physical;
365 int prot;
366 int access_type;
367 int ret = 0;
369 /* data access */
370 access_type = ACCESS_INT;
371 ret = get_physical_address(env, &physical, &prot,
372 address, rw, access_type);
373 if (ret != TLBRET_MATCH) {
374 raise_mmu_exception(env, address, rw, ret);
375 return -1LL;
376 } else {
377 return physical;
/* Human-readable exception names indexed by EXCP_* number; used only for
 * CPU_LOG_INT logging in mips_cpu_do_interrupt(). Unlisted indices are NULL. */
static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset",
    [EXCP_SRESET] = "soft reset",
    [EXCP_DSS] = "debug single step",
    [EXCP_DINT] = "debug interrupt",
    [EXCP_NMI] = "non-maskable interrupt",
    [EXCP_MCHECK] = "machine check",
    [EXCP_EXT_INTERRUPT] = "interrupt",
    [EXCP_DFWATCH] = "deferred watchpoint",
    [EXCP_DIB] = "debug instruction breakpoint",
    [EXCP_IWATCH] = "instruction fetch watchpoint",
    [EXCP_AdEL] = "address error load",
    [EXCP_AdES] = "address error store",
    [EXCP_TLBF] = "TLB refill",
    [EXCP_IBE] = "instruction bus error",
    [EXCP_DBp] = "debug breakpoint",
    [EXCP_SYSCALL] = "syscall",
    [EXCP_BREAK] = "break",
    [EXCP_CpU] = "coprocessor unusable",
    [EXCP_RI] = "reserved instruction",
    [EXCP_OVERFLOW] = "arithmetic overflow",
    [EXCP_TRAP] = "trap",
    [EXCP_FPE] = "floating point",
    [EXCP_DDBS] = "debug data break store",
    [EXCP_DWATCH] = "data watchpoint",
    [EXCP_LTLBL] = "TLB modify",
    [EXCP_TLBL] = "TLB load",
    [EXCP_TLBS] = "TLB store",
    [EXCP_DBE] = "data bus error",
    [EXCP_DDBL] = "debug data break load",
    [EXCP_THREAD] = "thread",
    [EXCP_MDMX] = "MDMX",
    [EXCP_C2E] = "precise coprocessor 2",
    [EXCP_CACHE] = "cache error",
    [EXCP_TLBXI] = "TLB execute-inhibit",
    [EXCP_TLBRI] = "TLB read-inhibit",
    [EXCP_MSADIS] = "MSA disabled",
    [EXCP_MSAFPE] = "MSA floating point",
};
420 #endif
422 target_ulong exception_resume_pc (CPUMIPSState *env)
424 target_ulong bad_pc;
425 target_ulong isa_mode;
427 isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
428 bad_pc = env->active_tc.PC | isa_mode;
429 if (env->hflags & MIPS_HFLAG_BMASK) {
430 /* If the exception was raised from a delay slot, come back to
431 the jump. */
432 bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
435 return bad_pc;
438 #if !defined(CONFIG_USER_ONLY)
439 static void set_hflags_for_handler (CPUMIPSState *env)
441 /* Exception handlers are entered in 32-bit mode. */
442 env->hflags &= ~(MIPS_HFLAG_M16);
443 /* ...except that microMIPS lets you choose. */
444 if (env->insn_flags & ASE_MICROMIPS) {
445 env->hflags |= (!!(env->CP0_Config3
446 & (1 << CP0C3_ISA_ON_EXC))
447 << MIPS_HFLAG_M16_SHIFT);
451 static inline void set_badinstr_registers(CPUMIPSState *env)
453 if (env->hflags & MIPS_HFLAG_M16) {
454 /* TODO: add BadInstr support for microMIPS */
455 return;
457 if (env->CP0_Config3 & (1 << CP0C3_BI)) {
458 env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
460 if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
461 (env->hflags & MIPS_HFLAG_BMASK)) {
462 env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
465 #endif
467 void mips_cpu_do_interrupt(CPUState *cs)
469 #if !defined(CONFIG_USER_ONLY)
470 MIPSCPU *cpu = MIPS_CPU(cs);
471 CPUMIPSState *env = &cpu->env;
472 bool update_badinstr = 0;
473 target_ulong offset;
474 int cause = -1;
475 const char *name;
477 if (qemu_loglevel_mask(CPU_LOG_INT)
478 && cs->exception_index != EXCP_EXT_INTERRUPT) {
479 if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
480 name = "unknown";
481 } else {
482 name = excp_names[cs->exception_index];
485 qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
486 " %s exception\n",
487 __func__, env->active_tc.PC, env->CP0_EPC, name);
489 if (cs->exception_index == EXCP_EXT_INTERRUPT &&
490 (env->hflags & MIPS_HFLAG_DM)) {
491 cs->exception_index = EXCP_DINT;
493 offset = 0x180;
494 switch (cs->exception_index) {
495 case EXCP_DSS:
496 env->CP0_Debug |= 1 << CP0DB_DSS;
497 /* Debug single step cannot be raised inside a delay slot and
498 resume will always occur on the next instruction
499 (but we assume the pc has always been updated during
500 code translation). */
501 env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
502 goto enter_debug_mode;
503 case EXCP_DINT:
504 env->CP0_Debug |= 1 << CP0DB_DINT;
505 goto set_DEPC;
506 case EXCP_DIB:
507 env->CP0_Debug |= 1 << CP0DB_DIB;
508 goto set_DEPC;
509 case EXCP_DBp:
510 env->CP0_Debug |= 1 << CP0DB_DBp;
511 goto set_DEPC;
512 case EXCP_DDBS:
513 env->CP0_Debug |= 1 << CP0DB_DDBS;
514 goto set_DEPC;
515 case EXCP_DDBL:
516 env->CP0_Debug |= 1 << CP0DB_DDBL;
517 set_DEPC:
518 env->CP0_DEPC = exception_resume_pc(env);
519 env->hflags &= ~MIPS_HFLAG_BMASK;
520 enter_debug_mode:
521 if (env->insn_flags & ISA_MIPS3) {
522 env->hflags |= MIPS_HFLAG_64;
523 if (!(env->insn_flags & ISA_MIPS64R6) ||
524 env->CP0_Status & (1 << CP0St_KX)) {
525 env->hflags &= ~MIPS_HFLAG_AWRAP;
528 env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
529 env->hflags &= ~(MIPS_HFLAG_KSU);
530 /* EJTAG probe trap enable is not implemented... */
531 if (!(env->CP0_Status & (1 << CP0St_EXL)))
532 env->CP0_Cause &= ~(1U << CP0Ca_BD);
533 env->active_tc.PC = (int32_t)0xBFC00480;
534 set_hflags_for_handler(env);
535 break;
536 case EXCP_RESET:
537 cpu_reset(CPU(cpu));
538 break;
539 case EXCP_SRESET:
540 env->CP0_Status |= (1 << CP0St_SR);
541 memset(env->CP0_WatchLo, 0, sizeof(*env->CP0_WatchLo));
542 goto set_error_EPC;
543 case EXCP_NMI:
544 env->CP0_Status |= (1 << CP0St_NMI);
545 set_error_EPC:
546 env->CP0_ErrorEPC = exception_resume_pc(env);
547 env->hflags &= ~MIPS_HFLAG_BMASK;
548 env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
549 if (env->insn_flags & ISA_MIPS3) {
550 env->hflags |= MIPS_HFLAG_64;
551 if (!(env->insn_flags & ISA_MIPS64R6) ||
552 env->CP0_Status & (1 << CP0St_KX)) {
553 env->hflags &= ~MIPS_HFLAG_AWRAP;
556 env->hflags |= MIPS_HFLAG_CP0;
557 env->hflags &= ~(MIPS_HFLAG_KSU);
558 if (!(env->CP0_Status & (1 << CP0St_EXL)))
559 env->CP0_Cause &= ~(1U << CP0Ca_BD);
560 env->active_tc.PC = (int32_t)0xBFC00000;
561 set_hflags_for_handler(env);
562 break;
563 case EXCP_EXT_INTERRUPT:
564 cause = 0;
565 if (env->CP0_Cause & (1 << CP0Ca_IV)) {
566 uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;
568 if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
569 offset = 0x200;
570 } else {
571 uint32_t vector = 0;
572 uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;
574 if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
575 /* For VEIC mode, the external interrupt controller feeds
576 * the vector through the CP0Cause IP lines. */
577 vector = pending;
578 } else {
579 /* Vectored Interrupts
580 * Mask with Status.IM7-IM0 to get enabled interrupts. */
581 pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
582 /* Find the highest-priority interrupt. */
583 while (pending >>= 1) {
584 vector++;
587 offset = 0x200 + (vector * (spacing << 5));
590 goto set_EPC;
591 case EXCP_LTLBL:
592 cause = 1;
593 update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
594 goto set_EPC;
595 case EXCP_TLBL:
596 cause = 2;
597 update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
598 if ((env->error_code & EXCP_TLB_NOMATCH) &&
599 !(env->CP0_Status & (1 << CP0St_EXL))) {
600 #if defined(TARGET_MIPS64)
601 int R = env->CP0_BadVAddr >> 62;
602 int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
603 int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
604 int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
606 if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) &&
607 (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F))))
608 offset = 0x080;
609 else
610 #endif
611 offset = 0x000;
613 goto set_EPC;
614 case EXCP_TLBS:
615 cause = 3;
616 update_badinstr = 1;
617 if ((env->error_code & EXCP_TLB_NOMATCH) &&
618 !(env->CP0_Status & (1 << CP0St_EXL))) {
619 #if defined(TARGET_MIPS64)
620 int R = env->CP0_BadVAddr >> 62;
621 int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
622 int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
623 int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
625 if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) &&
626 (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F))))
627 offset = 0x080;
628 else
629 #endif
630 offset = 0x000;
632 goto set_EPC;
633 case EXCP_AdEL:
634 cause = 4;
635 update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
636 goto set_EPC;
637 case EXCP_AdES:
638 cause = 5;
639 update_badinstr = 1;
640 goto set_EPC;
641 case EXCP_IBE:
642 cause = 6;
643 goto set_EPC;
644 case EXCP_DBE:
645 cause = 7;
646 goto set_EPC;
647 case EXCP_SYSCALL:
648 cause = 8;
649 update_badinstr = 1;
650 goto set_EPC;
651 case EXCP_BREAK:
652 cause = 9;
653 update_badinstr = 1;
654 goto set_EPC;
655 case EXCP_RI:
656 cause = 10;
657 update_badinstr = 1;
658 goto set_EPC;
659 case EXCP_CpU:
660 cause = 11;
661 update_badinstr = 1;
662 env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
663 (env->error_code << CP0Ca_CE);
664 goto set_EPC;
665 case EXCP_OVERFLOW:
666 cause = 12;
667 update_badinstr = 1;
668 goto set_EPC;
669 case EXCP_TRAP:
670 cause = 13;
671 update_badinstr = 1;
672 goto set_EPC;
673 case EXCP_MSAFPE:
674 cause = 14;
675 update_badinstr = 1;
676 goto set_EPC;
677 case EXCP_FPE:
678 cause = 15;
679 update_badinstr = 1;
680 goto set_EPC;
681 case EXCP_C2E:
682 cause = 18;
683 goto set_EPC;
684 case EXCP_TLBRI:
685 cause = 19;
686 update_badinstr = 1;
687 goto set_EPC;
688 case EXCP_TLBXI:
689 cause = 20;
690 goto set_EPC;
691 case EXCP_MSADIS:
692 cause = 21;
693 update_badinstr = 1;
694 goto set_EPC;
695 case EXCP_MDMX:
696 cause = 22;
697 goto set_EPC;
698 case EXCP_DWATCH:
699 cause = 23;
700 /* XXX: TODO: manage deferred watch exceptions */
701 goto set_EPC;
702 case EXCP_MCHECK:
703 cause = 24;
704 goto set_EPC;
705 case EXCP_THREAD:
706 cause = 25;
707 goto set_EPC;
708 case EXCP_DSPDIS:
709 cause = 26;
710 goto set_EPC;
711 case EXCP_CACHE:
712 cause = 30;
713 if (env->CP0_Status & (1 << CP0St_BEV)) {
714 offset = 0x100;
715 } else {
716 offset = 0x20000100;
718 set_EPC:
719 if (!(env->CP0_Status & (1 << CP0St_EXL))) {
720 env->CP0_EPC = exception_resume_pc(env);
721 if (update_badinstr) {
722 set_badinstr_registers(env);
724 if (env->hflags & MIPS_HFLAG_BMASK) {
725 env->CP0_Cause |= (1U << CP0Ca_BD);
726 } else {
727 env->CP0_Cause &= ~(1U << CP0Ca_BD);
729 env->CP0_Status |= (1 << CP0St_EXL);
730 if (env->insn_flags & ISA_MIPS3) {
731 env->hflags |= MIPS_HFLAG_64;
732 if (!(env->insn_flags & ISA_MIPS64R6) ||
733 env->CP0_Status & (1 << CP0St_KX)) {
734 env->hflags &= ~MIPS_HFLAG_AWRAP;
737 env->hflags |= MIPS_HFLAG_CP0;
738 env->hflags &= ~(MIPS_HFLAG_KSU);
740 env->hflags &= ~MIPS_HFLAG_BMASK;
741 if (env->CP0_Status & (1 << CP0St_BEV)) {
742 env->active_tc.PC = (int32_t)0xBFC00200;
743 } else {
744 env->active_tc.PC = (int32_t)(env->CP0_EBase & ~0x3ff);
746 env->active_tc.PC += offset;
747 set_hflags_for_handler(env);
748 env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
749 break;
750 default:
751 abort();
753 if (qemu_loglevel_mask(CPU_LOG_INT)
754 && cs->exception_index != EXCP_EXT_INTERRUPT) {
755 qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
756 " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
757 __func__, env->active_tc.PC, env->CP0_EPC, cause,
758 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
759 env->CP0_DEPC);
761 #endif
762 cs->exception_index = EXCP_NONE;
765 bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
767 if (interrupt_request & CPU_INTERRUPT_HARD) {
768 MIPSCPU *cpu = MIPS_CPU(cs);
769 CPUMIPSState *env = &cpu->env;
771 if (cpu_mips_hw_interrupts_enabled(env) &&
772 cpu_mips_hw_interrupts_pending(env)) {
773 /* Raise it */
774 cs->exception_index = EXCP_EXT_INTERRUPT;
775 env->error_code = 0;
776 mips_cpu_do_interrupt(cs);
777 return true;
780 return false;
783 #if !defined(CONFIG_USER_ONLY)
/* Flush QEMU's softmmu mappings covered by guest TLB entry `idx` before it is
 * overwritten. If `use_extra` is set (tlbwr) and there is room past nb_tlb,
 * the old entry is instead preserved as a hidden shadow entry so previously
 * translated code keeps working without a flush. */
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);
    CPUState *cs;
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint8_t ASID = env->CP0_EntryHi & 0xFF;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
       flush these entries again. */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /* For tlbwr, we can shadow the discarded entry into
           a new (fake) TLB entry, as long as the guest can not
           tell that it's there. */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        /* Flush every target page of the even page of the pair. */
        cs = CPU(cpu);
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        /* Sign-extend kernel-segment VPNs back to canonical 64-bit form. */
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        /* Flush every target page of the odd page of the pair. */
        cs = CPU(cpu);
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        /* `addr - 1 < end` keeps the loop terminating even if `end` is the
           all-ones address and `addr` wraps past it. */
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
841 #endif