/*
 * MIPS emulation helpers for qemu.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"
37 #if !defined(CONFIG_USER_ONLY)
39 /* no MMU emulation */
40 int no_mmu_map_address (CPUMIPSState
*env
, hwaddr
*physical
, int *prot
,
41 target_ulong address
, int rw
, int access_type
)
44 *prot
= PAGE_READ
| PAGE_WRITE
;
48 /* fixed mapping MMU emulation */
49 int fixed_mmu_map_address (CPUMIPSState
*env
, hwaddr
*physical
, int *prot
,
50 target_ulong address
, int rw
, int access_type
)
52 if (address
<= (int32_t)0x7FFFFFFFUL
) {
53 if (!(env
->CP0_Status
& (1 << CP0St_ERL
)))
54 *physical
= address
+ 0x40000000UL
;
57 } else if (address
<= (int32_t)0xBFFFFFFFUL
)
58 *physical
= address
& 0x1FFFFFFF;
62 *prot
= PAGE_READ
| PAGE_WRITE
;
66 /* MIPS32/MIPS64 R4000-style MMU emulation */
67 int r4k_map_address (CPUMIPSState
*env
, hwaddr
*physical
, int *prot
,
68 target_ulong address
, int rw
, int access_type
)
70 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
73 for (i
= 0; i
< env
->tlb
->tlb_in_use
; i
++) {
74 r4k_tlb_t
*tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
75 /* 1k pages are not supported. */
76 target_ulong mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
77 target_ulong tag
= address
& ~mask
;
78 target_ulong VPN
= tlb
->VPN
& ~mask
;
79 #if defined(TARGET_MIPS64)
83 /* Check ASID, virtual page number & size */
84 if ((tlb
->G
== 1 || tlb
->ASID
== ASID
) && VPN
== tag
&& !tlb
->EHINV
) {
86 int n
= !!(address
& mask
& ~(mask
>> 1));
87 /* Check access rights */
88 if (!(n
? tlb
->V1
: tlb
->V0
)) {
89 return TLBRET_INVALID
;
91 if (rw
== MMU_INST_FETCH
&& (n
? tlb
->XI1
: tlb
->XI0
)) {
94 if (rw
== MMU_DATA_LOAD
&& (n
? tlb
->RI1
: tlb
->RI0
)) {
97 if (rw
!= MMU_DATA_STORE
|| (n
? tlb
->D1
: tlb
->D0
)) {
98 *physical
= tlb
->PFN
[n
] | (address
& (mask
>> 1));
100 if (n
? tlb
->D1
: tlb
->D0
)
107 return TLBRET_NOMATCH
;
110 static int is_seg_am_mapped(unsigned int am
, bool eu
, int mmu_idx
)
113 * Interpret access control mode and mmu_idx.
116 * UK 0 0 1 1 0 0 - - 0
117 * MK 1 0 1 1 0 1 - - !eu
118 * MSK 2 0 0 1 0 1 1 - !eu
119 * MUSK 3 0 0 0 0 1 1 1 !eu
120 * MUSUK 4 0 0 0 0 0 1 1 0
121 * USK 5 0 0 1 0 0 0 - 0
122 * - 6 - - - - - - - -
123 * UUSK 7 0 0 0 0 0 0 0 0
129 /* If EU is set, always unmapped */
135 /* Never AdE, TLB mapped if AM={1,2,3} */
136 adetlb_mask
= 0x70000000;
140 /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
141 adetlb_mask
= 0xc0380000;
145 /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
146 adetlb_mask
= 0xe4180000;
149 /* does this AM cause AdE in current execution mode */
150 if ((adetlb_mask
<< am
) < 0) {
151 return TLBRET_BADADDR
;
156 /* is this AM mapped in current execution mode */
157 return ((adetlb_mask
<< am
) < 0);
160 return TLBRET_BADADDR
;
164 static int get_seg_physical_address(CPUMIPSState
*env
, hwaddr
*physical
,
165 int *prot
, target_ulong real_address
,
166 int rw
, int access_type
, int mmu_idx
,
167 unsigned int am
, bool eu
,
168 target_ulong segmask
,
169 hwaddr physical_base
)
171 int mapped
= is_seg_am_mapped(am
, eu
, mmu_idx
);
174 /* is_seg_am_mapped can report TLBRET_BADADDR */
177 /* The segment is TLB mapped */
178 return env
->tlb
->map_address(env
, physical
, prot
, real_address
, rw
,
181 /* The segment is unmapped */
182 *physical
= physical_base
| (real_address
& segmask
);
183 *prot
= PAGE_READ
| PAGE_WRITE
;
188 static int get_segctl_physical_address(CPUMIPSState
*env
, hwaddr
*physical
,
189 int *prot
, target_ulong real_address
,
190 int rw
, int access_type
, int mmu_idx
,
191 uint16_t segctl
, target_ulong segmask
)
193 unsigned int am
= (segctl
& CP0SC_AM_MASK
) >> CP0SC_AM
;
194 bool eu
= (segctl
>> CP0SC_EU
) & 1;
195 hwaddr pa
= ((hwaddr
)segctl
& CP0SC_PA_MASK
) << 20;
197 return get_seg_physical_address(env
, physical
, prot
, real_address
, rw
,
198 access_type
, mmu_idx
, am
, eu
, segmask
,
199 pa
& ~(hwaddr
)segmask
);
202 static int get_physical_address (CPUMIPSState
*env
, hwaddr
*physical
,
203 int *prot
, target_ulong real_address
,
204 int rw
, int access_type
, int mmu_idx
)
206 /* User mode can only access useg/xuseg */
207 #if defined(TARGET_MIPS64)
208 int user_mode
= mmu_idx
== MIPS_HFLAG_UM
;
209 int supervisor_mode
= mmu_idx
== MIPS_HFLAG_SM
;
210 int kernel_mode
= !user_mode
&& !supervisor_mode
;
211 int UX
= (env
->CP0_Status
& (1 << CP0St_UX
)) != 0;
212 int SX
= (env
->CP0_Status
& (1 << CP0St_SX
)) != 0;
213 int KX
= (env
->CP0_Status
& (1 << CP0St_KX
)) != 0;
215 int ret
= TLBRET_MATCH
;
216 /* effective address (modified for KVM T&E kernel segments) */
217 target_ulong address
= real_address
;
219 #define USEG_LIMIT ((target_ulong)(int32_t)0x7FFFFFFFUL)
220 #define KSEG0_BASE ((target_ulong)(int32_t)0x80000000UL)
221 #define KSEG1_BASE ((target_ulong)(int32_t)0xA0000000UL)
222 #define KSEG2_BASE ((target_ulong)(int32_t)0xC0000000UL)
223 #define KSEG3_BASE ((target_ulong)(int32_t)0xE0000000UL)
225 #define KVM_KSEG0_BASE ((target_ulong)(int32_t)0x40000000UL)
226 #define KVM_KSEG2_BASE ((target_ulong)(int32_t)0x60000000UL)
228 if (mips_um_ksegs_enabled()) {
229 /* KVM T&E adds guest kernel segments in useg */
230 if (real_address
>= KVM_KSEG0_BASE
) {
231 if (real_address
< KVM_KSEG2_BASE
) {
233 address
+= KSEG0_BASE
- KVM_KSEG0_BASE
;
234 } else if (real_address
<= USEG_LIMIT
) {
236 address
+= KSEG2_BASE
- KVM_KSEG2_BASE
;
241 if (address
<= USEG_LIMIT
) {
245 if (address
>= 0x40000000UL
) {
246 segctl
= env
->CP0_SegCtl2
;
248 segctl
= env
->CP0_SegCtl2
>> 16;
250 ret
= get_segctl_physical_address(env
, physical
, prot
, real_address
, rw
,
251 access_type
, mmu_idx
, segctl
,
253 #if defined(TARGET_MIPS64)
254 } else if (address
< 0x4000000000000000ULL
) {
256 if (UX
&& address
<= (0x3FFFFFFFFFFFFFFFULL
& env
->SEGMask
)) {
257 ret
= env
->tlb
->map_address(env
, physical
, prot
, real_address
, rw
, access_type
);
259 ret
= TLBRET_BADADDR
;
261 } else if (address
< 0x8000000000000000ULL
) {
263 if ((supervisor_mode
|| kernel_mode
) &&
264 SX
&& address
<= (0x7FFFFFFFFFFFFFFFULL
& env
->SEGMask
)) {
265 ret
= env
->tlb
->map_address(env
, physical
, prot
, real_address
, rw
, access_type
);
267 ret
= TLBRET_BADADDR
;
269 } else if (address
< 0xC000000000000000ULL
) {
271 if ((address
& 0x07FFFFFFFFFFFFFFULL
) <= env
->PAMask
) {
272 /* KX/SX/UX bit to check for each xkphys EVA access mode */
273 static const uint8_t am_ksux
[8] = {
274 [CP0SC_AM_UK
] = (1u << CP0St_KX
),
275 [CP0SC_AM_MK
] = (1u << CP0St_KX
),
276 [CP0SC_AM_MSK
] = (1u << CP0St_SX
),
277 [CP0SC_AM_MUSK
] = (1u << CP0St_UX
),
278 [CP0SC_AM_MUSUK
] = (1u << CP0St_UX
),
279 [CP0SC_AM_USK
] = (1u << CP0St_SX
),
280 [6] = (1u << CP0St_KX
),
281 [CP0SC_AM_UUSK
] = (1u << CP0St_UX
),
283 unsigned int am
= CP0SC_AM_UK
;
284 unsigned int xr
= (env
->CP0_SegCtl2
& CP0SC2_XR_MASK
) >> CP0SC2_XR
;
286 if (xr
& (1 << ((address
>> 59) & 0x7))) {
287 am
= (env
->CP0_SegCtl1
& CP0SC1_XAM_MASK
) >> CP0SC1_XAM
;
289 /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
290 if (env
->CP0_Status
& am_ksux
[am
]) {
291 ret
= get_seg_physical_address(env
, physical
, prot
,
292 real_address
, rw
, access_type
,
293 mmu_idx
, am
, false, env
->PAMask
,
296 ret
= TLBRET_BADADDR
;
299 ret
= TLBRET_BADADDR
;
301 } else if (address
< 0xFFFFFFFF80000000ULL
) {
303 if (kernel_mode
&& KX
&&
304 address
<= (0xFFFFFFFF7FFFFFFFULL
& env
->SEGMask
)) {
305 ret
= env
->tlb
->map_address(env
, physical
, prot
, real_address
, rw
, access_type
);
307 ret
= TLBRET_BADADDR
;
310 } else if (address
< KSEG1_BASE
) {
312 ret
= get_segctl_physical_address(env
, physical
, prot
, real_address
, rw
,
313 access_type
, mmu_idx
,
314 env
->CP0_SegCtl1
>> 16, 0x1FFFFFFF);
315 } else if (address
< KSEG2_BASE
) {
317 ret
= get_segctl_physical_address(env
, physical
, prot
, real_address
, rw
,
318 access_type
, mmu_idx
,
319 env
->CP0_SegCtl1
, 0x1FFFFFFF);
320 } else if (address
< KSEG3_BASE
) {
322 ret
= get_segctl_physical_address(env
, physical
, prot
, real_address
, rw
,
323 access_type
, mmu_idx
,
324 env
->CP0_SegCtl0
>> 16, 0x1FFFFFFF);
327 /* XXX: debug segment is not emulated */
328 ret
= get_segctl_physical_address(env
, physical
, prot
, real_address
, rw
,
329 access_type
, mmu_idx
,
330 env
->CP0_SegCtl0
, 0x1FFFFFFF);
335 void cpu_mips_tlb_flush(CPUMIPSState
*env
)
337 MIPSCPU
*cpu
= mips_env_get_cpu(env
);
339 /* Flush qemu's TLB and discard all shadowed entries. */
341 env
->tlb
->tlb_in_use
= env
->tlb
->nb_tlb
;
344 /* Called for updates to CP0_Status. */
345 void sync_c0_status(CPUMIPSState
*env
, CPUMIPSState
*cpu
, int tc
)
347 int32_t tcstatus
, *tcst
;
348 uint32_t v
= cpu
->CP0_Status
;
349 uint32_t cu
, mx
, asid
, ksu
;
350 uint32_t mask
= ((1 << CP0TCSt_TCU3
)
351 | (1 << CP0TCSt_TCU2
)
352 | (1 << CP0TCSt_TCU1
)
353 | (1 << CP0TCSt_TCU0
)
355 | (3 << CP0TCSt_TKSU
)
356 | (0xff << CP0TCSt_TASID
));
358 cu
= (v
>> CP0St_CU0
) & 0xf;
359 mx
= (v
>> CP0St_MX
) & 0x1;
360 ksu
= (v
>> CP0St_KSU
) & 0x3;
361 asid
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
363 tcstatus
= cu
<< CP0TCSt_TCU0
;
364 tcstatus
|= mx
<< CP0TCSt_TMX
;
365 tcstatus
|= ksu
<< CP0TCSt_TKSU
;
368 if (tc
== cpu
->current_tc
) {
369 tcst
= &cpu
->active_tc
.CP0_TCStatus
;
371 tcst
= &cpu
->tcs
[tc
].CP0_TCStatus
;
379 void cpu_mips_store_status(CPUMIPSState
*env
, target_ulong val
)
381 uint32_t mask
= env
->CP0_Status_rw_bitmask
;
382 target_ulong old
= env
->CP0_Status
;
384 if (env
->insn_flags
& ISA_MIPS32R6
) {
385 bool has_supervisor
= extract32(mask
, CP0St_KSU
, 2) == 0x3;
386 #if defined(TARGET_MIPS64)
387 uint32_t ksux
= (1 << CP0St_KX
) & val
;
388 ksux
|= (ksux
>> 1) & val
; /* KX = 0 forces SX to be 0 */
389 ksux
|= (ksux
>> 1) & val
; /* SX = 0 forces UX to be 0 */
390 val
= (val
& ~(7 << CP0St_UX
)) | ksux
;
392 if (has_supervisor
&& extract32(val
, CP0St_KSU
, 2) == 0x3) {
393 mask
&= ~(3 << CP0St_KSU
);
395 mask
&= ~(((1 << CP0St_SR
) | (1 << CP0St_NMI
)) & val
);
398 env
->CP0_Status
= (old
& ~mask
) | (val
& mask
);
399 #if defined(TARGET_MIPS64)
400 if ((env
->CP0_Status
^ old
) & (old
& (7 << CP0St_UX
))) {
401 /* Access to at least one of the 64-bit segments has been disabled */
402 tlb_flush(CPU(mips_env_get_cpu(env
)));
405 if (env
->CP0_Config3
& (1 << CP0C3_MT
)) {
406 sync_c0_status(env
, env
, env
->current_tc
);
412 void cpu_mips_store_cause(CPUMIPSState
*env
, target_ulong val
)
414 uint32_t mask
= 0x00C00300;
415 uint32_t old
= env
->CP0_Cause
;
418 if (env
->insn_flags
& ISA_MIPS32R2
) {
419 mask
|= 1 << CP0Ca_DC
;
421 if (env
->insn_flags
& ISA_MIPS32R6
) {
422 mask
&= ~((1 << CP0Ca_WP
) & val
);
425 env
->CP0_Cause
= (env
->CP0_Cause
& ~mask
) | (val
& mask
);
427 if ((old
^ env
->CP0_Cause
) & (1 << CP0Ca_DC
)) {
428 if (env
->CP0_Cause
& (1 << CP0Ca_DC
)) {
429 cpu_mips_stop_count(env
);
431 cpu_mips_start_count(env
);
435 /* Set/reset software interrupts */
436 for (i
= 0 ; i
< 2 ; i
++) {
437 if ((old
^ env
->CP0_Cause
) & (1 << (CP0Ca_IP
+ i
))) {
438 cpu_mips_soft_irq(env
, i
, env
->CP0_Cause
& (1 << (CP0Ca_IP
+ i
)));
444 static void raise_mmu_exception(CPUMIPSState
*env
, target_ulong address
,
445 int rw
, int tlb_error
)
447 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
448 int exception
= 0, error_code
= 0;
450 if (rw
== MMU_INST_FETCH
) {
451 error_code
|= EXCP_INST_NOTAVAIL
;
457 /* Reference to kernel address from user mode or supervisor mode */
458 /* Reference to supervisor address from user mode */
459 if (rw
== MMU_DATA_STORE
) {
460 exception
= EXCP_AdES
;
462 exception
= EXCP_AdEL
;
466 /* No TLB match for a mapped address */
467 if (rw
== MMU_DATA_STORE
) {
468 exception
= EXCP_TLBS
;
470 exception
= EXCP_TLBL
;
472 error_code
|= EXCP_TLB_NOMATCH
;
475 /* TLB match with no valid bit */
476 if (rw
== MMU_DATA_STORE
) {
477 exception
= EXCP_TLBS
;
479 exception
= EXCP_TLBL
;
483 /* TLB match but 'D' bit is cleared */
484 exception
= EXCP_LTLBL
;
487 /* Execute-Inhibit Exception */
488 if (env
->CP0_PageGrain
& (1 << CP0PG_IEC
)) {
489 exception
= EXCP_TLBXI
;
491 exception
= EXCP_TLBL
;
495 /* Read-Inhibit Exception */
496 if (env
->CP0_PageGrain
& (1 << CP0PG_IEC
)) {
497 exception
= EXCP_TLBRI
;
499 exception
= EXCP_TLBL
;
503 /* Raise exception */
504 env
->CP0_BadVAddr
= address
;
505 env
->CP0_Context
= (env
->CP0_Context
& ~0x007fffff) |
506 ((address
>> 9) & 0x007ffff0);
507 env
->CP0_EntryHi
= (env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
) |
508 (env
->CP0_EntryHi
& (1 << CP0EnHi_EHINV
)) |
509 (address
& (TARGET_PAGE_MASK
<< 1));
510 #if defined(TARGET_MIPS64)
511 env
->CP0_EntryHi
&= env
->SEGMask
;
513 /* PTEBase */ (env
->CP0_XContext
& ((~0ULL) << (env
->SEGBITS
- 7))) |
514 /* R */ (extract64(address
, 62, 2) << (env
->SEGBITS
- 9)) |
515 /* BadVPN2 */ (extract64(address
, 13, env
->SEGBITS
- 13) << 4);
517 cs
->exception_index
= exception
;
518 env
->error_code
= error_code
;
521 #if !defined(CONFIG_USER_ONLY)
522 hwaddr
mips_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
524 MIPSCPU
*cpu
= MIPS_CPU(cs
);
525 CPUMIPSState
*env
= &cpu
->env
;
529 if (get_physical_address(env
, &phys_addr
, &prot
, addr
, 0, ACCESS_INT
,
530 cpu_mmu_index(env
, false)) != 0) {
537 int mips_cpu_handle_mmu_fault(CPUState
*cs
, vaddr address
, int rw
,
540 MIPSCPU
*cpu
= MIPS_CPU(cs
);
541 CPUMIPSState
*env
= &cpu
->env
;
542 #if !defined(CONFIG_USER_ONLY)
550 log_cpu_state(cs
, 0);
552 qemu_log_mask(CPU_LOG_MMU
,
553 "%s pc " TARGET_FMT_lx
" ad %" VADDR_PRIx
" rw %d mmu_idx %d\n",
554 __func__
, env
->active_tc
.PC
, address
, rw
, mmu_idx
);
557 #if !defined(CONFIG_USER_ONLY)
558 /* XXX: put correct access by using cpu_restore_state()
560 access_type
= ACCESS_INT
;
561 ret
= get_physical_address(env
, &physical
, &prot
,
562 address
, rw
, access_type
, mmu_idx
);
565 qemu_log_mask(CPU_LOG_MMU
,
566 "%s address=%" VADDR_PRIx
" physical " TARGET_FMT_plx
567 " prot %d\n", __func__
, address
, physical
, prot
);
570 qemu_log_mask(CPU_LOG_MMU
,
571 "%s address=%" VADDR_PRIx
" ret %d\n", __func__
, address
,
575 if (ret
== TLBRET_MATCH
) {
576 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
,
577 physical
& TARGET_PAGE_MASK
, prot
| PAGE_EXEC
,
578 mmu_idx
, TARGET_PAGE_SIZE
);
583 raise_mmu_exception(env
, address
, rw
, ret
);
590 #if !defined(CONFIG_USER_ONLY)
591 hwaddr
cpu_mips_translate_address(CPUMIPSState
*env
, target_ulong address
, int rw
)
599 access_type
= ACCESS_INT
;
600 ret
= get_physical_address(env
, &physical
, &prot
, address
, rw
, access_type
,
601 cpu_mmu_index(env
, false));
602 if (ret
!= TLBRET_MATCH
) {
603 raise_mmu_exception(env
, address
, rw
, ret
);
610 static const char * const excp_names
[EXCP_LAST
+ 1] = {
611 [EXCP_RESET
] = "reset",
612 [EXCP_SRESET
] = "soft reset",
613 [EXCP_DSS
] = "debug single step",
614 [EXCP_DINT
] = "debug interrupt",
615 [EXCP_NMI
] = "non-maskable interrupt",
616 [EXCP_MCHECK
] = "machine check",
617 [EXCP_EXT_INTERRUPT
] = "interrupt",
618 [EXCP_DFWATCH
] = "deferred watchpoint",
619 [EXCP_DIB
] = "debug instruction breakpoint",
620 [EXCP_IWATCH
] = "instruction fetch watchpoint",
621 [EXCP_AdEL
] = "address error load",
622 [EXCP_AdES
] = "address error store",
623 [EXCP_TLBF
] = "TLB refill",
624 [EXCP_IBE
] = "instruction bus error",
625 [EXCP_DBp
] = "debug breakpoint",
626 [EXCP_SYSCALL
] = "syscall",
627 [EXCP_BREAK
] = "break",
628 [EXCP_CpU
] = "coprocessor unusable",
629 [EXCP_RI
] = "reserved instruction",
630 [EXCP_OVERFLOW
] = "arithmetic overflow",
631 [EXCP_TRAP
] = "trap",
632 [EXCP_FPE
] = "floating point",
633 [EXCP_DDBS
] = "debug data break store",
634 [EXCP_DWATCH
] = "data watchpoint",
635 [EXCP_LTLBL
] = "TLB modify",
636 [EXCP_TLBL
] = "TLB load",
637 [EXCP_TLBS
] = "TLB store",
638 [EXCP_DBE
] = "data bus error",
639 [EXCP_DDBL
] = "debug data break load",
640 [EXCP_THREAD
] = "thread",
641 [EXCP_MDMX
] = "MDMX",
642 [EXCP_C2E
] = "precise coprocessor 2",
643 [EXCP_CACHE
] = "cache error",
644 [EXCP_TLBXI
] = "TLB execute-inhibit",
645 [EXCP_TLBRI
] = "TLB read-inhibit",
646 [EXCP_MSADIS
] = "MSA disabled",
647 [EXCP_MSAFPE
] = "MSA floating point",
651 target_ulong
exception_resume_pc (CPUMIPSState
*env
)
654 target_ulong isa_mode
;
656 isa_mode
= !!(env
->hflags
& MIPS_HFLAG_M16
);
657 bad_pc
= env
->active_tc
.PC
| isa_mode
;
658 if (env
->hflags
& MIPS_HFLAG_BMASK
) {
659 /* If the exception was raised from a delay slot, come back to
661 bad_pc
-= (env
->hflags
& MIPS_HFLAG_B16
? 2 : 4);
667 #if !defined(CONFIG_USER_ONLY)
668 static void set_hflags_for_handler (CPUMIPSState
*env
)
670 /* Exception handlers are entered in 32-bit mode. */
671 env
->hflags
&= ~(MIPS_HFLAG_M16
);
672 /* ...except that microMIPS lets you choose. */
673 if (env
->insn_flags
& ASE_MICROMIPS
) {
674 env
->hflags
|= (!!(env
->CP0_Config3
675 & (1 << CP0C3_ISA_ON_EXC
))
676 << MIPS_HFLAG_M16_SHIFT
);
680 static inline void set_badinstr_registers(CPUMIPSState
*env
)
682 if (env
->hflags
& MIPS_HFLAG_M16
) {
683 /* TODO: add BadInstr support for microMIPS */
686 if (env
->CP0_Config3
& (1 << CP0C3_BI
)) {
687 env
->CP0_BadInstr
= cpu_ldl_code(env
, env
->active_tc
.PC
);
689 if ((env
->CP0_Config3
& (1 << CP0C3_BP
)) &&
690 (env
->hflags
& MIPS_HFLAG_BMASK
)) {
691 env
->CP0_BadInstrP
= cpu_ldl_code(env
, env
->active_tc
.PC
- 4);
696 void mips_cpu_do_interrupt(CPUState
*cs
)
698 #if !defined(CONFIG_USER_ONLY)
699 MIPSCPU
*cpu
= MIPS_CPU(cs
);
700 CPUMIPSState
*env
= &cpu
->env
;
701 bool update_badinstr
= 0;
706 if (qemu_loglevel_mask(CPU_LOG_INT
)
707 && cs
->exception_index
!= EXCP_EXT_INTERRUPT
) {
708 if (cs
->exception_index
< 0 || cs
->exception_index
> EXCP_LAST
) {
711 name
= excp_names
[cs
->exception_index
];
714 qemu_log("%s enter: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
716 __func__
, env
->active_tc
.PC
, env
->CP0_EPC
, name
);
718 if (cs
->exception_index
== EXCP_EXT_INTERRUPT
&&
719 (env
->hflags
& MIPS_HFLAG_DM
)) {
720 cs
->exception_index
= EXCP_DINT
;
723 switch (cs
->exception_index
) {
725 env
->CP0_Debug
|= 1 << CP0DB_DSS
;
726 /* Debug single step cannot be raised inside a delay slot and
727 resume will always occur on the next instruction
728 (but we assume the pc has always been updated during
729 code translation). */
730 env
->CP0_DEPC
= env
->active_tc
.PC
| !!(env
->hflags
& MIPS_HFLAG_M16
);
731 goto enter_debug_mode
;
733 env
->CP0_Debug
|= 1 << CP0DB_DINT
;
736 env
->CP0_Debug
|= 1 << CP0DB_DIB
;
739 env
->CP0_Debug
|= 1 << CP0DB_DBp
;
740 /* Setup DExcCode - SDBBP instruction */
741 env
->CP0_Debug
= (env
->CP0_Debug
& ~(0x1fULL
<< CP0DB_DEC
)) | 9 << CP0DB_DEC
;
744 env
->CP0_Debug
|= 1 << CP0DB_DDBS
;
747 env
->CP0_Debug
|= 1 << CP0DB_DDBL
;
749 env
->CP0_DEPC
= exception_resume_pc(env
);
750 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
752 if (env
->insn_flags
& ISA_MIPS3
) {
753 env
->hflags
|= MIPS_HFLAG_64
;
754 if (!(env
->insn_flags
& ISA_MIPS64R6
) ||
755 env
->CP0_Status
& (1 << CP0St_KX
)) {
756 env
->hflags
&= ~MIPS_HFLAG_AWRAP
;
759 env
->hflags
|= MIPS_HFLAG_DM
| MIPS_HFLAG_CP0
;
760 env
->hflags
&= ~(MIPS_HFLAG_KSU
);
761 /* EJTAG probe trap enable is not implemented... */
762 if (!(env
->CP0_Status
& (1 << CP0St_EXL
)))
763 env
->CP0_Cause
&= ~(1U << CP0Ca_BD
);
764 env
->active_tc
.PC
= env
->exception_base
+ 0x480;
765 set_hflags_for_handler(env
);
771 env
->CP0_Status
|= (1 << CP0St_SR
);
772 memset(env
->CP0_WatchLo
, 0, sizeof(env
->CP0_WatchLo
));
775 env
->CP0_Status
|= (1 << CP0St_NMI
);
777 env
->CP0_ErrorEPC
= exception_resume_pc(env
);
778 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
779 env
->CP0_Status
|= (1 << CP0St_ERL
) | (1 << CP0St_BEV
);
780 if (env
->insn_flags
& ISA_MIPS3
) {
781 env
->hflags
|= MIPS_HFLAG_64
;
782 if (!(env
->insn_flags
& ISA_MIPS64R6
) ||
783 env
->CP0_Status
& (1 << CP0St_KX
)) {
784 env
->hflags
&= ~MIPS_HFLAG_AWRAP
;
787 env
->hflags
|= MIPS_HFLAG_CP0
;
788 env
->hflags
&= ~(MIPS_HFLAG_KSU
);
789 if (!(env
->CP0_Status
& (1 << CP0St_EXL
)))
790 env
->CP0_Cause
&= ~(1U << CP0Ca_BD
);
791 env
->active_tc
.PC
= env
->exception_base
;
792 set_hflags_for_handler(env
);
794 case EXCP_EXT_INTERRUPT
:
796 if (env
->CP0_Cause
& (1 << CP0Ca_IV
)) {
797 uint32_t spacing
= (env
->CP0_IntCtl
>> CP0IntCtl_VS
) & 0x1f;
799 if ((env
->CP0_Status
& (1 << CP0St_BEV
)) || spacing
== 0) {
803 uint32_t pending
= (env
->CP0_Cause
& CP0Ca_IP_mask
) >> CP0Ca_IP
;
805 if (env
->CP0_Config3
& (1 << CP0C3_VEIC
)) {
806 /* For VEIC mode, the external interrupt controller feeds
807 * the vector through the CP0Cause IP lines. */
810 /* Vectored Interrupts
811 * Mask with Status.IM7-IM0 to get enabled interrupts. */
812 pending
&= (env
->CP0_Status
>> CP0St_IM
) & 0xff;
813 /* Find the highest-priority interrupt. */
814 while (pending
>>= 1) {
818 offset
= 0x200 + (vector
* (spacing
<< 5));
824 update_badinstr
= !(env
->error_code
& EXCP_INST_NOTAVAIL
);
828 update_badinstr
= !(env
->error_code
& EXCP_INST_NOTAVAIL
);
829 if ((env
->error_code
& EXCP_TLB_NOMATCH
) &&
830 !(env
->CP0_Status
& (1 << CP0St_EXL
))) {
831 #if defined(TARGET_MIPS64)
832 int R
= env
->CP0_BadVAddr
>> 62;
833 int UX
= (env
->CP0_Status
& (1 << CP0St_UX
)) != 0;
834 int KX
= (env
->CP0_Status
& (1 << CP0St_KX
)) != 0;
836 if ((R
!= 0 || UX
) && (R
!= 3 || KX
) &&
837 (!(env
->insn_flags
& (INSN_LOONGSON2E
| INSN_LOONGSON2F
)))) {
842 #if defined(TARGET_MIPS64)
850 if ((env
->error_code
& EXCP_TLB_NOMATCH
) &&
851 !(env
->CP0_Status
& (1 << CP0St_EXL
))) {
852 #if defined(TARGET_MIPS64)
853 int R
= env
->CP0_BadVAddr
>> 62;
854 int UX
= (env
->CP0_Status
& (1 << CP0St_UX
)) != 0;
855 int KX
= (env
->CP0_Status
& (1 << CP0St_KX
)) != 0;
857 if ((R
!= 0 || UX
) && (R
!= 3 || KX
) &&
858 (!(env
->insn_flags
& (INSN_LOONGSON2E
| INSN_LOONGSON2F
)))) {
863 #if defined(TARGET_MIPS64)
870 update_badinstr
= !(env
->error_code
& EXCP_INST_NOTAVAIL
);
897 env
->CP0_Cause
= (env
->CP0_Cause
& ~(0x3 << CP0Ca_CE
)) |
898 (env
->error_code
<< CP0Ca_CE
);
935 /* XXX: TODO: manage deferred watch exceptions */
950 if (!(env
->CP0_Status
& (1 << CP0St_EXL
))) {
951 env
->CP0_EPC
= exception_resume_pc(env
);
952 if (update_badinstr
) {
953 set_badinstr_registers(env
);
955 if (env
->hflags
& MIPS_HFLAG_BMASK
) {
956 env
->CP0_Cause
|= (1U << CP0Ca_BD
);
958 env
->CP0_Cause
&= ~(1U << CP0Ca_BD
);
960 env
->CP0_Status
|= (1 << CP0St_EXL
);
961 if (env
->insn_flags
& ISA_MIPS3
) {
962 env
->hflags
|= MIPS_HFLAG_64
;
963 if (!(env
->insn_flags
& ISA_MIPS64R6
) ||
964 env
->CP0_Status
& (1 << CP0St_KX
)) {
965 env
->hflags
&= ~MIPS_HFLAG_AWRAP
;
968 env
->hflags
|= MIPS_HFLAG_CP0
;
969 env
->hflags
&= ~(MIPS_HFLAG_KSU
);
971 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
972 if (env
->CP0_Status
& (1 << CP0St_BEV
)) {
973 env
->active_tc
.PC
= env
->exception_base
+ 0x200;
974 } else if (cause
== 30 && !(env
->CP0_Config3
& (1 << CP0C3_SC
) &&
975 env
->CP0_Config5
& (1 << CP0C5_CV
))) {
976 /* Force KSeg1 for cache errors */
977 env
->active_tc
.PC
= KSEG1_BASE
| (env
->CP0_EBase
& 0x1FFFF000);
979 env
->active_tc
.PC
= env
->CP0_EBase
& ~0xfff;
982 env
->active_tc
.PC
+= offset
;
983 set_hflags_for_handler(env
);
984 env
->CP0_Cause
= (env
->CP0_Cause
& ~(0x1f << CP0Ca_EC
)) | (cause
<< CP0Ca_EC
);
989 if (qemu_loglevel_mask(CPU_LOG_INT
)
990 && cs
->exception_index
!= EXCP_EXT_INTERRUPT
) {
991 qemu_log("%s: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
" cause %d\n"
992 " S %08x C %08x A " TARGET_FMT_lx
" D " TARGET_FMT_lx
"\n",
993 __func__
, env
->active_tc
.PC
, env
->CP0_EPC
, cause
,
994 env
->CP0_Status
, env
->CP0_Cause
, env
->CP0_BadVAddr
,
998 cs
->exception_index
= EXCP_NONE
;
1001 bool mips_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1003 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
1004 MIPSCPU
*cpu
= MIPS_CPU(cs
);
1005 CPUMIPSState
*env
= &cpu
->env
;
1007 if (cpu_mips_hw_interrupts_enabled(env
) &&
1008 cpu_mips_hw_interrupts_pending(env
)) {
1010 cs
->exception_index
= EXCP_EXT_INTERRUPT
;
1011 env
->error_code
= 0;
1012 mips_cpu_do_interrupt(cs
);
1019 #if !defined(CONFIG_USER_ONLY)
1020 void r4k_invalidate_tlb (CPUMIPSState
*env
, int idx
, int use_extra
)
1022 MIPSCPU
*cpu
= mips_env_get_cpu(env
);
1027 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
1030 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
1031 /* The qemu TLB is flushed when the ASID changes, so no need to
1032 flush these entries again. */
1033 if (tlb
->G
== 0 && tlb
->ASID
!= ASID
) {
1037 if (use_extra
&& env
->tlb
->tlb_in_use
< MIPS_TLB_MAX
) {
1038 /* For tlbwr, we can shadow the discarded entry into
1039 a new (fake) TLB entry, as long as the guest can not
1040 tell that it's there. */
1041 env
->tlb
->mmu
.r4k
.tlb
[env
->tlb
->tlb_in_use
] = *tlb
;
1042 env
->tlb
->tlb_in_use
++;
1046 /* 1k pages are not supported. */
1047 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
1050 addr
= tlb
->VPN
& ~mask
;
1051 #if defined(TARGET_MIPS64)
1052 if (addr
>= (0xFFFFFFFF80000000ULL
& env
->SEGMask
)) {
1053 addr
|= 0x3FFFFF0000000000ULL
;
1056 end
= addr
| (mask
>> 1);
1057 while (addr
< end
) {
1058 tlb_flush_page(cs
, addr
);
1059 addr
+= TARGET_PAGE_SIZE
;
1064 addr
= (tlb
->VPN
& ~mask
) | ((mask
>> 1) + 1);
1065 #if defined(TARGET_MIPS64)
1066 if (addr
>= (0xFFFFFFFF80000000ULL
& env
->SEGMask
)) {
1067 addr
|= 0x3FFFFF0000000000ULL
;
1071 while (addr
- 1 < end
) {
1072 tlb_flush_page(cs
, addr
);
1073 addr
+= TARGET_PAGE_SIZE
;
1079 void QEMU_NORETURN
do_raise_exception_err(CPUMIPSState
*env
,
1084 CPUState
*cs
= CPU(mips_env_get_cpu(env
));
1086 if (exception
< EXCP_SC
) {
1087 qemu_log_mask(CPU_LOG_INT
, "%s: %d %d\n",
1088 __func__
, exception
, error_code
);
1090 cs
->exception_index
= exception
;
1091 env
->error_code
= error_code
;
1093 cpu_loop_exit_restore(cs
, pc
);