/*
 * MIPS emulation helpers for qemu.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"
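/* Result codes for the map_address handlers below.  Negative values are
 * translation failures that raise_mmu_exception() turns into the matching
 * MIPS exception; TLBRET_MATCH means the translation succeeded. */
enum {
    TLBRET_XI = -6,
    TLBRET_RI = -5,
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};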
#if !defined(CONFIG_USER_ONLY)

/* no MMU emulation */
int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}
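/* With the fixed-mapping MMU there is no TLB: kuseg is mapped linearly at
 * physical 0x40000000 (or to physical 0 while Status.ERL is set),
 * kseg0/kseg1 map onto the low 512MB, and every page is read/write. */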
/* fixed mapping MMU emulation */
int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, int rw, int access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}
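/* Each R4000-style TLB entry maps a pair of virtual pages whose size is
 * selected by PageMask; bit 'n' below picks the even or odd page of the
 * pair and with it the PFN, valid, dirty and inhibit bits to use. */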
/* MIPS32/MIPS64 R4000-style MMU emulation */
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    int i;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}
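/* Decide how a segment with access control mode 'am' behaves for the
 * current mmu_idx: returns 1 if it is TLB mapped, 0 if it is unmapped,
 * or TLBRET_BADADDR if the access raises an address error. */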
static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
{
    /*
     * Interpret access control mode and mmu_idx.
     *           AdE?     TLB?
     *      AM  K S U E  K S U E
     * UK    0  0 1 1 0  0 - - 0
     * MK    1  0 1 1 0  1 - - !eu
     * MSK   2  0 0 1 0  1 1 - !eu
     * MUSK  3  0 0 0 0  1 1 1 !eu
     * MUSUK 4  0 0 0 0  0 1 1 0
     * USK   5  0 0 1 0  0 0 - 0
     * -     6  - - - -  - - - -
     * UUSK  7  0 0 0 0  0 0 0 0
     */
    int32_t adetlb_mask;

    switch (mmu_idx) {
    case 3 /* ERL */:
        /* If EU is set, always unmapped */
        if (eu) {
            return 0;
        }
        /* fall through */
    case MIPS_HFLAG_KM:
        /* Never AdE, TLB mapped if AM={1,2,3} */
        adetlb_mask = 0x70000000;
        goto check_tlb;

    case MIPS_HFLAG_SM:
        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
        adetlb_mask = 0xc0380000;
        goto check_ade;

    case MIPS_HFLAG_UM:
        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
        adetlb_mask = 0xe4180000;
        /* fall through */
    check_ade:
        /* does this AM cause AdE in current execution mode */
        if ((adetlb_mask << am) < 0) {
            return TLBRET_BADADDR;
        }
        adetlb_mask <<= 8;
        /* fall through */
    check_tlb:
        /* is this AM mapped in current execution mode */
        return ((adetlb_mask << am) < 0);
    default:
        assert(0);
        return TLBRET_BADADDR;
    };
}
static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
                                    int *prot, target_ulong real_address,
                                    int rw, int access_type, int mmu_idx,
                                    unsigned int am, bool eu,
                                    target_ulong segmask,
                                    hwaddr physical_base)
{
    int mapped = is_seg_am_mapped(am, eu, mmu_idx);

    if (mapped < 0) {
        /* is_seg_am_mapped can report TLBRET_BADADDR */
        return mapped;
    } else if (mapped) {
        /* The segment is TLB mapped */
        return env->tlb->map_address(env, physical, prot, real_address, rw,
                                     access_type);
    } else {
        /* The segment is unmapped */
        *physical = physical_base | (real_address & segmask);
        *prot = PAGE_READ | PAGE_WRITE;
        return TLBRET_MATCH;
    }
}
static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
                                       int *prot, target_ulong real_address,
                                       int rw, int access_type, int mmu_idx,
                                       uint16_t segctl, target_ulong segmask)
{
    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
    bool eu = (segctl >> CP0SC_EU) & 1;
    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;

    return get_seg_physical_address(env, physical, prot, real_address, rw,
                                    access_type, mmu_idx, am, eu, segmask,
                                    pa & ~(hwaddr)segmask);
}
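/* Walk the classic MIPS virtual address map: useg (and, on MIPS64, the
 * xuseg/xsseg/xkphys/xkseg regions) followed by the 512MB kseg0, kseg1,
 * sseg and kseg3 windows, with the programmable behaviour of each
 * segment taken from the EVA SegCtl0-2 registers. */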
static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong real_address,
                                 int rw, int access_type, int mmu_idx)
{
    /* User mode can only access useg/xuseg */
#if defined(TARGET_MIPS64)
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

    if (mips_um_ksegs_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        uint16_t segctl;

        if (address >= 0x40000000UL) {
            segctl = env->CP0_SegCtl2;
        } else {
            segctl = env->CP0_SegCtl2 >> 16;
        }
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx, segctl,
                                          0x3FFFFFFF);
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            /* KX/SX/UX bit to check for each xkphys EVA access mode */
            static const uint8_t am_ksux[8] = {
                [CP0SC_AM_UK]    = (1u << CP0St_KX),
                [CP0SC_AM_MK]    = (1u << CP0St_KX),
                [CP0SC_AM_MSK]   = (1u << CP0St_SX),
                [CP0SC_AM_MUSK]  = (1u << CP0St_UX),
                [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
                [CP0SC_AM_USK]   = (1u << CP0St_SX),
                [6]              = (1u << CP0St_KX),
                [CP0SC_AM_UUSK]  = (1u << CP0St_UX),
            };
            unsigned int am = CP0SC_AM_UK;
            unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;

            if (xr & (1 << ((address >> 59) & 0x7))) {
                am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
            }
            /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
            if (env->CP0_Status & am_ksux[am]) {
                ret = get_seg_physical_address(env, physical, prot,
                                               real_address, rw, access_type,
                                               mmu_idx, am, false, env->PAMask,
                                               0);
            } else {
                ret = TLBRET_BADADDR;
            }
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < KSEG1_BASE) {
        /* kseg0 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
    } else if (address < KSEG2_BASE) {
        /* kseg1 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1, 0x1FFFFFFF);
    } else if (address < KSEG3_BASE) {
        /* sseg (kseg2) */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
    } else {
        /* kseg3 */
        /* XXX: debug segment is not emulated */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0, 0x1FFFFFFF);
    }
    return ret;
}
void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush(CPU(cpu));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
/* Called for updates to CP0_Status.  */
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                     | (1 << CP0TCSt_TCU2)
                     | (1 << CP0TCSt_TCU1)
                     | (1 << CP0TCSt_TCU0)
                     | (1 << CP0TCSt_TMX)
                     | (3 << CP0TCSt_TKSU)
                     | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
}
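/* Write CP0.Status through the per-CPU read/write bitmask, applying the
 * extra Release 6 rules: KX=0 forces SX=0 which forces UX=0, and an
 * unimplemented supervisor mode cannot be selected through KSU. */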
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    target_ulong old = env->CP0_Status;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
#if defined(TARGET_MIPS64)
        uint32_t ksux = (1 << CP0St_KX) & val;
        ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
        ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
        val = (val & ~(7 << CP0St_UX)) | ksux;
#endif
        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (old & ~mask) | (val & mask);
#if defined(TARGET_MIPS64)
    if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
        /* Access to at least one of the 64-bit segments has been disabled */
        tlb_flush(CPU(mips_env_get_cpu(env)));
    }
#endif
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    } else {
        compute_hflags(env);
    }
}
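/* Write CP0.Cause: only IP1..IP0, WP and IV are software-writable (plus
 * DC from R2 onwards); toggling DC stops or restarts the Count register,
 * and IP1..IP0 writes raise or clear the software interrupts. */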
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask &= ~((1 << CP0Ca_WP) & val);
    }

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(env);
        } else {
            cpu_mips_start_count(env);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
#endif
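/* Convert a negative TLBRET_* code into the corresponding MIPS exception
 * and fill in the CP0 BadVAddr, Context, EntryHi (and, for MIPS64,
 * XContext) registers that a TLB refill handler expects. */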
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                int rw, int tlb_error)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));
    int exception = 0, error_code = 0;

    if (rw == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    env->CP0_BadVAddr = address;
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        /* PTEBase */   (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
        /* R */         (extract64(address, 62, 2) << (env->SEGBITS - 9)) |
        /* BadVPN2 */   (extract64(address, 13, env->SEGBITS - 13) << 4);
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}
#if !defined(CONFIG_USER_ONLY)
hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif
int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                              int mmu_idx)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
    int access_type;
#endif
    int ret = 0;

#if 0
    log_cpu_state(cs, 0);
#endif
    qemu_log_mask(CPU_LOG_MMU,
              "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
              __func__, env->active_tc.PC, address, rw, mmu_idx);

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state()
       correctly */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot,
                               address, rw, access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                     mmu_idx, TARGET_PAGE_SIZE);
        ret = 0;
    } else if (ret < 0)
#endif
    {
        raise_mmu_exception(env, address, rw, ret);
        ret = 1;
    }

    return ret;
}
#if !defined(CONFIG_USER_ONLY)
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
{
    hwaddr physical;
    int prot;
    int access_type;
    int ret;

    /* data access */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
                               cpu_mmu_index(env, false));
    if (ret != TLBRET_MATCH) {
        raise_mmu_exception(env, address, rw, ret);
        return -1LL;
    } else {
        return physical;
    }
}
#endif
static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset",
    [EXCP_SRESET] = "soft reset",
    [EXCP_DSS] = "debug single step",
    [EXCP_DINT] = "debug interrupt",
    [EXCP_NMI] = "non-maskable interrupt",
    [EXCP_MCHECK] = "machine check",
    [EXCP_EXT_INTERRUPT] = "interrupt",
    [EXCP_DFWATCH] = "deferred watchpoint",
    [EXCP_DIB] = "debug instruction breakpoint",
    [EXCP_IWATCH] = "instruction fetch watchpoint",
    [EXCP_AdEL] = "address error load",
    [EXCP_AdES] = "address error store",
    [EXCP_TLBF] = "TLB refill",
    [EXCP_IBE] = "instruction bus error",
    [EXCP_DBp] = "debug breakpoint",
    [EXCP_SYSCALL] = "syscall",
    [EXCP_BREAK] = "break",
    [EXCP_CpU] = "coprocessor unusable",
    [EXCP_RI] = "reserved instruction",
    [EXCP_OVERFLOW] = "arithmetic overflow",
    [EXCP_TRAP] = "trap",
    [EXCP_FPE] = "floating point",
    [EXCP_DDBS] = "debug data break store",
    [EXCP_DWATCH] = "data watchpoint",
    [EXCP_LTLBL] = "TLB modify",
    [EXCP_TLBL] = "TLB load",
    [EXCP_TLBS] = "TLB store",
    [EXCP_DBE] = "data bus error",
    [EXCP_DDBL] = "debug data break load",
    [EXCP_THREAD] = "thread",
    [EXCP_MDMX] = "MDMX",
    [EXCP_C2E] = "precise coprocessor 2",
    [EXCP_CACHE] = "cache error",
    [EXCP_TLBXI] = "TLB execute-inhibit",
    [EXCP_TLBRI] = "TLB read-inhibit",
    [EXCP_MSADIS] = "MSA disabled",
    [EXCP_MSAFPE] = "MSA floating point",
};
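/* Compute the PC at which execution should eventually resume: the
 * faulting instruction itself, or the branch if the fault was raised
 * from a delay slot, with the current ISA mode kept in bit 0. */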
target_ulong exception_resume_pc (CPUMIPSState *env)
{
    target_ulong bad_pc;
    target_ulong isa_mode;

    isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
    bad_pc = env->active_tc.PC | isa_mode;
    if (env->hflags & MIPS_HFLAG_BMASK) {
        /* If the exception was raised from a delay slot, come back to
           the jump.  */
        bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
    }

    return bad_pc;
}
#if !defined(CONFIG_USER_ONLY)
static void set_hflags_for_handler (CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode.  */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose.  */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3
                           & (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}
static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}
#endif
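/* Main exception entry point: update the CP0 state (DEPC/ErrorEPC/EPC,
 * Status, Cause) for the pending exception and jump to the handler
 * selected by the exception class, Status.BEV and EBase. */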
void mips_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;
    const char *name;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, name);
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /* Debug single step cannot be raised inside a delay slot and
           resume will always occur on the next instruction
           (but we assume the pc has always been updated during
           code translation). */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) | 9 << CP0DB_DEC;
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
    enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
    set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /* For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines. */
                    vector = pending;
                } else {
                    /* Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts. */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
    set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS64R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    cs->exception_index = EXCP_NONE;
}
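/* Polled from the main execution loop: take a pending hardware interrupt
 * if interrupts are architecturally enabled and one is asserted. */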
bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);
    CPUState *cs;
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
       flush these entries again. */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /* For tlbwr, we can shadow the discarded entry into
           a new (fake) TLB entry, as long as the guest can not
           tell that it's there. */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        cs = CPU(cpu);
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        cs = CPU(cpu);
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
#endif
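/* Raise a guest exception from helper code; 'pc' is the host return
 * address that cpu_loop_exit_restore() uses to recover the exact guest
 * state before unwinding to the exception handler. */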
void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                          uint32_t exception,
                                          int error_code,
                                          uintptr_t pc)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    if (exception < EXCP_SC) {
        qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
                      __func__, exception, error_code);
    }
    cs->exception_index = exception;
    env->error_code = error_code;

    cpu_loop_exit_restore(cs, pc);
}