/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"

enum {
    TLBRET_XI = -6,
    TLBRET_RI = -5,
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};

#if !defined(CONFIG_USER_ONLY)
/* no MMU emulation */
int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}
/* fixed mapping MMU emulation */
int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, int rw, int access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL)))
            *physical = address + 0x40000000UL;
        else
            *physical = address;
    } else if (address <= (int32_t)0xBFFFFFFFUL)
        *physical = address & 0x1FFFFFFF;
    else
        *physical = address;

    *prot = PAGE_READ | PAGE_WRITE;
    return TLBRET_MATCH;
}
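
/*
 * Illustrative sketch of the fixed mapping above; a hypothetical helper,
 * deliberately kept out of the build.  With Status.ERL clear, useg is
 * offset up by 0x40000000, while the kseg0/kseg1 range is stripped down
 * to its low 29 bits.
 */
#if 0
static void fixed_mmu_example(CPUMIPSState *env)
{
    hwaddr pa;
    int prot;

    /* useg, Status.ERL clear: 0x00001000 -> physical 0x40001000 */
    fixed_mmu_map_address(env, &pa, &prot, 0x00001000UL,
                          MMU_DATA_LOAD, ACCESS_INT);
    /* kseg0: 0x80001000 -> physical 0x00001000 (address & 0x1FFFFFFF) */
    fixed_mmu_map_address(env, &pa, &prot,
                          (target_ulong)(int32_t)0x80001000UL,
                          MMU_DATA_LOAD, ACCESS_INT);
}
#endif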
/* MIPS32/MIPS64 R4000-style MMU emulation */
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    int i;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0)
                    *prot |= PAGE_WRITE;
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}
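
/*
 * Illustrative sketch, not built: even/odd page selection in
 * r4k_map_address() for the default 4 KiB page size (PageMask == 0).
 * The mask covers a pair of pages, and the bit just above the page
 * offset picks PFN[1]/V1/D1 instead of PFN[0]/V0/D0.
 */
#if 0
static int r4k_example_odd_page(target_ulong address)
{
    target_ulong mask = 0 | ~(TARGET_PAGE_MASK << 1); /* 0x1FFF for 4K */
    return !!(address & mask & ~(mask >> 1));         /* address bit 12 */
}
#endif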
static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
{
    /*
     * Interpret access control mode and mmu_idx.
     *           AdE?     TLB?
     *      AM  K S U E  K S U E
     * UK    0  0 1 1 0  0 - - 0
     * MK    1  0 1 1 0  1 - - !eu
     * MSK   2  0 0 1 0  1 1 - !eu
     * MUSK  3  0 0 0 0  1 1 1 !eu
     * MUSUK 4  0 0 0 0  0 1 1 0
     * USK   5  0 0 1 0  0 0 - 0
     * -     6  - - - -  - - - -
     * UUSK  7  0 0 0 0  0 0 0 0
     */
    int32_t adetlb_mask;

    switch (mmu_idx) {
    case 3: /* ERL */
        /* If EU is set, always unmapped */
        if (eu) {
            return 0;
        }
        /* fall through */
    case MIPS_HFLAG_KM:
        /* Never AdE, TLB mapped if AM={1,2,3} */
        adetlb_mask = 0x70000000;
        goto check_tlb;
    case MIPS_HFLAG_SM:
        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
        adetlb_mask = 0xc0380000;
        goto check_ade;
    case MIPS_HFLAG_UM:
        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
        adetlb_mask = 0xe4180000;
        /* fall through */
    check_ade:
        /* does this AM cause AdE in current execution mode */
        if ((adetlb_mask << am) < 0) {
            return TLBRET_BADADDR;
        }
        adetlb_mask <<= 8;
        /* fall through */
    check_tlb:
        /* is this AM mapped in current execution mode */
        return ((adetlb_mask << am) < 0);
    default:
        assert(0);
        return TLBRET_BADADDR;
    };
}
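
/*
 * Illustrative sketch, not built: the packed-table trick used by
 * is_seg_am_mapped().  Each 8-bit column group of the table above is
 * encoded MSB-first, one bit per AM value, so shifting left by AM and
 * testing the sign bit reads a single table cell.  Kernel mode's
 * 0x70000000 therefore answers "TLB mapped" only for AM = 1, 2, 3.
 */
#if 0
static bool example_am_table_cell(int32_t packed, unsigned int am)
{
    return (packed << am) < 0; /* sign bit == cell for this AM */
}
#endif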
static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
                                    int *prot, target_ulong real_address,
                                    int rw, int access_type, int mmu_idx,
                                    unsigned int am, bool eu,
                                    target_ulong segmask,
                                    hwaddr physical_base)
{
    int mapped = is_seg_am_mapped(am, eu, mmu_idx);

    if (mapped < 0) {
        /* is_seg_am_mapped can report TLBRET_BADADDR */
        return mapped;
    } else if (mapped) {
        /* The segment is TLB mapped */
        return env->tlb->map_address(env, physical, prot, real_address, rw,
                                     access_type);
    } else {
        /* The segment is unmapped */
        *physical = physical_base | (real_address & segmask);
        *prot = PAGE_READ | PAGE_WRITE;
        return TLBRET_MATCH;
    }
}
static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
                                       int *prot, target_ulong real_address,
                                       int rw, int access_type, int mmu_idx,
                                       uint16_t segctl, target_ulong segmask)
{
    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
    bool eu = (segctl >> CP0SC_EU) & 1;
    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;

    return get_seg_physical_address(env, physical, prot, real_address, rw,
                                    access_type, mmu_idx, am, eu, segmask,
                                    pa & ~(hwaddr)segmask);
}
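
/*
 * Illustrative sketch, not built: decoding one 16-bit SegCtl CFG field
 * the way get_segctl_physical_address() does.  The PA field, shifted up
 * by 20, gives the physical base of an unmapped segment; AM selects the
 * access mode and EU the error-condition behaviour.
 */
#if 0
static void segctl_decode_example(uint16_t segctl)
{
    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
    bool eu = (segctl >> CP0SC_EU) & 1;
    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;

    (void)am; (void)eu; (void)pa;
}
#endif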
static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong real_address,
                                 int rw, int access_type, int mmu_idx)
{
    /* User mode can only access useg/xuseg */
#if defined(TARGET_MIPS64)
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

    if (mips_um_ksegs_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        uint16_t segctl;

        if (address >= 0x40000000UL) {
            segctl = env->CP0_SegCtl2;
        } else {
            segctl = env->CP0_SegCtl2 >> 16;
        }
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx, segctl,
                                          0x3FFFFFFF);
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            /* KX/SX/UX bit to check for each xkphys EVA access mode */
            static const uint8_t am_ksux[8] = {
                [CP0SC_AM_UK]    = (1u << CP0St_KX),
                [CP0SC_AM_MK]    = (1u << CP0St_KX),
                [CP0SC_AM_MSK]   = (1u << CP0St_SX),
                [CP0SC_AM_MUSK]  = (1u << CP0St_UX),
                [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
                [CP0SC_AM_USK]   = (1u << CP0St_SX),
                [6]              = (1u << CP0St_KX),
                [CP0SC_AM_UUSK]  = (1u << CP0St_UX),
            };
            unsigned int am = CP0SC_AM_UK;
            unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;

            if (xr & (1 << ((address >> 59) & 0x7))) {
                am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
            }
            /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
            if (env->CP0_Status & am_ksux[am]) {
                ret = get_seg_physical_address(env, physical, prot,
                                               real_address, rw, access_type,
                                               mmu_idx, am, false, env->PAMask,
                                               0);
            } else {
                ret = TLBRET_BADADDR;
            }
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < KSEG1_BASE) {
        /* kseg0 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
    } else if (address < KSEG2_BASE) {
        /* kseg1 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1, 0x1FFFFFFF);
    } else if (address < KSEG3_BASE) {
        /* sseg (kseg2) */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
    } else {
        /* kseg3 */
        /* XXX: debug segment is not emulated */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0, 0x1FFFFFFF);
    }
    return ret;
}
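
/*
 * Illustrative sketch, not built: the legacy 32-bit segments tested
 * above, from low to high virtual address.
 */
#if 0
static const char *mips32_segment_name_example(uint32_t va)
{
    if (va <= 0x7FFFFFFFU) {
        return "useg";  /* SegCtl2, TLB mapped by default */
    } else if (va <= 0x9FFFFFFFU) {
        return "kseg0"; /* SegCtl1 >> 16, unmapped by default */
    } else if (va <= 0xBFFFFFFFU) {
        return "kseg1"; /* SegCtl1, unmapped, uncached by default */
    } else if (va <= 0xDFFFFFFFU) {
        return "sseg";  /* SegCtl0 >> 16 */
    } else {
        return "kseg3"; /* SegCtl0 */
    }
}
#endif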
void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush(CPU(cpu));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
/* Called for updates to CP0_Status. */
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                     | (1 << CP0TCSt_TCU2)
                     | (1 << CP0TCSt_TCU1)
                     | (1 << CP0TCSt_TCU0)
                     | (1 << CP0TCSt_TMX)
                     | (3 << CP0TCSt_TKSU)
                     | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
}
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    target_ulong old = env->CP0_Status;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
#if defined(TARGET_MIPS64)
        uint32_t ksux = (1 << CP0St_KX) & val;
        ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
        ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
        val = (val & ~(7 << CP0St_UX)) | ksux;
#endif
        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (old & ~mask) | (val & mask);
#if defined(TARGET_MIPS64)
    if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
        /* Access to at least one of the 64-bit segments has been disabled */
        tlb_flush(CPU(mips_env_get_cpu(env)));
    }
#endif
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    } else {
        compute_hflags(env);
    }
}
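
/*
 * Illustrative sketch, not built: the R6 KX/SX/UX cascade above.
 * Clearing KX forces SX to 0 and clearing SX forces UX to 0, so only
 * the values 0b000, 0b100, 0b110 and 0b111 can land in Status[7:5].
 */
#if 0
static uint32_t r6_ksux_example(uint32_t val)
{
    uint32_t ksux = (1 << CP0St_KX) & val;
    ksux |= (ksux >> 1) & val; /* SX survives only if KX is set */
    ksux |= (ksux >> 1) & val; /* UX survives only if SX is set */
    return (val & ~(7 << CP0St_UX)) | ksux;
}
#endif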
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask &= ~((1 << CP0Ca_WP) & val);
    }

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(env);
        } else {
            cpu_mips_start_count(env);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
#endif
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                int rw, int tlb_error)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));
    int exception = 0, error_code = 0;

    if (rw == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        /* PTEBase */ (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
        /* R */       (extract64(address, 62, 2) << (env->SEGBITS - 9)) |
        /* BadVPN2 */ (extract64(address, 13, env->SEGBITS - 13) << 4);
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}
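
/*
 * Worked example for the CP0_Context update above: for a faulting
 * address 0x00402000, VPN2 is 0x00402000 >> 13 = 0x201, and
 * (0x00402000 >> 9) & 0x007ffff0 = 0x2010 = 0x201 << 4, i.e. BadVPN2
 * lands in Context bits 22..4.
 */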
#if !defined(CONFIG_USER_ONLY)
hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif
#if !defined(CONFIG_USER_ONLY)
#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions. When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is caused by AddressTranslation or LoadMemory
 * functions, the exception is not taken, a silent exit is taken,
 * resulting in a TLB or XTLB Refill exception.
 */
static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
                    uint64_t *pte)
{
    if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
        return false;
    }
    if (entry_size == 64) {
        *pte = cpu_ldq_code(env, vaddr);
    } else {
        *pte = cpu_ldl_code(env, vaddr);
    }
    return true;
}
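
/*
 * Worked example for the alignment test in get_pte(): a 64-bit entry
 * has entry_size >> 3 == 8, so 0x80000004 & (8 - 1) == 4 rejects the
 * misaligned fetch before any memory access is made.
 */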
static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
                                     int entry_size, int ptei)
{
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;
    uint32_t direntry_size = 1 << (directory_shift + 3);
    uint32_t leafentry_size = 1 << (leaf_shift + 3);
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = (1ULL << w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_shift;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     ACCESS_INT, cpu_mmu_index(env, false)) !=
                                     TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}
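
/*
 * Worked example for the shifts above: with PWSize.PS = 0 (native
 * 32-bit pointers) and PTEW = 0, native_shift is 2, so directory and
 * leaf entries are both 1 << (2 + 3) = 32 bits wide; PTEW = 1 doubles
 * the leaf entry to 64 bits.
 */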
static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw,
        int mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /* For the 32-bit architectures, this bit is fixed to 0. */
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;

    /* HTW Shift values (depend on entry size) */
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;

    /* Offsets into tables */
    int goffset = gindex << directory_shift;
    int uoffset = uindex << directory_shift;
    int moffset = mindex << directory_shift;
    int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
    int ptoffset1 = ptoffset0 | (1 << (leaf_shift));

    uint32_t leafentry_size = 1 << (leaf_shift + 3);

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    uint64_t m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if ((directory_shift == -1) || (leaf_shift == -1)) {
        return false;
    }

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:

    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit)
        {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> 12;
    update_pagemask(env, pw_pagemask << 13, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
#endif
#endif
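
/*
 * Illustrative sketch, not built: index extraction as performed above.
 * With hypothetical PWField.GDW = 30 and PWSize.GDW = 2, the global
 * directory index is simply va[31:30].
 */
#if 0
static int htw_gindex_example(uint32_t va)
{
    return (va >> 30) & ((1 << 2) - 1);
}
#endif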
int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                              int mmu_idx)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
    int access_type;
#endif
    int ret = TLBRET_BADADDR;

#if 0
    log_cpu_state(cs, 0);
#endif
    qemu_log_mask(CPU_LOG_MMU,
              "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
              __func__, env->active_tc.PC, address, rw, mmu_idx);

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state() correctly */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot,
                               address, rw, access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                     mmu_idx, TARGET_PAGE_SIZE);
        ret = 0;
        return ret;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int mode = (env->hflags & MIPS_HFLAG_KSU);
        bool ret_walker;
        env->hflags &= ~MIPS_HFLAG_KSU;
        ret_walker = page_table_walk_refill(env, address, rw, mmu_idx);
        env->hflags |= mode;
        if (ret_walker) {
            ret = get_physical_address(env, &physical, &prot,
                                       address, rw, access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
                             mmu_idx, TARGET_PAGE_SIZE);
                ret = 0;
                return ret;
            }
        }
    }
#endif
#endif
    raise_mmu_exception(env, address, rw, ret);
    ret = 1;

    return ret;
}
#if !defined(CONFIG_USER_ONLY)
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
{
    hwaddr physical;
    int prot;
    int access_type;
    int ret = 0;

    /* data access */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
                               cpu_mmu_index(env, false));
    if (ret != TLBRET_MATCH) {
        raise_mmu_exception(env, address, rw, ret);
        return -1LL;
    } else {
        return physical;
    }
}
#endif
static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset",
    [EXCP_SRESET] = "soft reset",
    [EXCP_DSS] = "debug single step",
    [EXCP_DINT] = "debug interrupt",
    [EXCP_NMI] = "non-maskable interrupt",
    [EXCP_MCHECK] = "machine check",
    [EXCP_EXT_INTERRUPT] = "interrupt",
    [EXCP_DFWATCH] = "deferred watchpoint",
    [EXCP_DIB] = "debug instruction breakpoint",
    [EXCP_IWATCH] = "instruction fetch watchpoint",
    [EXCP_AdEL] = "address error load",
    [EXCP_AdES] = "address error store",
    [EXCP_TLBF] = "TLB refill",
    [EXCP_IBE] = "instruction bus error",
    [EXCP_DBp] = "debug breakpoint",
    [EXCP_SYSCALL] = "syscall",
    [EXCP_BREAK] = "break",
    [EXCP_CpU] = "coprocessor unusable",
    [EXCP_RI] = "reserved instruction",
    [EXCP_OVERFLOW] = "arithmetic overflow",
    [EXCP_TRAP] = "trap",
    [EXCP_FPE] = "floating point",
    [EXCP_DDBS] = "debug data break store",
    [EXCP_DWATCH] = "data watchpoint",
    [EXCP_LTLBL] = "TLB modify",
    [EXCP_TLBL] = "TLB load",
    [EXCP_TLBS] = "TLB store",
    [EXCP_DBE] = "data bus error",
    [EXCP_DDBL] = "debug data break load",
    [EXCP_THREAD] = "thread",
    [EXCP_MDMX] = "MDMX",
    [EXCP_C2E] = "precise coprocessor 2",
    [EXCP_CACHE] = "cache error",
    [EXCP_TLBXI] = "TLB execute-inhibit",
    [EXCP_TLBRI] = "TLB read-inhibit",
    [EXCP_MSADIS] = "MSA disabled",
    [EXCP_MSAFPE] = "MSA floating point",
};
target_ulong exception_resume_pc (CPUMIPSState *env)
{
    target_ulong bad_pc;
    target_ulong isa_mode;

    isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
    bad_pc = env->active_tc.PC | isa_mode;
    if (env->hflags & MIPS_HFLAG_BMASK) {
        /* If the exception was raised from a delay slot, come back to
           the jump. */
        bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
    }

    return bad_pc;
}
#if !defined(CONFIG_USER_ONLY)
static void set_hflags_for_handler (CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode. */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose. */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3
                           & (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}
static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}
#endif
void mips_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;
    const char *name = "unknown";

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, name);
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /* Debug single step cannot be raised inside a delay slot and
           resume will always occur on the next instruction
           (but we assume the pc has always been updated during
           code translation). */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /* For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines. */
                    vector = pending;
                } else {
                    /* Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts. */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS64R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    cs->exception_index = EXCP_NONE;
}
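
/*
 * Worked example for the vectored-interrupt offset computed above:
 * IntCtl.VS is scaled by 32 bytes, so spacing = 1 gives 0x20-byte
 * steps and vector 3 is dispatched at 0x200 + 3 * (1 << 5) = 0x260
 * from the exception base.
 */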
bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);
    CPUState *cs;
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
       flush these entries again. */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /* For tlbwr, we can shadow the discarded entry into
           a new (fake) TLB entry, as long as the guest can not
           tell that it's there. */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        cs = CPU(cpu);
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        cs = CPU(cpu);
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
#endif
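
/*
 * Worked example for the flush range above: a 16 KiB PageMask of
 * 0x6000 gives mask = 0x7FFF, so the even page spans VPN .. VPN|0x3FFF
 * and the odd page starts (mask >> 1) + 1 = 0x4000 above the VPN.
 */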
void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                          uint32_t exception,
                                          int error_code,
                                          uintptr_t pc)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    if (exception < EXCP_SC) {
        qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
                      __func__, exception, error_code);
    }
    cs->exception_index = exception;
    env->error_code = error_code;

    cpu_loop_exit_restore(cs, pc);
}