/*
 *  MIPS emulation helpers for qemu.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"
#include "qapi/qapi-commands-machine-target.h"
enum {
    TLBRET_XI = -6,
    TLBRET_RI = -5,
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};

#if !defined(CONFIG_USER_ONLY)

/* no MMU emulation */
int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}
/* fixed mapping MMU emulation */
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, int rw, int access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}
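
/*
 * Worked example (illustrative, not part of the original file): with
 * CP0.Status.ERL clear, a useg address such as 0x00400000 is mapped to
 * physical 0x40400000 (address + 0x40000000), while with ERL set it maps
 * 1:1. An address in the 0x80000000..0xBFFFFFFF window, e.g. 0x9FC00000,
 * is reduced to 0x1FC00000 (address & 0x1FFFFFFF); anything above passes
 * through unchanged.
 */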
/* MIPS32/MIPS64 R4000-style MMU emulation */
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, int rw, int access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    int i;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}
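
/*
 * Worked example (illustrative, not part of the original file): with the
 * default 4 KiB pages, PageMask is 0 and mask = ~(TARGET_PAGE_MASK << 1) =
 * 0x1FFF, so each TLB entry spans an 8 KiB even/odd page pair. For address
 * 0x00401234, tag = address & ~mask = 0x00400000 is compared against the
 * entry's VPN, and n = !!(address & mask & ~(mask >> 1)) =
 * !!(0x1234 & 0x1000) = 1 selects the odd page's PFN[1] and V1/D1/RI1/XI1
 * bits.
 */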
static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
{
    /*
     * Interpret access control mode and mmu_idx.
     *           AdE?     TLB?
     *      AM  K S U E  K S U E
     * UK    0  0 1 1 0  0 - - 0
     * MK    1  0 1 1 0  1 - - !eu
     * MSK   2  0 0 1 0  1 1 - !eu
     * MUSK  3  0 0 0 0  1 1 1 !eu
     * MUSUK 4  0 0 0 0  0 1 1 0
     * USK   5  0 0 1 0  0 0 - 0
     * -     6  - - - -  - - - -
     * UUSK  7  0 0 0 0  0 0 0 0
     */
    int32_t adetlb_mask;

    switch (mmu_idx) {
    case 3: /* ERL */
        /* If EU is set, always unmapped */
        if (eu) {
            return 0;
        }
        /* fall through */
    case MIPS_HFLAG_KM:
        /* Never AdE, TLB mapped if AM={1,2,3} */
        adetlb_mask = 0x70000000;
        goto check_tlb;

    case MIPS_HFLAG_SM:
        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
        adetlb_mask = 0xc0380000;
        goto check_ade;

    case MIPS_HFLAG_UM:
        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
        adetlb_mask = 0xe4180000;
        goto check_ade;

    check_ade:
        /* does this AM cause AdE in current execution mode */
        if ((adetlb_mask << am) < 0) {
            return TLBRET_BADADDR;
        }
        /* AM does not cause AdE; move on to the TLB-mapped bits */
        adetlb_mask <<= 8;
        /* fall through */
    check_tlb:
        /* is this AM mapped in current execution mode */
        return ((adetlb_mask << am) < 0);
    default:
        assert(0);
        return TLBRET_BADADDR;
    };
}
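
/*
 * Worked example (illustrative, not part of the original file): in user
 * mode (MIPS_HFLAG_UM) adetlb_mask starts as 0xe4180000. Bits 31..24
 * (0xe4, i.e. AM = {0,1,2,5}) flag AdE; after "adetlb_mask <<= 8" the
 * former bits 23..16 (0x18, i.e. AM = {3,4}) flag TLB mapping. For
 * am = CP0SC_AM_MUSK (3): 0xe4180000 << 3 leaves bit 31 clear, so no AdE;
 * the shifted mask 0x18000000 << 3 has bit 31 set, so the segment is TLB
 * mapped and the function returns 1.
 */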
static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
                                    int *prot, target_ulong real_address,
                                    int rw, int access_type, int mmu_idx,
                                    unsigned int am, bool eu,
                                    target_ulong segmask,
                                    hwaddr physical_base)
{
    int mapped = is_seg_am_mapped(am, eu, mmu_idx);

    if (mapped < 0) {
        /* is_seg_am_mapped can report TLBRET_BADADDR */
        return mapped;
    } else if (mapped) {
        /* The segment is TLB mapped */
        return env->tlb->map_address(env, physical, prot, real_address, rw,
                                     access_type);
    } else {
        /* The segment is unmapped */
        *physical = physical_base | (real_address & segmask);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }
}
static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
                                       int *prot, target_ulong real_address,
                                       int rw, int access_type, int mmu_idx,
                                       uint16_t segctl, target_ulong segmask)
{
    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
    bool eu = (segctl >> CP0SC_EU) & 1;
    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;

    return get_seg_physical_address(env, physical, prot, real_address, rw,
                                    access_type, mmu_idx, am, eu, segmask,
                                    pa & ~(hwaddr)segmask);
}
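
/*
 * Illustrative note (not part of the original file): segctl is one 16-bit
 * half of a CP0 SegCtl register, carrying the segment's access-control
 * mode (AM), its EU bit, and a physical-base field that is shifted left by
 * 20 bits here. Masking the base with ~(hwaddr)segmask keeps only the bits
 * above the segment size, so for a 512 MiB segment (segmask 0x1FFFFFFF)
 * the low 29 bits of the translated address always come from real_address.
 */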
static int get_physical_address(CPUMIPSState *env, hwaddr *physical,
                                int *prot, target_ulong real_address,
                                int rw, int access_type, int mmu_idx)
{
    /* User mode can only access useg/xuseg */
#if defined(TARGET_MIPS64)
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

    if (mips_um_ksegs_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        uint16_t segctl;

        if (address >= 0x40000000UL) {
            segctl = env->CP0_SegCtl2;
        } else {
            segctl = env->CP0_SegCtl2 >> 16;
        }
        ret = get_segctl_physical_address(env, physical, prot,
                                          real_address, rw, access_type,
                                          mmu_idx, segctl, 0x3FFFFFFF);
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            /* KX/SX/UX bit to check for each xkphys EVA access mode */
            static const uint8_t am_ksux[8] = {
                [CP0SC_AM_UK]    = (1u << CP0St_KX),
                [CP0SC_AM_MK]    = (1u << CP0St_KX),
                [CP0SC_AM_MSK]   = (1u << CP0St_SX),
                [CP0SC_AM_MUSK]  = (1u << CP0St_UX),
                [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
                [CP0SC_AM_USK]   = (1u << CP0St_SX),
                [6]              = (1u << CP0St_KX),
                [CP0SC_AM_UUSK]  = (1u << CP0St_UX),
            };
            unsigned int am = CP0SC_AM_UK;
            unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;

            if (xr & (1 << ((address >> 59) & 0x7))) {
                am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
            }
            /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
            if (env->CP0_Status & am_ksux[am]) {
                ret = get_seg_physical_address(env, physical, prot,
                                               real_address, rw, access_type,
                                               mmu_idx, am, false, env->PAMask,
                                               address & env->PAMask);
            } else {
                ret = TLBRET_BADADDR;
            }
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < KSEG1_BASE) {
        /* kseg0 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
    } else if (address < KSEG2_BASE) {
        /* kseg1 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1, 0x1FFFFFFF);
    } else if (address < KSEG3_BASE) {
        /* sseg (kseg2) */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
    } else {
        /*
         * kseg3
         * XXX: debug segment is not emulated
         */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0, 0x1FFFFFFF);
    }
    return ret;
}
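
/*
 * Summary of the legacy MIPS32 layout handled above (illustrative): useg
 * 0x00000000..0x7FFFFFFF (mapped, CFG/UCFG via SegCtl2), kseg0
 * 0x80000000..0x9FFFFFFF and kseg1 0xA0000000..0xBFFFFFFF (classically
 * unmapped windows onto physical 0x00000000..0x1FFFFFFF), sseg/kseg2
 * 0xC0000000..0xDFFFFFFF and kseg3 0xE0000000..0xFFFFFFFF (mapped). With
 * EVA, SegCtl0..2 can override each 512 MiB segment's behaviour.
 */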
void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
#endif /* !CONFIG_USER_ONLY */
/* Called for updates to CP0_Status.  */
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                     | (1 << CP0TCSt_TCU2)
                     | (1 << CP0TCSt_TCU1)
                     | (1 << CP0TCSt_TCU0)
                     | (1 << CP0TCSt_TMX)
                     | (3 << CP0TCSt_TKSU)
                     | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
}
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    target_ulong old = env->CP0_Status;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
#if defined(TARGET_MIPS64)
        uint32_t ksux = (1 << CP0St_KX) & val;
        ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
        ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
        val = (val & ~(7 << CP0St_UX)) | ksux;
#endif
        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (old & ~mask) | (val & mask);
#if defined(TARGET_MIPS64)
    if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
        /* Access to at least one of the 64-bit segments has been disabled */
        tlb_flush(env_cpu(env));
    }
#endif
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    }
}
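
/*
 * Worked example (illustrative, not part of the original file) of the R6
 * KSUX cascade above, using CP0St_UX = 5, CP0St_SX = 6, CP0St_KX = 7:
 * writing a val with KX and UX set but SX clear gives ksux = 0x80 after the
 * first line; (ksux >> 1) & val is 0 because SX is clear, so SX stays 0 and
 * the next line then forces UX to 0 as well. The value actually stored has
 * only KX set, preserving the architectural rule KX >= SX >= UX.
 */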
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask &= ~((1 << CP0Ca_WP) & val);
    }

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);
    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(env);
        } else {
            cpu_mips_start_count(env);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                int rw, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (rw == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
        (extract64(address, 62, 2) << (env->SEGBITS - 9)) |     /* R */
        (extract64(address, 13, env->SEGBITS - 13) << 4);       /* BadVPN2 */
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}
#if !defined(CONFIG_USER_ONLY)
hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif
#if !defined(CONFIG_USER_ONLY)
#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions. When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is caused by AddressTranslation or LoadMemory
 * functions, the exception is not taken, a silent exit is taken,
 * resulting in a TLB or XTLB Refill exception.
 */
static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
                    uint64_t *pte)
{
    if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
        return false;
    }
    if (entry_size == 64) {
        *pte = cpu_ldq_code(env, vaddr);
    } else {
        *pte = cpu_ldl_code(env, vaddr);
    }
    return true;
}
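
/*
 * Illustrative note (not part of the original file): the alignment check
 * above uses the entry size in bytes minus one as a mask. For
 * entry_size == 64, (64 >> 3) - 1 == 7, so vaddr must be 8-byte aligned;
 * for 32-bit entries the mask is 3 and 4-byte alignment is required. A
 * misaligned PTE address makes the walker bail out silently.
 */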
static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
                                     int entry_size, int ptei)
{
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result <<= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}
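
/*
 * Illustrative trace (not part of the original file): with ptei = 6 and
 * entry = 0xb0, "result >>= (ptei - 2)" yields 0xb; rixi = 0x3 captures the
 * RI/XI pair; the subsequent ">>= 2; <<= 2" clears those two bits from the
 * low part, and "rixi << CP0EnLo_XI" re-inserts them at the architectural
 * RI/XI positions of the EntryLo register layout.
 */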
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;
    uint32_t direntry_size = 1 << (directory_shift + 3);
    uint32_t leafentry_size = 1 << (leaf_shift + 3);
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = (1 << w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_shift;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     ACCESS_INT, cpu_mmu_index(env, false)) !=
                                     TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}
static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw,
        int mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /* For the 32-bit architectures, this bit is fixed to 0. */
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;

    /* HTW Shift values (depend on entry size) */
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;

    /* Offsets into tables */
    int goffset = gindex << directory_shift;
    int uoffset = uindex << directory_shift;
    int moffset = mindex << directory_shift;
    int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
    int ptoffset1 = ptoffset0 | (1 << (leaf_shift));

    uint32_t leafentry_size = 1 << (leaf_shift + 3);

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    uint64_t m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if ((directory_shift == -1) || (leaf_shift == -1)) {
        return false;
    }

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:
    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit) {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> 12;
    update_pagemask(env, pw_pagemask << 13, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
#endif /* !TARGET_MIPS64 */
#endif /* !CONFIG_USER_ONLY */
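
/*
 * Worked example (illustrative, not part of the original file): with
 * PWSize.PS = 0 the native entry is 4 bytes (native_shift = 2) and, for
 * ptew = 0, leaf_shift = 2 and leafentry_size = 32. A faulting address with
 * ptindex = 5 gives ptoffset0 = (5 >> 1) << 3 = 16 and
 * ptoffset1 = 16 | 4 = 20: the even PTE at byte offset 16 fills EntryLo0
 * and the odd PTE at offset 20 fills EntryLo1 before the TLBWR-style
 * refill.
 */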
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
    int mips_access_type;
#endif
    int ret = TLBRET_BADADDR;

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state() correctly */
    mips_access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mips_access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int mode = (env->hflags & MIPS_HFLAG_KSU);
        bool ret_walker;
        env->hflags &= ~MIPS_HFLAG_KSU;
        ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx);
        env->hflags |= mode;
        if (ret_walker) {
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mips_access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        return false;
    }
#endif

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
#ifndef CONFIG_USER_ONLY
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  int rw)
{
    hwaddr physical;
    int prot;
    int access_type;
    int ret = 0;

    /* data access */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
                               cpu_mmu_index(env, false));
    if (ret != TLBRET_MATCH) {
        raise_mmu_exception(env, address, rw, ret);
        return -1LL;
    } else {
        return physical;
    }
}
#endif
static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset",
    [EXCP_SRESET] = "soft reset",
    [EXCP_DSS] = "debug single step",
    [EXCP_DINT] = "debug interrupt",
    [EXCP_NMI] = "non-maskable interrupt",
    [EXCP_MCHECK] = "machine check",
    [EXCP_EXT_INTERRUPT] = "interrupt",
    [EXCP_DFWATCH] = "deferred watchpoint",
    [EXCP_DIB] = "debug instruction breakpoint",
    [EXCP_IWATCH] = "instruction fetch watchpoint",
    [EXCP_AdEL] = "address error load",
    [EXCP_AdES] = "address error store",
    [EXCP_TLBF] = "TLB refill",
    [EXCP_IBE] = "instruction bus error",
    [EXCP_DBp] = "debug breakpoint",
    [EXCP_SYSCALL] = "syscall",
    [EXCP_BREAK] = "break",
    [EXCP_CpU] = "coprocessor unusable",
    [EXCP_RI] = "reserved instruction",
    [EXCP_OVERFLOW] = "arithmetic overflow",
    [EXCP_TRAP] = "trap",
    [EXCP_FPE] = "floating point",
    [EXCP_DDBS] = "debug data break store",
    [EXCP_DWATCH] = "data watchpoint",
    [EXCP_LTLBL] = "TLB modify",
    [EXCP_TLBL] = "TLB load",
    [EXCP_TLBS] = "TLB store",
    [EXCP_DBE] = "data bus error",
    [EXCP_DDBL] = "debug data break load",
    [EXCP_THREAD] = "thread",
    [EXCP_MDMX] = "MDMX",
    [EXCP_C2E] = "precise coprocessor 2",
    [EXCP_CACHE] = "cache error",
    [EXCP_TLBXI] = "TLB execute-inhibit",
    [EXCP_TLBRI] = "TLB read-inhibit",
    [EXCP_MSADIS] = "MSA disabled",
    [EXCP_MSAFPE] = "MSA floating point",
};
target_ulong exception_resume_pc(CPUMIPSState *env)
{
    target_ulong bad_pc;
    target_ulong isa_mode;

    isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
    bad_pc = env->active_tc.PC | isa_mode;
    if (env->hflags & MIPS_HFLAG_BMASK) {
        /*
         * If the exception was raised from a delay slot, come back to
         * the jump.
         */
        bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
    }

    return bad_pc;
}
#if !defined(CONFIG_USER_ONLY)
static void set_hflags_for_handler(CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode.  */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose.  */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3 &
                           (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}
static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}
#endif
void mips_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        const char *name;

        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, name);
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /*
         * Debug single step cannot be raised inside a delay slot and
         * resume will always occur on the next instruction
         * (but we assume the pc has always been updated during
         * code translation).
         */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /*
                     * For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.
                     */
                    vector = pending;
                } else {
                    /*
                     * Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts.
                     */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS64R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    cs->exception_index = EXCP_NONE;
}
bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID changes, so no need to
     * flush these entries again.
     */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can shadow the discarded entry into
         * a new (fake) TLB entry, as long as the guest can not
         * tell that it's there.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
#endif
void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                          uint32_t exception,
                                          int error_code,
                                          uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
                  __func__, exception, error_code);
    cs->exception_index = exception;
    env->error_code = error_code;

    cpu_loop_exit_restore(cs, pc);
}
static void mips_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_MIPS_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}
CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_MIPS_CPU, false);
    g_slist_foreach(list, mips_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}