/*
 * MIPS emulation helpers for qemu.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"
#include "qapi/qapi-commands-machine-target.h"

#if !defined(CONFIG_USER_ONLY)

/* no MMU emulation */
int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}

/* fixed mapping MMU emulation */
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, int rw, int access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}
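
/*
 * Note on the fixed mapping above: useg is offset by 1 GiB into the
 * physical address space unless Status.ERL is set (in which case it maps
 * 1:1 for bootstrap), while the 0x80000000..0xBFFFFFFF window simply
 * strips the segment bits, so kseg0 and kseg1 both alias the low
 * 512 MiB of physical memory.
 */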

/* MIPS32/MIPS64 R4000-style MMU emulation */
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, int rw, int access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID/MMID, virtual page number & size */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}
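
/*
 * All three map_address implementations above share one contract: on
 * success they fill in *physical/*prot and return TLBRET_MATCH (0);
 * otherwise they return a negative TLBRET_* code, which
 * raise_mmu_exception() below converts into the architectural exception.
 */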

static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
{
    /*
     * Interpret access control mode and mmu_idx.
     *           AdE?     TLB?
     *      AM  K S U E  K S U E
     * UK    0  0 1 1 0  0 - - 0
     * MK    1  0 1 1 0  1 - - !eu
     * MSK   2  0 0 1 0  1 1 - !eu
     * MUSK  3  0 0 0 0  1 1 1 !eu
     * MUSUK 4  0 0 0 0  0 1 1 0
     * USK   5  0 0 1 0  0 0 - 0
     * -     6  - - - -  - - - -
     * UUSK  7  0 0 0 0  0 0 0 0
     */
    int32_t adetlb_mask;

    switch (mmu_idx) {
    case 3: /* ERL */
        /* If EU is set, always unmapped */
        if (eu) {
            return 0;
        }
        /* fall through */
    case MIPS_HFLAG_KM:
        /* Never AdE, TLB mapped if AM={1,2,3} */
        adetlb_mask = 0x70000000;
        goto check_tlb;

    case MIPS_HFLAG_SM:
        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
        adetlb_mask = 0xc0380000;
        goto check_ade;

    case MIPS_HFLAG_UM:
        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
        adetlb_mask = 0xe4180000;
        goto check_ade;

    check_ade:
        /* does this AM cause AdE in current execution mode */
        if ((adetlb_mask << am) < 0) {
            return TLBRET_BADADDR;
        }
        adetlb_mask <<= 8;
        /* fall through */
    check_tlb:
        /* is this AM mapped in current execution mode */
        return ((adetlb_mask << am) < 0);
    default:
        assert(0);
        return TLBRET_BADADDR;
    };
}
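
/*
 * Worked example of the sign-bit trick above: in kernel mode
 * adetlb_mask = 0x70000000 (bits 30..28 set), so (adetlb_mask << am)
 * goes negative exactly for AM = 1, 2 or 3 (MK/MSK/MUSK) -- the three
 * modes the table marks as kernel TLB mapped -- while AM = 0 (UK)
 * stays positive and the segment is treated as unmapped.
 */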

static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
                                    int *prot, target_ulong real_address,
                                    int rw, int access_type, int mmu_idx,
                                    unsigned int am, bool eu,
                                    target_ulong segmask,
                                    hwaddr physical_base)
{
    int mapped = is_seg_am_mapped(am, eu, mmu_idx);

    if (mapped < 0) {
        /* is_seg_am_mapped can report TLBRET_BADADDR */
        return mapped;
    } else if (mapped) {
        /* The segment is TLB mapped */
        return env->tlb->map_address(env, physical, prot, real_address, rw,
                                     access_type);
    } else {
        /* The segment is unmapped */
        *physical = physical_base | (real_address & segmask);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }
}

static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
                                       int *prot, target_ulong real_address,
                                       int rw, int access_type, int mmu_idx,
                                       uint16_t segctl, target_ulong segmask)
{
    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
    bool eu = (segctl >> CP0SC_EU) & 1;
    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;

    return get_seg_physical_address(env, physical, prot, real_address, rw,
                                    access_type, mmu_idx, am, eu, segmask,
                                    pa & ~(hwaddr)segmask);
}
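
/*
 * Each 32-bit SegCtl register packs two 16-bit segment configurations;
 * callers pass either the low half or the ">> 16" half of SegCtl0..2
 * depending on which legacy segment is being translated (see
 * get_physical_address() below).
 */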

static int get_physical_address(CPUMIPSState *env, hwaddr *physical,
                                int *prot, target_ulong real_address,
                                int rw, int access_type, int mmu_idx)
{
    /* User mode can only access useg/xuseg */
#if defined(TARGET_MIPS64)
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

    if (mips_um_ksegs_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        uint16_t segctl;

        if (address >= 0x40000000UL) {
            segctl = env->CP0_SegCtl2;
        } else {
            segctl = env->CP0_SegCtl2 >> 16;
        }
        ret = get_segctl_physical_address(env, physical, prot,
                                          real_address, rw, access_type,
                                          mmu_idx, segctl, 0x3FFFFFFF);
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            /* KX/SX/UX bit to check for each xkphys EVA access mode */
            static const uint8_t am_ksux[8] = {
                [CP0SC_AM_UK]    = (1u << CP0St_KX),
                [CP0SC_AM_MK]    = (1u << CP0St_KX),
                [CP0SC_AM_MSK]   = (1u << CP0St_SX),
                [CP0SC_AM_MUSK]  = (1u << CP0St_UX),
                [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
                [CP0SC_AM_USK]   = (1u << CP0St_SX),
                [6]              = (1u << CP0St_KX),
                [CP0SC_AM_UUSK]  = (1u << CP0St_UX),
            };
            unsigned int am = CP0SC_AM_UK;
            unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;

            if (xr & (1 << ((address >> 59) & 0x7))) {
                am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
            }
            /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
            if (env->CP0_Status & am_ksux[am]) {
                ret = get_seg_physical_address(env, physical, prot,
                                               real_address, rw, access_type,
                                               mmu_idx, am, false, env->PAMask,
                                               address & env->PAMask);
            } else {
                ret = TLBRET_BADADDR;
            }
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < KSEG1_BASE) {
        /* kseg0 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
    } else if (address < KSEG2_BASE) {
        /* kseg1 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1, 0x1FFFFFFF);
    } else if (address < KSEG3_BASE) {
        /* sseg (kseg2) */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
    } else {
        /*
         * kseg3
         * XXX: debug segment is not emulated
         */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0, 0x1FFFFFFF);
    }
    return ret;
}
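
/*
 * Quick reference for the legacy 32-bit segments handled above:
 * useg/kuseg 0x00000000..0x7FFFFFFF, kseg0 0x80000000..0x9FFFFFFF,
 * kseg1 0xA0000000..0xBFFFFFFF, sseg/kseg2 0xC0000000..0xDFFFFFFF and
 * kseg3 0xE0000000..0xFFFFFFFF, each governed by its SegCtl CFG field.
 */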

void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
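
/*
 * Resetting tlb_in_use to nb_tlb above also discards the shadow entries
 * that r4k_invalidate_tlb() may have appended past the architectural
 * TLB size.
 */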

/* Called for updates to CP0_Status. */
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                       | (1 << CP0TCSt_TCU2)
                       | (1 << CP0TCSt_TCU1)
                       | (1 << CP0TCSt_TCU0)
                       | (1 << CP0TCSt_TMX)
                       | (3 << CP0TCSt_TKSU)
                       | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
}

void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    target_ulong old = env->CP0_Status;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
#if defined(TARGET_MIPS64)
        uint32_t ksux = (1 << CP0St_KX) & val;
        ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
        ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
        val = (val & ~(7 << CP0St_UX)) | ksux;
#endif
        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (old & ~mask) | (val & mask);
#if defined(TARGET_MIPS64)
    if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
        /* Access to at least one of the 64-bit segments has been disabled */
        tlb_flush(env_cpu(env));
    }
#endif
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    }
}
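
/*
 * Example of the R6 cascade above: a write with KX=1, SX=0, UX=1 takes
 * effect as KX=1, SX=0, UX=0, because UX is only allowed through when
 * SX survives, and SX only when KX is set.
 */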

void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask &= ~((1 << CP0Ca_WP) & val);
    }

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(env);
        } else {
            cpu_mips_start_count(env);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}

static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                int rw, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (rw == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
        (extract64(address, 62, 2) << (env->SEGBITS - 9)) |     /* R */
        (extract64(address, 13, env->SEGBITS - 13) << 4);       /* BadVPN2 */
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}
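
/*
 * The CP0_Context update above deposits VA bits 31:13 (BadVPN2) into
 * bits 22:4 -- that is what (address >> 9) & 0x007ffff0 computes --
 * while leaving the PTEBase field in the upper bits untouched.
 */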

#if !defined(CONFIG_USER_ONLY)
hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

#if !defined(CONFIG_USER_ONLY)
#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions. When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is caused by AddressTranslation or LoadMemory
 * functions, the exception is not taken, a silent exit is taken,
 * resulting in a TLB or XTLB Refill exception.
 */
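
/*
 * The walk implemented below proceeds Global -> Upper -> Middle
 * directory and then the leaf PTE pair; every "silent exit" case
 * described above maps to a plain early return (false here, 0 in
 * walk_directory()), so the caller falls back to the normal Refill
 * exception.
 */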

static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
                    uint64_t *pte)
{
    if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
        return false;
    }
    if (entry_size == 64) {
        *pte = cpu_ldq_code(env, vaddr);
    } else {
        *pte = cpu_ldl_code(env, vaddr);
    }
    return true;
}
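
/*
 * entry_size is in bits, so (entry_size >> 3) - 1 is the byte-alignment
 * mask: 64-bit PTEs must be 8-byte aligned and 32-bit PTEs 4-byte
 * aligned, and a misaligned pointer aborts the walk.
 */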

static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
                                     int entry_size, int ptei)
{
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result <<= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}
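
/*
 * Roughly speaking, the shifts above relocate the RI/XI pair from the
 * position that PWField.PTEI points at in the in-memory PTE to the
 * RI/XI position EntryLo expects (CP0EnLo_XI), clearing the low bits
 * they were shifted out of.
 */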

static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;
    uint32_t direntry_size = 1 << (directory_shift + 3);
    uint32_t leafentry_size = 1 << (leaf_shift + 3);
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = BIT_ULL(w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_shift;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     ACCESS_INT, cpu_mmu_index(env, false)) !=
                                     TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}
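
/*
 * walk_directory() returns 0 to abort the walk silently, 1 when a huge
 * page leaf was found (pw_entrylo0/1 already filled in), and 2 when
 * *vaddr now holds the next-level directory pointer.
 */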

static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw,
        int mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /* For the 32-bit architectures, this bit is fixed to 0. */
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;

    /* HTW Shift values (depend on entry size) */
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;

    /* Offsets into tables */
    int goffset = gindex << directory_shift;
    int uoffset = uindex << directory_shift;
    int moffset = mindex << directory_shift;
    int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
    int ptoffset1 = ptoffset0 | (1 << (leaf_shift));

    uint32_t leafentry_size = 1 << (leaf_shift + 3);

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    uint64_t m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if ((directory_shift == -1) || (leaf_shift == -1)) {
        return false;
    }

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:

    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit) {
        case 4:
            m = (1 << pf_gdw) - 1;
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            break;
        }
    }
    pw_pagemask = m >> 12;
    update_pagemask(env, pw_pagemask << 13, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}

bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
    int mips_access_type;
#endif
    int ret = TLBRET_BADADDR;

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state() correctly */
    mips_access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mips_access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int mode = (env->hflags & MIPS_HFLAG_KSU);
        bool ret_walker;
        env->hflags &= ~MIPS_HFLAG_KSU;
        ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx);
        env->hflags |= mode;
        if (ret_walker) {
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mips_access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        return false;
    }
#endif

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
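
/*
 * Note the probe contract above: when probe is true a failed translation
 * just reports false, and only a non-probing fill raises the MMU
 * exception and longjmps out via do_raise_exception_err().
 */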

#ifndef CONFIG_USER_ONLY
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  int rw)
{
    hwaddr physical;
    int prot;
    int access_type;
    int ret = 0;

    /* data access */
    access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
                               cpu_mmu_index(env, false));
    if (ret != TLBRET_MATCH) {
        raise_mmu_exception(env, address, rw, ret);
        return -1LL;
    } else {
        return physical;
    }
}
#endif

static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset",
    [EXCP_SRESET] = "soft reset",
    [EXCP_DSS] = "debug single step",
    [EXCP_DINT] = "debug interrupt",
    [EXCP_NMI] = "non-maskable interrupt",
    [EXCP_MCHECK] = "machine check",
    [EXCP_EXT_INTERRUPT] = "interrupt",
    [EXCP_DFWATCH] = "deferred watchpoint",
    [EXCP_DIB] = "debug instruction breakpoint",
    [EXCP_IWATCH] = "instruction fetch watchpoint",
    [EXCP_AdEL] = "address error load",
    [EXCP_AdES] = "address error store",
    [EXCP_TLBF] = "TLB refill",
    [EXCP_IBE] = "instruction bus error",
    [EXCP_DBp] = "debug breakpoint",
    [EXCP_SYSCALL] = "syscall",
    [EXCP_BREAK] = "break",
    [EXCP_CpU] = "coprocessor unusable",
    [EXCP_RI] = "reserved instruction",
    [EXCP_OVERFLOW] = "arithmetic overflow",
    [EXCP_TRAP] = "trap",
    [EXCP_FPE] = "floating point",
    [EXCP_DDBS] = "debug data break store",
    [EXCP_DWATCH] = "data watchpoint",
    [EXCP_LTLBL] = "TLB modify",
    [EXCP_TLBL] = "TLB load",
    [EXCP_TLBS] = "TLB store",
    [EXCP_DBE] = "data bus error",
    [EXCP_DDBL] = "debug data break load",
    [EXCP_THREAD] = "thread",
    [EXCP_MDMX] = "MDMX",
    [EXCP_C2E] = "precise coprocessor 2",
    [EXCP_CACHE] = "cache error",
    [EXCP_TLBXI] = "TLB execute-inhibit",
    [EXCP_TLBRI] = "TLB read-inhibit",
    [EXCP_MSADIS] = "MSA disabled",
    [EXCP_MSAFPE] = "MSA floating point",
};

target_ulong exception_resume_pc(CPUMIPSState *env)
{
    target_ulong bad_pc;
    target_ulong isa_mode;

    isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
    bad_pc = env->active_tc.PC | isa_mode;
    if (env->hflags & MIPS_HFLAG_BMASK) {
        /*
         * If the exception was raised from a delay slot, come back to
         * the jump.
         */
        bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
    }

    return bad_pc;
}
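
/*
 * Bit 0 of the resume PC carries the compressed-ISA mode (MIPS16 or
 * microMIPS), which is why isa_mode is OR'ed into bad_pc above rather
 * than being tracked separately.
 */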

#if !defined(CONFIG_USER_ONLY)
static void set_hflags_for_handler(CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode. */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose. */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3 &
                           (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}

static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}

void mips_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        const char *name;

        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, name);
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /*
         * Debug single step cannot be raised inside a delay slot and
         * resume will always occur on the next instruction
         * (but we assume the pc has always been updated during
         * code translation).
         */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /*
                     * For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.
                     */
                    vector = pending;
                } else {
                    /*
                     * Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts.
                     */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS64R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    cs->exception_index = EXCP_NONE;
}

bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        MIPSCPU *cpu = MIPS_CPU(cs);
        CPUMIPSState *env = &cpu->env;

        if (cpu_mips_hw_interrupts_enabled(env) &&
            cpu_mips_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCP_EXT_INTERRUPT;
            env->error_code = 0;
            mips_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    target_ulong mask;

    MMID = mi ? MMID : (uint32_t) ASID;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID/MMID changes, so no need to
     * flush these entries again.
     */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    if (tlb->G == 0 && tlb_mmid != MMID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can shadow the discarded entry into
         * a new (fake) TLB entry, as long as the guest can not
         * tell that it's there.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
#endif
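
/*
 * Note that the two loops in r4k_invalidate_tlb() above flush the even
 * (V0) and odd (V1) halves of the TLB pair separately, since each half
 * covers its own virtual range of (mask >> 1) + 1 bytes.
 */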

void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                          uint32_t exception,
                                          int error_code,
                                          uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
                  __func__, exception, error_code);
    cs->exception_index = exception;
    env->error_code = error_code;

    cpu_loop_exit_restore(cs, pc);
}

static void mips_cpu_add_definition(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;
    const char *typename;

    typename = object_class_get_name(oc);
    info = g_malloc0(sizeof(*info));
    info->name = g_strndup(typename,
                           strlen(typename) - strlen("-" TYPE_MIPS_CPU));
    info->q_typename = g_strdup(typename);

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list;

    list = object_class_get_list(TYPE_MIPS_CPU, false);
    g_slist_foreach(list, mips_cpu_add_definition, &cpu_list);
    g_slist_free(list);

    return cpu_list;
}