/*
 * MIPS TLB (Translation lookaside buffer) helpers.
 *
 *  Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internal.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "hw/mips/cpudevs.h"

/* TLB lookup return codes */
enum {
    TLBRET_XI = -6,
    TLBRET_RI = -5,
    TLBRET_DIRTY = -4,
    TLBRET_INVALID = -3,
    TLBRET_NOMATCH = -2,
    TLBRET_BADADDR = -1,
    TLBRET_MATCH = 0
};
#if !defined(CONFIG_USER_ONLY)

/* no MMU emulation */
int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, MMUAccessType access_type)
{
    *physical = address;
    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}
/* fixed mapping MMU emulation */
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, MMUAccessType access_type)
{
    if (address <= (int32_t)0x7FFFFFFFUL) {
        if (!(env->CP0_Status & (1 << CP0St_ERL))) {
            *physical = address + 0x40000000UL;
        } else {
            *physical = address;
        }
    } else if (address <= (int32_t)0xBFFFFFFFUL) {
        *physical = address & 0x1FFFFFFF;
    } else {
        *physical = address;
    }

    *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    return TLBRET_MATCH;
}
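/*
 * Note on the R4000-style lookup below: each TLB entry maps a pair of
 * virtual pages (even/odd), and bit 'n' derived from the address and the
 * entry's PageMask selects which half is used.  The function returns one
 * of the TLBRET_* codes; only TLBRET_MATCH fills *physical and *prot.
 */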
/* MIPS32/MIPS64 R4000-style MMU emulation */
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, MMUAccessType access_type)
{
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    int i;

    MMID = mi ? MMID : (uint32_t) ASID;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID/MMID, virtual page number & size */
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (access_type == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (access_type == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (access_type != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0) {
                    *prot |= PAGE_WRITE;
                }
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}
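/*
 * The *_mmu_init() helpers below install the per-MMU-model translation
 * callback (and, for the R4000 model, the TLB instruction helpers) into
 * the CPUMIPSTLBContext allocated by mmu_init().
 */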
static void no_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &no_mmu_map_address;
}

static void fixed_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1;
    env->tlb->map_address = &fixed_mmu_map_address;
}

static void r4k_mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
    env->tlb->map_address = &r4k_map_address;
    env->tlb->helper_tlbwi = r4k_helper_tlbwi;
    env->tlb->helper_tlbwr = r4k_helper_tlbwr;
    env->tlb->helper_tlbp = r4k_helper_tlbp;
    env->tlb->helper_tlbr = r4k_helper_tlbr;
    env->tlb->helper_tlbinv = r4k_helper_tlbinv;
    env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
}

void mmu_init(CPUMIPSState *env, const mips_def_t *def)
{
    env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));

    switch (def->mmu_type) {
    case MMU_TYPE_NONE:
        no_mmu_init(env, def);
        break;
    case MMU_TYPE_R4000:
        r4k_mmu_init(env, def);
        break;
    case MMU_TYPE_FMT:
        fixed_mmu_init(env, def);
        break;
    default:
        cpu_abort(env_cpu(env), "MMU type not supported\n");
    }
}
static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
{
    /*
     * Interpret access control mode and mmu_idx.
     *           AdE?     TLB?
     *      AM  K S U E  K S U E
     * UK    0  0 1 1 0  0 - - 0
     * MK    1  0 1 1 0  1 - - !eu
     * MSK   2  0 0 1 0  1 1 - !eu
     * MUSK  3  0 0 0 0  1 1 1 !eu
     * MUSUK 4  0 0 0 0  0 1 1 0
     * USK   5  0 0 1 0  0 0 - 0
     * -     6  - - - -  - - - -
     * UUSK  7  0 0 0 0  0 0 0 0
     */
    int32_t adetlb_mask;

    switch (mmu_idx) {
    case 3: /* ERL */
        /* If EU is set, always unmapped */
        if (eu) {
            return 0;
        }
        /* fall through */
    case MIPS_HFLAG_KM:
        /* Never AdE, TLB mapped if AM={1,2,3} */
        adetlb_mask = 0x70000000;
        goto check_tlb;

    case MIPS_HFLAG_SM:
        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
        adetlb_mask = 0xc0380000;
        goto check_ade;

    case MIPS_HFLAG_UM:
        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
        adetlb_mask = 0xe4180000;
        goto check_ade;

    check_ade:
        /* does this AM cause AdE in current execution mode */
        if ((adetlb_mask << am) < 0) {
            return TLBRET_BADADDR;
        }
        adetlb_mask <<= 8;
        /* fall through */
    check_tlb:
        /* is this AM mapped in current execution mode */
        return ((adetlb_mask << am) < 0);
    default:
        assert(0);
        return TLBRET_BADADDR;
    };
}
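/*
 * Note: each adetlb_mask above packs two truth tables indexed by AM: the
 * top byte marks the access modes that raise an address error, the next
 * byte (exposed by 'adetlb_mask <<= 8') marks the modes that are TLB
 * mapped.  Shifting left by AM and testing the sign bit reads one entry.
 */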
static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
                                    int *prot, target_ulong real_address,
                                    MMUAccessType access_type, int mmu_idx,
                                    unsigned int am, bool eu,
                                    target_ulong segmask,
                                    hwaddr physical_base)
{
    int mapped = is_seg_am_mapped(am, eu, mmu_idx);

    if (mapped < 0) {
        /* is_seg_am_mapped can report TLBRET_BADADDR */
        return mapped;
    } else if (mapped) {
        /* The segment is TLB mapped */
        return env->tlb->map_address(env, physical, prot, real_address,
                                     access_type);
    } else {
        /* The segment is unmapped */
        *physical = physical_base | (real_address & segmask);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }
}
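/*
 * Decode one 16-bit segment configuration (CFG) field from a SegCtl
 * register: the access mode (AM), the EU bit (segment behaviour while
 * Status.ERL is set) and the physical address bits (PA) giving the
 * segment's physical base, then defer to get_seg_physical_address().
 */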
static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
                                       int *prot, target_ulong real_address,
                                       MMUAccessType access_type, int mmu_idx,
                                       uint16_t segctl, target_ulong segmask)
{
    unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
    bool eu = (segctl >> CP0SC_EU) & 1;
    hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;

    return get_seg_physical_address(env, physical, prot, real_address,
                                    access_type, mmu_idx, am, eu, segmask,
                                    pa & ~(hwaddr)segmask);
}
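/*
 * Decode the virtual address map: useg through the SegCtl2 CFG fields,
 * the 64-bit xuseg/xsseg/xkphys/xkseg regions when TARGET_MIPS64 is
 * defined, and kseg0/kseg1/sseg/kseg3 through SegCtl1/SegCtl0, handing
 * each region either to the TLB or to a fixed segment mapping.
 */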
static int get_physical_address(CPUMIPSState *env, hwaddr *physical,
                                int *prot, target_ulong real_address,
                                MMUAccessType access_type, int mmu_idx)
{
    /* User mode can only access useg/xuseg */
#if defined(TARGET_MIPS64)
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

    if (mips_um_ksegs_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        uint16_t segctl;

        if (address >= 0x40000000UL) {
            segctl = env->CP0_SegCtl2;
        } else {
            segctl = env->CP0_SegCtl2 >> 16;
        }
        ret = get_segctl_physical_address(env, physical, prot,
                                          real_address, access_type,
                                          mmu_idx, segctl, 0x3FFFFFFF);
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            /* KX/SX/UX bit to check for each xkphys EVA access mode */
            static const uint8_t am_ksux[8] = {
                [CP0SC_AM_UK]    = (1u << CP0St_KX),
                [CP0SC_AM_MK]    = (1u << CP0St_KX),
                [CP0SC_AM_MSK]   = (1u << CP0St_SX),
                [CP0SC_AM_MUSK]  = (1u << CP0St_UX),
                [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
                [CP0SC_AM_USK]   = (1u << CP0St_SX),
                [6]              = (1u << CP0St_KX),
                [CP0SC_AM_UUSK]  = (1u << CP0St_UX),
            };
            unsigned int am = CP0SC_AM_UK;
            unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;

            if (xr & (1 << ((address >> 59) & 0x7))) {
                am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
            }
            /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
            if (env->CP0_Status & am_ksux[am]) {
                ret = get_seg_physical_address(env, physical, prot,
                                               real_address, access_type,
                                               mmu_idx, am, false, env->PAMask,
                                               0);
            } else {
                ret = TLBRET_BADADDR;
            }
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot,
                                        real_address, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < KSEG1_BASE) {
        /* kseg0 */
        ret = get_segctl_physical_address(env, physical, prot, real_address,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
    } else if (address < KSEG2_BASE) {
        /* kseg1 */
        ret = get_segctl_physical_address(env, physical, prot, real_address,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1, 0x1FFFFFFF);
    } else if (address < KSEG3_BASE) {
        /* sseg (kseg2) */
        ret = get_segctl_physical_address(env, physical, prot, real_address,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
    } else {
        /*
         * kseg3
         * XXX: debug segment is not emulated
         */
        ret = get_segctl_physical_address(env, physical, prot, real_address,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0, 0x1FFFFFFF);
    }
    return ret;
}
void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries.  */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

#endif /* !CONFIG_USER_ONLY */
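/*
 * Convert a TLBRET_* failure code into the corresponding MIPS exception
 * and preload CP0 BadVAddr, Context and EntryHi (plus XContext on 64-bit
 * targets) so that a software refill handler sees the faulting address.
 */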
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                MMUAccessType access_type, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (access_type == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (access_type == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) | /* PTEBase */
        (extract64(address, 62, 2) << (env->SEGBITS - 9)) |     /* R */
        (extract64(address, 13, env->SEGBITS - 13) << 4);       /* BadVPN2 */
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}
#if !defined(CONFIG_USER_ONLY)

hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) != 0) {
        return -1;
    }
    return phys_addr;
}
#if !defined(TARGET_MIPS64)

/*
 * Perform hardware page table walk
 *
 * Memory accesses are performed using the KERNEL privilege level.
 * Synchronous exceptions detected on memory accesses cause a silent exit
 * from page table walking, resulting in a TLB or XTLB Refill exception.
 *
 * Implementations are not required to support page table walk memory
 * accesses from mapped memory regions. When an unsupported access is
 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
 * exception.
 *
 * Note that if an exception is caused by AddressTranslation or LoadMemory
 * functions, the exception is not taken, a silent exit is taken,
 * resulting in a TLB or XTLB Refill exception.
 */

static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
                    uint64_t *pte)
{
    if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
        return false;
    }
    if (entry_size == 64) {
        *pte = cpu_ldq_code(env, vaddr);
    } else {
        *pte = cpu_ldl_code(env, vaddr);
    }
    return true;
}
static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
                                     int entry_size, int ptei)
{
    uint64_t result = entry;
    uint64_t rixi;
    if (ptei > entry_size) {
        ptei -= 32;
    }
    result >>= (ptei - 2);
    rixi = result & 3;
    result >>= 2;
    result |= rixi << CP0EnLo_XI;
    return result;
}
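/*
 * walk_directory() loads one directory-level entry.  It returns 0 on any
 * failure (silent exit from the walk), 1 when a huge-page leaf was found
 * (pw_entrylo0/1 are then already filled in), and 2 when the entry points
 * to the next level, in which case *vaddr is updated with the entry value.
 */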
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;
    uint32_t direntry_size = 1 << (directory_shift + 3);
    uint32_t leafentry_size = 1 << (leaf_shift + 3);
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            lsb = BIT_ULL(w) >> 6;
            *pw_entrylo0 = entry & ~lsb; /* even page */
            *pw_entrylo1 = entry | lsb; /* odd page */
        } else if (dph) {
            int oddpagebit = 1 << leaf_shift;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     cpu_mmu_index(env, false)) !=
                                     TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            return 0;
        }
        return 1;
    } else {
        *vaddr = entry;
        return 2;
    }
}
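/*
 * page_table_walk_refill() performs the multi-level walk described above,
 * using CP0 PWBase/PWField/PWSize/PWCtl to locate the global, upper and
 * middle directories and the leaf PTE pair.  On success it temporarily
 * loads EntryHi/PageMask/EntryLo0/EntryLo1 and issues a TLBWR, exactly as
 * a software refill handler would, then restores the saved CP0 state.
 */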
static bool page_table_walk_refill(CPUMIPSState *env, vaddr address,
                                   int mmu_idx)
{
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /*For the 32-bit architectures, this bit is fixed to 0.*/
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;

    /* HTW Shift values (depend on entry size) */
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;

    /* Offsets into tables */
    int goffset = gindex << directory_shift;
    int uoffset = uindex << directory_shift;
    int moffset = mindex << directory_shift;
    int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
    int ptoffset1 = ptoffset0 | (1 << (leaf_shift));

    uint32_t leafentry_size = 1 << (leaf_shift + 3);

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    uint64_t m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if ((directory_shift == -1) || (leaf_shift == -1)) {
        return false;
    }

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:
    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit)
        {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> TARGET_PAGE_BITS_MIN;
    update_pagemask(env, pw_pagemask << CP0PM_MASK, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
#endif
#endif /* !CONFIG_USER_ONLY */
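/*
 * QEMU TLB fill hook: translate the address and install the mapping with
 * tlb_set_page() on success; on a refill miss on 32-bit targets with a
 * real TLB, try the hardware page table walker and retry once; otherwise
 * raise the architectural exception unless this is only a probe.
 */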
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
#endif
    int ret = TLBRET_BADADDR;

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state() correctly */
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mmu_idx);
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int mode = (env->hflags & MIPS_HFLAG_KSU);
        bool ret_walker;
        env->hflags &= ~MIPS_HFLAG_KSU;
        ret_walker = page_table_walk_refill(env, address, mmu_idx);
        env->hflags |= mode;
        if (ret_walker) {
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        return false;
    }
#endif

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
#ifndef CONFIG_USER_ONLY
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  MMUAccessType access_type)
{
    hwaddr physical;
    int prot;
    int ret = 0;

    /* data access */
    ret = get_physical_address(env, &physical, &prot, address, access_type,
                               cpu_mmu_index(env, false));
    if (ret != TLBRET_MATCH) {
        raise_mmu_exception(env, address, access_type, ret);
        return -1LL;
    } else {
        return physical;
    }
}
static void set_hflags_for_handler(CPUMIPSState *env)
{
    /* Exception handlers are entered in 32-bit mode.  */
    env->hflags &= ~(MIPS_HFLAG_M16);
    /* ...except that microMIPS lets you choose.  */
    if (env->insn_flags & ASE_MICROMIPS) {
        env->hflags |= (!!(env->CP0_Config3 &
                           (1 << CP0C3_ISA_ON_EXC))
                        << MIPS_HFLAG_M16_SHIFT);
    }
}
static inline void set_badinstr_registers(CPUMIPSState *env)
{
    if (env->insn_flags & ISA_NANOMIPS32) {
        if (env->CP0_Config3 & (1 << CP0C3_BI)) {
            uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
            if ((instr & 0x10000000) == 0) {
                instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
            }
            env->CP0_BadInstr = instr;

            if ((instr & 0xFC000000) == 0x60000000) {
                instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
                env->CP0_BadInstrX = instr;
            }
        }
        return;
    }

    if (env->hflags & MIPS_HFLAG_M16) {
        /* TODO: add BadInstr support for microMIPS */
        return;
    }
    if (env->CP0_Config3 & (1 << CP0C3_BI)) {
        env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
    }
    if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
        (env->hflags & MIPS_HFLAG_BMASK)) {
        env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
    }
}

#endif /* !CONFIG_USER_ONLY */
void mips_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    bool update_badinstr = 0;
    target_ulong offset;
    int cause = -1;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n",
                 __func__, env->active_tc.PC, env->CP0_EPC,
                 mips_exception_name(cs->exception_index));
    }
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    offset = 0x180;
    switch (cs->exception_index) {
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /*
         * Debug single step cannot be raised inside a delay slot and
         * resume will always occur on the next instruction
         * (but we assume the pc has always been updated during
         * code translation).
         */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) |
                         (9 << CP0DB_DEC);
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
    enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
    set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS_R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        }
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /*
                     * For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines.
                     */
                    vector = pending;
                } else {
                    /*
                     * Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts.
                     */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
    set_EPC:
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS_R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) |
                         (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    cs->exception_index = EXCP_NONE;
}

#if !defined(CONFIG_USER_ONLY)
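/*
 * Remove a guest TLB entry's mappings from QEMU's TLB, flushing both the
 * even and the odd page of the pair when they are valid.  For TLBWR the
 * discarded entry may instead be kept as a hidden shadow entry as long as
 * the guest cannot observe it.
 */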
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint32_t tlb_mmid;
    target_ulong mask;

    MMID = mi ? MMID : (uint32_t) ASID;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /*
     * The qemu TLB is flushed when the ASID/MMID changes, so no need to
     * flush these entries again.
     */
    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    if (tlb->G == 0 && tlb_mmid != MMID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /*
         * For tlbwr, we can shadow the discarded entry into
         * a new (fake) TLB entry, as long as the guest can not
         * tell that it's there.
         */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
#endif /* !CONFIG_USER_ONLY */