/*
 * MIPS TLB (Translation lookaside buffer) helpers.
 *
 * Copyright (c) 2004-2005 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/bitops.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
30 static void r4k_mips_tlb_flush_extra(CPUMIPSState
*env
, int first
)
32 /* Discard entries from env->tlb[first] onwards. */
33 while (env
->tlb
->tlb_in_use
> first
) {
34 r4k_invalidate_tlb(env
, --env
->tlb
->tlb_in_use
, 0);
38 static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo
)
40 #if defined(TARGET_MIPS64)
41 return extract64(entrylo
, 6, 54);
43 return extract64(entrylo
, 6, 24) | /* PFN */
44 (extract64(entrylo
, 32, 32) << 24); /* PFNX */
48 static void r4k_fill_tlb(CPUMIPSState
*env
, int idx
)
51 uint64_t mask
= env
->CP0_PageMask
>> (TARGET_PAGE_BITS
+ 1);
53 /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
54 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
55 if (env
->CP0_EntryHi
& (1 << CP0EnHi_EHINV
)) {
60 tlb
->VPN
= env
->CP0_EntryHi
& (TARGET_PAGE_MASK
<< 1);
61 #if defined(TARGET_MIPS64)
62 tlb
->VPN
&= env
->SEGMask
;
64 tlb
->ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
65 tlb
->MMID
= env
->CP0_MemoryMapID
;
66 tlb
->PageMask
= env
->CP0_PageMask
;
67 tlb
->G
= env
->CP0_EntryLo0
& env
->CP0_EntryLo1
& 1;
68 tlb
->V0
= (env
->CP0_EntryLo0
& 2) != 0;
69 tlb
->D0
= (env
->CP0_EntryLo0
& 4) != 0;
70 tlb
->C0
= (env
->CP0_EntryLo0
>> 3) & 0x7;
71 tlb
->XI0
= (env
->CP0_EntryLo0
>> CP0EnLo_XI
) & 1;
72 tlb
->RI0
= (env
->CP0_EntryLo0
>> CP0EnLo_RI
) & 1;
73 tlb
->PFN
[0] = (get_tlb_pfn_from_entrylo(env
->CP0_EntryLo0
) & ~mask
) << 12;
74 tlb
->V1
= (env
->CP0_EntryLo1
& 2) != 0;
75 tlb
->D1
= (env
->CP0_EntryLo1
& 4) != 0;
76 tlb
->C1
= (env
->CP0_EntryLo1
>> 3) & 0x7;
77 tlb
->XI1
= (env
->CP0_EntryLo1
>> CP0EnLo_XI
) & 1;
78 tlb
->RI1
= (env
->CP0_EntryLo1
>> CP0EnLo_RI
) & 1;
79 tlb
->PFN
[1] = (get_tlb_pfn_from_entrylo(env
->CP0_EntryLo1
) & ~mask
) << 12;
82 static void r4k_helper_tlbinv(CPUMIPSState
*env
)
84 bool mi
= !!((env
->CP0_Config5
>> CP0C5_MI
) & 1);
85 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
86 uint32_t MMID
= env
->CP0_MemoryMapID
;
91 MMID
= mi
? MMID
: (uint32_t) ASID
;
92 for (idx
= 0; idx
< env
->tlb
->nb_tlb
; idx
++) {
93 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
94 tlb_mmid
= mi
? tlb
->MMID
: (uint32_t) tlb
->ASID
;
95 if (!tlb
->G
&& tlb_mmid
== MMID
) {
99 cpu_mips_tlb_flush(env
);
102 static void r4k_helper_tlbinvf(CPUMIPSState
*env
)
106 for (idx
= 0; idx
< env
->tlb
->nb_tlb
; idx
++) {
107 env
->tlb
->mmu
.r4k
.tlb
[idx
].EHINV
= 1;
109 cpu_mips_tlb_flush(env
);
112 static void r4k_helper_tlbwi(CPUMIPSState
*env
)
114 bool mi
= !!((env
->CP0_Config5
>> CP0C5_MI
) & 1);
116 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
117 uint32_t MMID
= env
->CP0_MemoryMapID
;
119 bool EHINV
, G
, V0
, D0
, V1
, D1
, XI0
, XI1
, RI0
, RI1
;
123 MMID
= mi
? MMID
: (uint32_t) ASID
;
125 idx
= (env
->CP0_Index
& ~0x80000000) % env
->tlb
->nb_tlb
;
126 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
127 VPN
= env
->CP0_EntryHi
& (TARGET_PAGE_MASK
<< 1);
128 #if defined(TARGET_MIPS64)
131 EHINV
= (env
->CP0_EntryHi
& (1 << CP0EnHi_EHINV
)) != 0;
132 G
= env
->CP0_EntryLo0
& env
->CP0_EntryLo1
& 1;
133 V0
= (env
->CP0_EntryLo0
& 2) != 0;
134 D0
= (env
->CP0_EntryLo0
& 4) != 0;
135 XI0
= (env
->CP0_EntryLo0
>> CP0EnLo_XI
) &1;
136 RI0
= (env
->CP0_EntryLo0
>> CP0EnLo_RI
) &1;
137 V1
= (env
->CP0_EntryLo1
& 2) != 0;
138 D1
= (env
->CP0_EntryLo1
& 4) != 0;
139 XI1
= (env
->CP0_EntryLo1
>> CP0EnLo_XI
) &1;
140 RI1
= (env
->CP0_EntryLo1
>> CP0EnLo_RI
) &1;
142 tlb_mmid
= mi
? tlb
->MMID
: (uint32_t) tlb
->ASID
;
144 * Discard cached TLB entries, unless tlbwi is just upgrading access
145 * permissions on the current entry.
147 if (tlb
->VPN
!= VPN
|| tlb_mmid
!= MMID
|| tlb
->G
!= G
||
148 (!tlb
->EHINV
&& EHINV
) ||
149 (tlb
->V0
&& !V0
) || (tlb
->D0
&& !D0
) ||
150 (!tlb
->XI0
&& XI0
) || (!tlb
->RI0
&& RI0
) ||
151 (tlb
->V1
&& !V1
) || (tlb
->D1
&& !D1
) ||
152 (!tlb
->XI1
&& XI1
) || (!tlb
->RI1
&& RI1
)) {
153 r4k_mips_tlb_flush_extra(env
, env
->tlb
->nb_tlb
);
156 r4k_invalidate_tlb(env
, idx
, 0);
157 r4k_fill_tlb(env
, idx
);
160 static void r4k_helper_tlbwr(CPUMIPSState
*env
)
162 int r
= cpu_mips_get_random(env
);
164 r4k_invalidate_tlb(env
, r
, 1);
165 r4k_fill_tlb(env
, r
);
168 static void r4k_helper_tlbp(CPUMIPSState
*env
)
170 bool mi
= !!((env
->CP0_Config5
>> CP0C5_MI
) & 1);
175 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
176 uint32_t MMID
= env
->CP0_MemoryMapID
;
180 MMID
= mi
? MMID
: (uint32_t) ASID
;
181 for (i
= 0; i
< env
->tlb
->nb_tlb
; i
++) {
182 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
183 /* 1k pages are not supported. */
184 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
185 tag
= env
->CP0_EntryHi
& ~mask
;
186 VPN
= tlb
->VPN
& ~mask
;
187 #if defined(TARGET_MIPS64)
190 tlb_mmid
= mi
? tlb
->MMID
: (uint32_t) tlb
->ASID
;
191 /* Check ASID/MMID, virtual page number & size */
192 if ((tlb
->G
== 1 || tlb_mmid
== MMID
) && VPN
== tag
&& !tlb
->EHINV
) {
198 if (i
== env
->tlb
->nb_tlb
) {
199 /* No match. Discard any shadow entries, if any of them match. */
200 for (i
= env
->tlb
->nb_tlb
; i
< env
->tlb
->tlb_in_use
; i
++) {
201 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
202 /* 1k pages are not supported. */
203 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
204 tag
= env
->CP0_EntryHi
& ~mask
;
205 VPN
= tlb
->VPN
& ~mask
;
206 #if defined(TARGET_MIPS64)
209 tlb_mmid
= mi
? tlb
->MMID
: (uint32_t) tlb
->ASID
;
210 /* Check ASID/MMID, virtual page number & size */
211 if ((tlb
->G
== 1 || tlb_mmid
== MMID
) && VPN
== tag
) {
212 r4k_mips_tlb_flush_extra(env
, i
);
217 env
->CP0_Index
|= 0x80000000;
221 static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn
)
223 #if defined(TARGET_MIPS64)
226 return (extract64(tlb_pfn
, 0, 24) << 6) | /* PFN */
227 (extract64(tlb_pfn
, 24, 32) << 32); /* PFNX */
231 static void r4k_helper_tlbr(CPUMIPSState
*env
)
233 bool mi
= !!((env
->CP0_Config5
>> CP0C5_MI
) & 1);
234 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
235 uint32_t MMID
= env
->CP0_MemoryMapID
;
240 MMID
= mi
? MMID
: (uint32_t) ASID
;
241 idx
= (env
->CP0_Index
& ~0x80000000) % env
->tlb
->nb_tlb
;
242 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
244 tlb_mmid
= mi
? tlb
->MMID
: (uint32_t) tlb
->ASID
;
245 /* If this will change the current ASID/MMID, flush qemu's TLB. */
246 if (MMID
!= tlb_mmid
) {
247 cpu_mips_tlb_flush(env
);
250 r4k_mips_tlb_flush_extra(env
, env
->tlb
->nb_tlb
);
253 env
->CP0_EntryHi
= 1 << CP0EnHi_EHINV
;
254 env
->CP0_PageMask
= 0;
255 env
->CP0_EntryLo0
= 0;
256 env
->CP0_EntryLo1
= 0;
258 env
->CP0_EntryHi
= mi
? tlb
->VPN
: tlb
->VPN
| tlb
->ASID
;
259 env
->CP0_MemoryMapID
= tlb
->MMID
;
260 env
->CP0_PageMask
= tlb
->PageMask
;
261 env
->CP0_EntryLo0
= tlb
->G
| (tlb
->V0
<< 1) | (tlb
->D0
<< 2) |
262 ((uint64_t)tlb
->RI0
<< CP0EnLo_RI
) |
263 ((uint64_t)tlb
->XI0
<< CP0EnLo_XI
) | (tlb
->C0
<< 3) |
264 get_entrylo_pfn_from_tlb(tlb
->PFN
[0] >> 12);
265 env
->CP0_EntryLo1
= tlb
->G
| (tlb
->V1
<< 1) | (tlb
->D1
<< 2) |
266 ((uint64_t)tlb
->RI1
<< CP0EnLo_RI
) |
267 ((uint64_t)tlb
->XI1
<< CP0EnLo_XI
) | (tlb
->C1
<< 3) |
268 get_entrylo_pfn_from_tlb(tlb
->PFN
[1] >> 12);
272 void helper_tlbwi(CPUMIPSState
*env
)
274 env
->tlb
->helper_tlbwi(env
);
277 void helper_tlbwr(CPUMIPSState
*env
)
279 env
->tlb
->helper_tlbwr(env
);
282 void helper_tlbp(CPUMIPSState
*env
)
284 env
->tlb
->helper_tlbp(env
);
287 void helper_tlbr(CPUMIPSState
*env
)
289 env
->tlb
->helper_tlbr(env
);
292 void helper_tlbinv(CPUMIPSState
*env
)
294 env
->tlb
->helper_tlbinv(env
);
297 void helper_tlbinvf(CPUMIPSState
*env
)
299 env
->tlb
->helper_tlbinvf(env
);
302 static void global_invalidate_tlb(CPUMIPSState
*env
,
317 for (idx
= 0; idx
< env
->tlb
->nb_tlb
; idx
++) {
318 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
320 (((tlb
->VPN
& ~tlb
->PageMask
) == (invMsgVPN2
& ~tlb
->PageMask
))
323 (extract64(env
->CP0_EntryHi
, 62, 2) == invMsgR
)
326 MMidMatch
= tlb
->MMID
== invMsgMMid
;
327 if ((invAll
&& (idx
> env
->CP0_Wired
)) ||
328 (VAMatch
&& invVAMMid
&& (tlb
->G
|| MMidMatch
)) ||
329 (VAMatch
&& invVA
) ||
330 (MMidMatch
&& !(tlb
->G
) && invMMid
)) {
334 cpu_mips_tlb_flush(env
);
337 void helper_ginvt(CPUMIPSState
*env
, target_ulong arg
, uint32_t type
)
339 bool invAll
= type
== 0;
340 bool invVA
= type
== 1;
341 bool invMMid
= type
== 2;
342 bool invVAMMid
= type
== 3;
343 uint32_t invMsgVPN2
= arg
& (TARGET_PAGE_MASK
<< 1);
345 uint32_t invMsgMMid
= env
->CP0_MemoryMapID
;
346 CPUState
*other_cs
= first_cpu
;
349 invMsgR
= extract64(arg
, 62, 2);
352 CPU_FOREACH(other_cs
) {
353 MIPSCPU
*other_cpu
= MIPS_CPU(other_cs
);
354 global_invalidate_tlb(&other_cpu
->env
, invMsgVPN2
, invMsgR
, invMsgMMid
,
355 invAll
, invVAMMid
, invMMid
, invVA
);
359 /* no MMU emulation */
360 static int no_mmu_map_address(CPUMIPSState
*env
, hwaddr
*physical
, int *prot
,
361 target_ulong address
, MMUAccessType access_type
)
364 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
368 /* fixed mapping MMU emulation */
369 static int fixed_mmu_map_address(CPUMIPSState
*env
, hwaddr
*physical
,
370 int *prot
, target_ulong address
,
371 MMUAccessType access_type
)
373 if (address
<= (int32_t)0x7FFFFFFFUL
) {
374 if (!(env
->CP0_Status
& (1 << CP0St_ERL
))) {
375 *physical
= address
+ 0x40000000UL
;
379 } else if (address
<= (int32_t)0xBFFFFFFFUL
) {
380 *physical
= address
& 0x1FFFFFFF;
385 *prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
389 /* MIPS32/MIPS64 R4000-style MMU emulation */
390 static int r4k_map_address(CPUMIPSState
*env
, hwaddr
*physical
, int *prot
,
391 target_ulong address
, MMUAccessType access_type
)
393 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
394 uint32_t MMID
= env
->CP0_MemoryMapID
;
395 bool mi
= !!((env
->CP0_Config5
>> CP0C5_MI
) & 1);
399 MMID
= mi
? MMID
: (uint32_t) ASID
;
401 for (i
= 0; i
< env
->tlb
->tlb_in_use
; i
++) {
402 r4k_tlb_t
*tlb
= &env
->tlb
->mmu
.r4k
.tlb
[i
];
403 /* 1k pages are not supported. */
404 target_ulong mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
405 target_ulong tag
= address
& ~mask
;
406 target_ulong VPN
= tlb
->VPN
& ~mask
;
407 #if defined(TARGET_MIPS64)
411 /* Check ASID/MMID, virtual page number & size */
412 tlb_mmid
= mi
? tlb
->MMID
: (uint32_t) tlb
->ASID
;
413 if ((tlb
->G
== 1 || tlb_mmid
== MMID
) && VPN
== tag
&& !tlb
->EHINV
) {
415 int n
= !!(address
& mask
& ~(mask
>> 1));
416 /* Check access rights */
417 if (!(n
? tlb
->V1
: tlb
->V0
)) {
418 return TLBRET_INVALID
;
420 if (access_type
== MMU_INST_FETCH
&& (n
? tlb
->XI1
: tlb
->XI0
)) {
423 if (access_type
== MMU_DATA_LOAD
&& (n
? tlb
->RI1
: tlb
->RI0
)) {
426 if (access_type
!= MMU_DATA_STORE
|| (n
? tlb
->D1
: tlb
->D0
)) {
427 *physical
= tlb
->PFN
[n
] | (address
& (mask
>> 1));
429 if (n
? tlb
->D1
: tlb
->D0
) {
432 if (!(n
? tlb
->XI1
: tlb
->XI0
)) {
440 return TLBRET_NOMATCH
;
443 static void no_mmu_init(CPUMIPSState
*env
, const mips_def_t
*def
)
445 env
->tlb
->nb_tlb
= 1;
446 env
->tlb
->map_address
= &no_mmu_map_address
;
449 static void fixed_mmu_init(CPUMIPSState
*env
, const mips_def_t
*def
)
451 env
->tlb
->nb_tlb
= 1;
452 env
->tlb
->map_address
= &fixed_mmu_map_address
;
455 static void r4k_mmu_init(CPUMIPSState
*env
, const mips_def_t
*def
)
457 env
->tlb
->nb_tlb
= 1 + ((def
->CP0_Config1
>> CP0C1_MMU
) & 63);
458 env
->tlb
->map_address
= &r4k_map_address
;
459 env
->tlb
->helper_tlbwi
= r4k_helper_tlbwi
;
460 env
->tlb
->helper_tlbwr
= r4k_helper_tlbwr
;
461 env
->tlb
->helper_tlbp
= r4k_helper_tlbp
;
462 env
->tlb
->helper_tlbr
= r4k_helper_tlbr
;
463 env
->tlb
->helper_tlbinv
= r4k_helper_tlbinv
;
464 env
->tlb
->helper_tlbinvf
= r4k_helper_tlbinvf
;
467 void mmu_init(CPUMIPSState
*env
, const mips_def_t
*def
)
469 env
->tlb
= g_malloc0(sizeof(CPUMIPSTLBContext
));
471 switch (def
->mmu_type
) {
473 no_mmu_init(env
, def
);
476 r4k_mmu_init(env
, def
);
479 fixed_mmu_init(env
, def
);
485 cpu_abort(env_cpu(env
), "MMU type not supported\n");
489 void cpu_mips_tlb_flush(CPUMIPSState
*env
)
491 /* Flush qemu's TLB and discard all shadowed entries. */
492 tlb_flush(env_cpu(env
));
493 env
->tlb
->tlb_in_use
= env
->tlb
->nb_tlb
;
496 static void raise_mmu_exception(CPUMIPSState
*env
, target_ulong address
,
497 MMUAccessType access_type
, int tlb_error
)
499 CPUState
*cs
= env_cpu(env
);
500 int exception
= 0, error_code
= 0;
502 if (access_type
== MMU_INST_FETCH
) {
503 error_code
|= EXCP_INST_NOTAVAIL
;
509 /* Reference to kernel address from user mode or supervisor mode */
510 /* Reference to supervisor address from user mode */
511 if (access_type
== MMU_DATA_STORE
) {
512 exception
= EXCP_AdES
;
514 exception
= EXCP_AdEL
;
518 /* No TLB match for a mapped address */
519 if (access_type
== MMU_DATA_STORE
) {
520 exception
= EXCP_TLBS
;
522 exception
= EXCP_TLBL
;
524 error_code
|= EXCP_TLB_NOMATCH
;
527 /* TLB match with no valid bit */
528 if (access_type
== MMU_DATA_STORE
) {
529 exception
= EXCP_TLBS
;
531 exception
= EXCP_TLBL
;
535 /* TLB match but 'D' bit is cleared */
536 exception
= EXCP_LTLBL
;
539 /* Execute-Inhibit Exception */
540 if (env
->CP0_PageGrain
& (1 << CP0PG_IEC
)) {
541 exception
= EXCP_TLBXI
;
543 exception
= EXCP_TLBL
;
547 /* Read-Inhibit Exception */
548 if (env
->CP0_PageGrain
& (1 << CP0PG_IEC
)) {
549 exception
= EXCP_TLBRI
;
551 exception
= EXCP_TLBL
;
555 /* Raise exception */
556 if (!(env
->hflags
& MIPS_HFLAG_DM
)) {
557 env
->CP0_BadVAddr
= address
;
559 env
->CP0_Context
= (env
->CP0_Context
& ~0x007fffff) |
560 ((address
>> 9) & 0x007ffff0);
561 env
->CP0_EntryHi
= (env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
) |
562 (env
->CP0_EntryHi
& (1 << CP0EnHi_EHINV
)) |
563 (address
& (TARGET_PAGE_MASK
<< 1));
564 #if defined(TARGET_MIPS64)
565 env
->CP0_EntryHi
&= env
->SEGMask
;
567 (env
->CP0_XContext
& ((~0ULL) << (env
->SEGBITS
- 7))) | /* PTEBase */
568 (extract64(address
, 62, 2) << (env
->SEGBITS
- 9)) | /* R */
569 (extract64(address
, 13, env
->SEGBITS
- 13) << 4); /* BadVPN2 */
571 cs
->exception_index
= exception
;
572 env
->error_code
= error_code
;
575 #if !defined(TARGET_MIPS64)
578 * Perform hardware page table walk
580 * Memory accesses are performed using the KERNEL privilege level.
581 * Synchronous exceptions detected on memory accesses cause a silent exit
582 * from page table walking, resulting in a TLB or XTLB Refill exception.
584 * Implementations are not required to support page table walk memory
585 * accesses from mapped memory regions. When an unsupported access is
586 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
589 * Note that if an exception is caused by AddressTranslation or LoadMemory
590 * functions, the exception is not taken, a silent exit is taken,
591 * resulting in a TLB or XTLB Refill exception.
594 static bool get_pte(CPUMIPSState
*env
, uint64_t vaddr
, int entry_size
,
597 if ((vaddr
& ((entry_size
>> 3) - 1)) != 0) {
600 if (entry_size
== 64) {
601 *pte
= cpu_ldq_code(env
, vaddr
);
603 *pte
= cpu_ldl_code(env
, vaddr
);
608 static uint64_t get_tlb_entry_layout(CPUMIPSState
*env
, uint64_t entry
,
609 int entry_size
, int ptei
)
611 uint64_t result
= entry
;
613 if (ptei
> entry_size
) {
616 result
>>= (ptei
- 2);
619 result
|= rixi
<< CP0EnLo_XI
;
623 static int walk_directory(CPUMIPSState
*env
, uint64_t *vaddr
,
624 int directory_index
, bool *huge_page
, bool *hgpg_directory_hit
,
625 uint64_t *pw_entrylo0
, uint64_t *pw_entrylo1
,
626 unsigned directory_shift
, unsigned leaf_shift
, int ptw_mmu_idx
)
628 int dph
= (env
->CP0_PWCtl
>> CP0PC_DPH
) & 0x1;
629 int psn
= (env
->CP0_PWCtl
>> CP0PC_PSN
) & 0x3F;
630 int hugepg
= (env
->CP0_PWCtl
>> CP0PC_HUGEPG
) & 0x1;
631 int pf_ptew
= (env
->CP0_PWField
>> CP0PF_PTEW
) & 0x3F;
632 uint32_t direntry_size
= 1 << (directory_shift
+ 3);
633 uint32_t leafentry_size
= 1 << (leaf_shift
+ 3);
640 if (get_physical_address(env
, &paddr
, &prot
, *vaddr
, MMU_DATA_LOAD
,
641 ptw_mmu_idx
) != TLBRET_MATCH
) {
642 /* wrong base address */
645 if (!get_pte(env
, *vaddr
, direntry_size
, &entry
)) {
649 if ((entry
& (1 << psn
)) && hugepg
) {
651 *hgpg_directory_hit
= true;
652 entry
= get_tlb_entry_layout(env
, entry
, leafentry_size
, pf_ptew
);
653 w
= directory_index
- 1;
654 if (directory_index
& 0x1) {
655 /* Generate adjacent page from same PTE for odd TLB page */
656 lsb
= BIT_ULL(w
) >> 6;
657 *pw_entrylo0
= entry
& ~lsb
; /* even page */
658 *pw_entrylo1
= entry
| lsb
; /* odd page */
660 int oddpagebit
= 1 << leaf_shift
;
661 uint64_t vaddr2
= *vaddr
^ oddpagebit
;
662 if (*vaddr
& oddpagebit
) {
663 *pw_entrylo1
= entry
;
665 *pw_entrylo0
= entry
;
667 if (get_physical_address(env
, &paddr
, &prot
, vaddr2
, MMU_DATA_LOAD
,
668 ptw_mmu_idx
) != TLBRET_MATCH
) {
671 if (!get_pte(env
, vaddr2
, leafentry_size
, &entry
)) {
674 entry
= get_tlb_entry_layout(env
, entry
, leafentry_size
, pf_ptew
);
675 if (*vaddr
& oddpagebit
) {
676 *pw_entrylo0
= entry
;
678 *pw_entrylo1
= entry
;
690 static bool page_table_walk_refill(CPUMIPSState
*env
, vaddr address
,
693 int gdw
= (env
->CP0_PWSize
>> CP0PS_GDW
) & 0x3F;
694 int udw
= (env
->CP0_PWSize
>> CP0PS_UDW
) & 0x3F;
695 int mdw
= (env
->CP0_PWSize
>> CP0PS_MDW
) & 0x3F;
696 int ptw
= (env
->CP0_PWSize
>> CP0PS_PTW
) & 0x3F;
697 int ptew
= (env
->CP0_PWSize
>> CP0PS_PTEW
) & 0x3F;
700 bool huge_page
= false;
701 bool hgpg_bdhit
= false;
702 bool hgpg_gdhit
= false;
703 bool hgpg_udhit
= false;
704 bool hgpg_mdhit
= false;
706 int32_t pw_pagemask
= 0;
707 target_ulong pw_entryhi
= 0;
708 uint64_t pw_entrylo0
= 0;
709 uint64_t pw_entrylo1
= 0;
711 /* Native pointer size */
712 /*For the 32-bit architectures, this bit is fixed to 0.*/
713 int native_shift
= (((env
->CP0_PWSize
>> CP0PS_PS
) & 1) == 0) ? 2 : 3;
715 /* Indices from PWField */
716 int pf_gdw
= (env
->CP0_PWField
>> CP0PF_GDW
) & 0x3F;
717 int pf_udw
= (env
->CP0_PWField
>> CP0PF_UDW
) & 0x3F;
718 int pf_mdw
= (env
->CP0_PWField
>> CP0PF_MDW
) & 0x3F;
719 int pf_ptw
= (env
->CP0_PWField
>> CP0PF_PTW
) & 0x3F;
720 int pf_ptew
= (env
->CP0_PWField
>> CP0PF_PTEW
) & 0x3F;
722 /* Indices computed from faulting address */
723 int gindex
= (address
>> pf_gdw
) & ((1 << gdw
) - 1);
724 int uindex
= (address
>> pf_udw
) & ((1 << udw
) - 1);
725 int mindex
= (address
>> pf_mdw
) & ((1 << mdw
) - 1);
726 int ptindex
= (address
>> pf_ptw
) & ((1 << ptw
) - 1);
728 /* Other HTW configs */
729 int hugepg
= (env
->CP0_PWCtl
>> CP0PC_HUGEPG
) & 0x1;
730 unsigned directory_shift
, leaf_shift
;
732 /* Offsets into tables */
733 unsigned goffset
, uoffset
, moffset
, ptoffset0
, ptoffset1
;
734 uint32_t leafentry_size
;
736 /* Starting address - Page Table Base */
737 uint64_t vaddr
= env
->CP0_PWBase
;
744 if (!(env
->CP0_Config3
& (1 << CP0C3_PW
))) {
745 /* walker is unimplemented */
748 if (!(env
->CP0_PWCtl
& (1 << CP0PC_PWEN
))) {
749 /* walker is disabled */
752 if (!(gdw
> 0 || udw
> 0 || mdw
> 0)) {
753 /* no structure to walk */
760 /* HTW Shift values (depend on entry size) */
761 directory_shift
= (hugepg
&& (ptew
== 1)) ? native_shift
+ 1 : native_shift
;
762 leaf_shift
= (ptew
== 1) ? native_shift
+ 1 : native_shift
;
764 goffset
= gindex
<< directory_shift
;
765 uoffset
= uindex
<< directory_shift
;
766 moffset
= mindex
<< directory_shift
;
767 ptoffset0
= (ptindex
>> 1) << (leaf_shift
+ 1);
768 ptoffset1
= ptoffset0
| (1 << (leaf_shift
));
770 leafentry_size
= 1 << (leaf_shift
+ 3);
772 /* Global Directory */
775 switch (walk_directory(env
, &vaddr
, pf_gdw
, &huge_page
, &hgpg_gdhit
,
776 &pw_entrylo0
, &pw_entrylo1
,
777 directory_shift
, leaf_shift
, ptw_mmu_idx
))
789 /* Upper directory */
792 switch (walk_directory(env
, &vaddr
, pf_udw
, &huge_page
, &hgpg_udhit
,
793 &pw_entrylo0
, &pw_entrylo1
,
794 directory_shift
, leaf_shift
, ptw_mmu_idx
))
806 /* Middle directory */
809 switch (walk_directory(env
, &vaddr
, pf_mdw
, &huge_page
, &hgpg_mdhit
,
810 &pw_entrylo0
, &pw_entrylo1
,
811 directory_shift
, leaf_shift
, ptw_mmu_idx
))
823 /* Leaf Level Page Table - First half of PTE pair */
825 if (get_physical_address(env
, &paddr
, &prot
, vaddr
, MMU_DATA_LOAD
,
826 ptw_mmu_idx
) != TLBRET_MATCH
) {
829 if (!get_pte(env
, vaddr
, leafentry_size
, &dir_entry
)) {
832 dir_entry
= get_tlb_entry_layout(env
, dir_entry
, leafentry_size
, pf_ptew
);
833 pw_entrylo0
= dir_entry
;
835 /* Leaf Level Page Table - Second half of PTE pair */
837 if (get_physical_address(env
, &paddr
, &prot
, vaddr
, MMU_DATA_LOAD
,
838 ptw_mmu_idx
) != TLBRET_MATCH
) {
841 if (!get_pte(env
, vaddr
, leafentry_size
, &dir_entry
)) {
844 dir_entry
= get_tlb_entry_layout(env
, dir_entry
, leafentry_size
, pf_ptew
);
845 pw_entrylo1
= dir_entry
;
849 m
= (1 << pf_ptw
) - 1;
852 switch (hgpg_bdhit
<< 3 | hgpg_gdhit
<< 2 | hgpg_udhit
<< 1 |
856 m
= (1 << pf_gdw
) - 1;
862 m
= (1 << pf_udw
) - 1;
868 m
= (1 << pf_mdw
) - 1;
875 pw_pagemask
= m
>> TARGET_PAGE_BITS_MIN
;
876 update_pagemask(env
, pw_pagemask
<< CP0PM_MASK
, &pw_pagemask
);
877 pw_entryhi
= (address
& ~0x1fff) | (env
->CP0_EntryHi
& 0xFF);
879 target_ulong tmp_entryhi
= env
->CP0_EntryHi
;
880 int32_t tmp_pagemask
= env
->CP0_PageMask
;
881 uint64_t tmp_entrylo0
= env
->CP0_EntryLo0
;
882 uint64_t tmp_entrylo1
= env
->CP0_EntryLo1
;
884 env
->CP0_EntryHi
= pw_entryhi
;
885 env
->CP0_PageMask
= pw_pagemask
;
886 env
->CP0_EntryLo0
= pw_entrylo0
;
887 env
->CP0_EntryLo1
= pw_entrylo1
;
890 * The hardware page walker inserts a page into the TLB in a manner
891 * identical to a TLBWR instruction as executed by the software refill
894 r4k_helper_tlbwr(env
);
896 env
->CP0_EntryHi
= tmp_entryhi
;
897 env
->CP0_PageMask
= tmp_pagemask
;
898 env
->CP0_EntryLo0
= tmp_entrylo0
;
899 env
->CP0_EntryLo1
= tmp_entrylo1
;
905 bool mips_cpu_tlb_fill(CPUState
*cs
, vaddr address
, int size
,
906 MMUAccessType access_type
, int mmu_idx
,
907 bool probe
, uintptr_t retaddr
)
909 MIPSCPU
*cpu
= MIPS_CPU(cs
);
910 CPUMIPSState
*env
= &cpu
->env
;
913 int ret
= TLBRET_BADADDR
;
916 /* XXX: put correct access by using cpu_restore_state() correctly */
917 ret
= get_physical_address(env
, &physical
, &prot
, address
,
918 access_type
, mmu_idx
);
921 qemu_log_mask(CPU_LOG_MMU
,
922 "%s address=%" VADDR_PRIx
" physical " HWADDR_FMT_plx
923 " prot %d\n", __func__
, address
, physical
, prot
);
926 qemu_log_mask(CPU_LOG_MMU
,
927 "%s address=%" VADDR_PRIx
" ret %d\n", __func__
, address
,
931 if (ret
== TLBRET_MATCH
) {
932 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
,
933 physical
& TARGET_PAGE_MASK
, prot
,
934 mmu_idx
, TARGET_PAGE_SIZE
);
937 #if !defined(TARGET_MIPS64)
938 if ((ret
== TLBRET_NOMATCH
) && (env
->tlb
->nb_tlb
> 1)) {
940 * Memory reads during hardware page table walking are performed
941 * as if they were kernel-mode load instructions.
943 int ptw_mmu_idx
= (env
->hflags
& MIPS_HFLAG_ERL
?
944 MMU_ERL_IDX
: MMU_KERNEL_IDX
);
946 if (page_table_walk_refill(env
, address
, ptw_mmu_idx
)) {
947 ret
= get_physical_address(env
, &physical
, &prot
, address
,
948 access_type
, mmu_idx
);
949 if (ret
== TLBRET_MATCH
) {
950 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
,
951 physical
& TARGET_PAGE_MASK
, prot
,
952 mmu_idx
, TARGET_PAGE_SIZE
);
962 raise_mmu_exception(env
, address
, access_type
, ret
);
963 do_raise_exception_err(env
, cs
->exception_index
, env
->error_code
, retaddr
);
966 hwaddr
cpu_mips_translate_address(CPUMIPSState
*env
, target_ulong address
,
967 MMUAccessType access_type
, uintptr_t retaddr
)
972 CPUState
*cs
= env_cpu(env
);
975 ret
= get_physical_address(env
, &physical
, &prot
, address
, access_type
,
976 mips_env_mmu_index(env
));
977 if (ret
== TLBRET_MATCH
) {
981 raise_mmu_exception(env
, address
, access_type
, ret
);
982 cpu_loop_exit_restore(cs
, retaddr
);
985 static void set_hflags_for_handler(CPUMIPSState
*env
)
987 /* Exception handlers are entered in 32-bit mode. */
988 env
->hflags
&= ~(MIPS_HFLAG_M16
);
989 /* ...except that microMIPS lets you choose. */
990 if (env
->insn_flags
& ASE_MICROMIPS
) {
991 env
->hflags
|= (!!(env
->CP0_Config3
&
992 (1 << CP0C3_ISA_ON_EXC
))
993 << MIPS_HFLAG_M16_SHIFT
);
997 static inline void set_badinstr_registers(CPUMIPSState
*env
)
999 if (env
->insn_flags
& ISA_NANOMIPS32
) {
1000 if (env
->CP0_Config3
& (1 << CP0C3_BI
)) {
1001 uint32_t instr
= (cpu_lduw_code(env
, env
->active_tc
.PC
)) << 16;
1002 if ((instr
& 0x10000000) == 0) {
1003 instr
|= cpu_lduw_code(env
, env
->active_tc
.PC
+ 2);
1005 env
->CP0_BadInstr
= instr
;
1007 if ((instr
& 0xFC000000) == 0x60000000) {
1008 instr
= cpu_lduw_code(env
, env
->active_tc
.PC
+ 4) << 16;
1009 env
->CP0_BadInstrX
= instr
;
1015 if (env
->hflags
& MIPS_HFLAG_M16
) {
1016 /* TODO: add BadInstr support for microMIPS */
1019 if (env
->CP0_Config3
& (1 << CP0C3_BI
)) {
1020 env
->CP0_BadInstr
= cpu_ldl_code(env
, env
->active_tc
.PC
);
1022 if ((env
->CP0_Config3
& (1 << CP0C3_BP
)) &&
1023 (env
->hflags
& MIPS_HFLAG_BMASK
)) {
1024 env
->CP0_BadInstrP
= cpu_ldl_code(env
, env
->active_tc
.PC
- 4);
1028 void mips_cpu_do_interrupt(CPUState
*cs
)
1030 MIPSCPU
*cpu
= MIPS_CPU(cs
);
1031 CPUMIPSState
*env
= &cpu
->env
;
1032 bool update_badinstr
= 0;
1033 target_ulong offset
;
1036 if (qemu_loglevel_mask(CPU_LOG_INT
)
1037 && cs
->exception_index
!= EXCP_EXT_INTERRUPT
) {
1038 qemu_log("%s enter: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
1040 __func__
, env
->active_tc
.PC
, env
->CP0_EPC
,
1041 mips_exception_name(cs
->exception_index
));
1043 if (cs
->exception_index
== EXCP_EXT_INTERRUPT
&&
1044 (env
->hflags
& MIPS_HFLAG_DM
)) {
1045 cs
->exception_index
= EXCP_DINT
;
1048 switch (cs
->exception_index
) {
1050 cs
->exception_index
= EXCP_NONE
;
1051 mips_semihosting(env
);
1052 env
->active_tc
.PC
+= env
->error_code
;
1055 env
->CP0_Debug
|= 1 << CP0DB_DSS
;
1057 * Debug single step cannot be raised inside a delay slot and
1058 * resume will always occur on the next instruction
1059 * (but we assume the pc has always been updated during
1060 * code translation).
1062 env
->CP0_DEPC
= env
->active_tc
.PC
| !!(env
->hflags
& MIPS_HFLAG_M16
);
1063 goto enter_debug_mode
;
1065 env
->CP0_Debug
|= 1 << CP0DB_DINT
;
1068 env
->CP0_Debug
|= 1 << CP0DB_DIB
;
1071 env
->CP0_Debug
|= 1 << CP0DB_DBp
;
1072 /* Setup DExcCode - SDBBP instruction */
1073 env
->CP0_Debug
= (env
->CP0_Debug
& ~(0x1fULL
<< CP0DB_DEC
)) |
1077 env
->CP0_Debug
|= 1 << CP0DB_DDBS
;
1080 env
->CP0_Debug
|= 1 << CP0DB_DDBL
;
1082 env
->CP0_DEPC
= exception_resume_pc(env
);
1083 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
1085 if (env
->insn_flags
& ISA_MIPS3
) {
1086 env
->hflags
|= MIPS_HFLAG_64
;
1087 if (!(env
->insn_flags
& ISA_MIPS_R6
) ||
1088 env
->CP0_Status
& (1 << CP0St_KX
)) {
1089 env
->hflags
&= ~MIPS_HFLAG_AWRAP
;
1092 env
->hflags
|= MIPS_HFLAG_DM
| MIPS_HFLAG_CP0
;
1093 env
->hflags
&= ~(MIPS_HFLAG_KSU
);
1094 /* EJTAG probe trap enable is not implemented... */
1095 if (!(env
->CP0_Status
& (1 << CP0St_EXL
))) {
1096 env
->CP0_Cause
&= ~(1U << CP0Ca_BD
);
1098 env
->active_tc
.PC
= env
->exception_base
+ 0x480;
1099 set_hflags_for_handler(env
);
1102 cpu_reset(CPU(cpu
));
1105 env
->CP0_Status
|= (1 << CP0St_SR
);
1106 memset(env
->CP0_WatchLo
, 0, sizeof(env
->CP0_WatchLo
));
1109 env
->CP0_Status
|= (1 << CP0St_NMI
);
1111 env
->CP0_ErrorEPC
= exception_resume_pc(env
);
1112 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
1113 env
->CP0_Status
|= (1 << CP0St_ERL
) | (1 << CP0St_BEV
);
1114 if (env
->insn_flags
& ISA_MIPS3
) {
1115 env
->hflags
|= MIPS_HFLAG_64
;
1116 if (!(env
->insn_flags
& ISA_MIPS_R6
) ||
1117 env
->CP0_Status
& (1 << CP0St_KX
)) {
1118 env
->hflags
&= ~MIPS_HFLAG_AWRAP
;
1121 env
->hflags
|= MIPS_HFLAG_CP0
;
1122 env
->hflags
&= ~(MIPS_HFLAG_KSU
);
1123 if (!(env
->CP0_Status
& (1 << CP0St_EXL
))) {
1124 env
->CP0_Cause
&= ~(1U << CP0Ca_BD
);
1126 env
->active_tc
.PC
= env
->exception_base
;
1127 set_hflags_for_handler(env
);
1129 case EXCP_EXT_INTERRUPT
:
1131 if (env
->CP0_Cause
& (1 << CP0Ca_IV
)) {
1132 uint32_t spacing
= (env
->CP0_IntCtl
>> CP0IntCtl_VS
) & 0x1f;
1134 if ((env
->CP0_Status
& (1 << CP0St_BEV
)) || spacing
== 0) {
1137 uint32_t vector
= 0;
1138 uint32_t pending
= (env
->CP0_Cause
& CP0Ca_IP_mask
) >> CP0Ca_IP
;
1140 if (env
->CP0_Config3
& (1 << CP0C3_VEIC
)) {
1142 * For VEIC mode, the external interrupt controller feeds
1143 * the vector through the CP0Cause IP lines.
1148 * Vectored Interrupts
1149 * Mask with Status.IM7-IM0 to get enabled interrupts.
1151 pending
&= (env
->CP0_Status
>> CP0St_IM
) & 0xff;
1152 /* Find the highest-priority interrupt. */
1153 while (pending
>>= 1) {
1157 offset
= 0x200 + (vector
* (spacing
<< 5));
1163 update_badinstr
= !(env
->error_code
& EXCP_INST_NOTAVAIL
);
1167 update_badinstr
= !(env
->error_code
& EXCP_INST_NOTAVAIL
);
1168 if ((env
->error_code
& EXCP_TLB_NOMATCH
) &&
1169 !(env
->CP0_Status
& (1 << CP0St_EXL
))) {
1170 #if defined(TARGET_MIPS64)
1171 int R
= env
->CP0_BadVAddr
>> 62;
1172 int UX
= (env
->CP0_Status
& (1 << CP0St_UX
)) != 0;
1173 int KX
= (env
->CP0_Status
& (1 << CP0St_KX
)) != 0;
1175 if ((R
!= 0 || UX
) && (R
!= 3 || KX
) &&
1176 (!(env
->insn_flags
& (INSN_LOONGSON2E
| INSN_LOONGSON2F
)))) {
1181 #if defined(TARGET_MIPS64)
1188 update_badinstr
= 1;
1189 if ((env
->error_code
& EXCP_TLB_NOMATCH
) &&
1190 !(env
->CP0_Status
& (1 << CP0St_EXL
))) {
1191 #if defined(TARGET_MIPS64)
1192 int R
= env
->CP0_BadVAddr
>> 62;
1193 int UX
= (env
->CP0_Status
& (1 << CP0St_UX
)) != 0;
1194 int KX
= (env
->CP0_Status
& (1 << CP0St_KX
)) != 0;
1196 if ((R
!= 0 || UX
) && (R
!= 3 || KX
) &&
1197 (!(env
->insn_flags
& (INSN_LOONGSON2E
| INSN_LOONGSON2F
)))) {
1202 #if defined(TARGET_MIPS64)
1209 update_badinstr
= !(env
->error_code
& EXCP_INST_NOTAVAIL
);
1213 update_badinstr
= 1;
1223 update_badinstr
= 1;
1227 update_badinstr
= 1;
1231 update_badinstr
= 1;
1235 update_badinstr
= 1;
1236 env
->CP0_Cause
= (env
->CP0_Cause
& ~(0x3 << CP0Ca_CE
)) |
1237 (env
->error_code
<< CP0Ca_CE
);
1241 update_badinstr
= 1;
1245 update_badinstr
= 1;
1249 update_badinstr
= 1;
1253 update_badinstr
= 1;
1260 update_badinstr
= 1;
1267 update_badinstr
= 1;
1274 /* XXX: TODO: manage deferred watch exceptions */
1289 if (!(env
->CP0_Status
& (1 << CP0St_EXL
))) {
1290 env
->CP0_EPC
= exception_resume_pc(env
);
1291 if (update_badinstr
) {
1292 set_badinstr_registers(env
);
1294 if (env
->hflags
& MIPS_HFLAG_BMASK
) {
1295 env
->CP0_Cause
|= (1U << CP0Ca_BD
);
1297 env
->CP0_Cause
&= ~(1U << CP0Ca_BD
);
1299 env
->CP0_Status
|= (1 << CP0St_EXL
);
1300 if (env
->insn_flags
& ISA_MIPS3
) {
1301 env
->hflags
|= MIPS_HFLAG_64
;
1302 if (!(env
->insn_flags
& ISA_MIPS_R6
) ||
1303 env
->CP0_Status
& (1 << CP0St_KX
)) {
1304 env
->hflags
&= ~MIPS_HFLAG_AWRAP
;
1307 env
->hflags
|= MIPS_HFLAG_CP0
;
1308 env
->hflags
&= ~(MIPS_HFLAG_KSU
);
1310 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
1311 if (env
->CP0_Status
& (1 << CP0St_BEV
)) {
1312 env
->active_tc
.PC
= env
->exception_base
+ 0x200;
1313 } else if (cause
== 30 && !(env
->CP0_Config3
& (1 << CP0C3_SC
) &&
1314 env
->CP0_Config5
& (1 << CP0C5_CV
))) {
1315 /* Force KSeg1 for cache errors */
1316 env
->active_tc
.PC
= KSEG1_BASE
| (env
->CP0_EBase
& 0x1FFFF000);
1318 env
->active_tc
.PC
= env
->CP0_EBase
& ~0xfff;
1321 env
->active_tc
.PC
+= offset
;
1322 set_hflags_for_handler(env
);
1323 env
->CP0_Cause
= (env
->CP0_Cause
& ~(0x1f << CP0Ca_EC
)) |
1324 (cause
<< CP0Ca_EC
);
1329 if (qemu_loglevel_mask(CPU_LOG_INT
)
1330 && cs
->exception_index
!= EXCP_EXT_INTERRUPT
) {
1331 qemu_log("%s: PC " TARGET_FMT_lx
" EPC " TARGET_FMT_lx
" cause %d\n"
1332 " S %08x C %08x A " TARGET_FMT_lx
" D " TARGET_FMT_lx
"\n",
1333 __func__
, env
->active_tc
.PC
, env
->CP0_EPC
, cause
,
1334 env
->CP0_Status
, env
->CP0_Cause
, env
->CP0_BadVAddr
,
1337 cs
->exception_index
= EXCP_NONE
;
1340 bool mips_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1342 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
1343 MIPSCPU
*cpu
= MIPS_CPU(cs
);
1344 CPUMIPSState
*env
= &cpu
->env
;
1346 if (cpu_mips_hw_interrupts_enabled(env
) &&
1347 cpu_mips_hw_interrupts_pending(env
)) {
1349 cs
->exception_index
= EXCP_EXT_INTERRUPT
;
1350 env
->error_code
= 0;
1351 mips_cpu_do_interrupt(cs
);
1358 void r4k_invalidate_tlb(CPUMIPSState
*env
, int idx
, int use_extra
)
1360 CPUState
*cs
= env_cpu(env
);
1364 uint16_t ASID
= env
->CP0_EntryHi
& env
->CP0_EntryHi_ASID_mask
;
1365 uint32_t MMID
= env
->CP0_MemoryMapID
;
1366 bool mi
= !!((env
->CP0_Config5
>> CP0C5_MI
) & 1);
1370 MMID
= mi
? MMID
: (uint32_t) ASID
;
1372 tlb
= &env
->tlb
->mmu
.r4k
.tlb
[idx
];
1374 * The qemu TLB is flushed when the ASID/MMID changes, so no need to
1375 * flush these entries again.
1377 tlb_mmid
= mi
? tlb
->MMID
: (uint32_t) tlb
->ASID
;
1378 if (tlb
->G
== 0 && tlb_mmid
!= MMID
) {
1382 if (use_extra
&& env
->tlb
->tlb_in_use
< MIPS_TLB_MAX
) {
1384 * For tlbwr, we can shadow the discarded entry into
1385 * a new (fake) TLB entry, as long as the guest can not
1386 * tell that it's there.
1388 env
->tlb
->mmu
.r4k
.tlb
[env
->tlb
->tlb_in_use
] = *tlb
;
1389 env
->tlb
->tlb_in_use
++;
1393 /* 1k pages are not supported. */
1394 mask
= tlb
->PageMask
| ~(TARGET_PAGE_MASK
<< 1);
1396 addr
= tlb
->VPN
& ~mask
;
1397 #if defined(TARGET_MIPS64)
1398 if (addr
>= (0xFFFFFFFF80000000ULL
& env
->SEGMask
)) {
1399 addr
|= 0x3FFFFF0000000000ULL
;
1402 end
= addr
| (mask
>> 1);
1403 while (addr
< end
) {
1404 // optimize memset in tlb_flush_page!!!
1405 tlb_flush_page(cs
, addr
);
1406 addr
+= TARGET_PAGE_SIZE
;
1410 addr
= (tlb
->VPN
& ~mask
) | ((mask
>> 1) + 1);
1411 #if defined(TARGET_MIPS64)
1412 if (addr
>= (0xFFFFFFFF80000000ULL
& env
->SEGMask
)) {
1413 addr
|= 0x3FFFFF0000000000ULL
;
1417 while (addr
- 1 < end
) {
1418 // optimize memset in tlb_flush_page!!!
1419 tlb_flush_page(cs
, addr
);
1420 addr
+= TARGET_PAGE_SIZE
;