[qemu/ar7.git] / target / mips / helper.c
1 /*
2 * MIPS emulation helpers for qemu.
4 * Copyright (c) 2004-2005 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "internal.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "exec/log.h"
26 #include "hw/mips/cpudevs.h"
27 #include "qapi/qapi-commands-target.h"
29 enum {
30 TLBRET_XI = -6,
31 TLBRET_RI = -5,
32 TLBRET_DIRTY = -4,
33 TLBRET_INVALID = -3,
34 TLBRET_NOMATCH = -2,
35 TLBRET_BADADDR = -1,
36 TLBRET_MATCH = 0
39 #if !defined(CONFIG_USER_ONLY)
41 /* no MMU emulation */
42 int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
43 target_ulong address, int rw, int access_type)
45 *physical = address;
46 *prot = PAGE_READ | PAGE_WRITE;
47 return TLBRET_MATCH;
50 /* fixed mapping MMU emulation */
51 int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
52 target_ulong address, int rw, int access_type)
54 if (address <= (int32_t)0x7FFFFFFFUL) {
55 if (!(env->CP0_Status & (1 << CP0St_ERL)))
56 *physical = address + 0x40000000UL;
57 else
58 *physical = address;
59 } else if (address <= (int32_t)0xBFFFFFFFUL)
60 *physical = address & 0x1FFFFFFF;
61 else
62 *physical = address;
64 *prot = PAGE_READ | PAGE_WRITE;
65 return TLBRET_MATCH;
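/*
 * Worked example for the fixed mapping MMU above (illustration only, not part
 * of the original source): with CP0.Status.ERL clear, useg address 0x00001000
 * is relocated to physical 0x40001000; an address in the kseg0/kseg1 window
 * (0x80000000..0xBFFFFFFF) is masked to (address & 0x1FFFFFFF); any other
 * address is passed through unchanged.
 */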
68 /* MIPS32/MIPS64 R4000-style MMU emulation */
69 int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
70 target_ulong address, int rw, int access_type)
72 uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
73 int i;
75 for (i = 0; i < env->tlb->tlb_in_use; i++) {
76 r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
77 /* 1k pages are not supported. */
78 target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
79 target_ulong tag = address & ~mask;
80 target_ulong VPN = tlb->VPN & ~mask;
81 #if defined(TARGET_MIPS64)
82 tag &= env->SEGMask;
83 #endif
85 /* Check ASID, virtual page number & size */
86 if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
87 /* TLB match */
88 int n = !!(address & mask & ~(mask >> 1));
89 /* Check access rights */
90 if (!(n ? tlb->V1 : tlb->V0)) {
91 return TLBRET_INVALID;
93 if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
94 return TLBRET_XI;
96 if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
97 return TLBRET_RI;
99 if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
100 *physical = tlb->PFN[n] | (address & (mask >> 1));
101 *prot = PAGE_READ;
102 if (n ? tlb->D1 : tlb->D0)
103 *prot |= PAGE_WRITE;
104 return TLBRET_MATCH;
106 return TLBRET_DIRTY;
109 return TLBRET_NOMATCH;
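/*
 * Illustration of the even/odd page selection above, assuming 4 KiB target
 * pages (PageMask == 0): mask becomes 0x1FFF, so bit 12 of the virtual
 * address picks the even (V0/D0/PFN[0]) or odd (V1/D1/PFN[1]) page of the
 * TLB pair, and the low 12 bits (address & (mask >> 1)) are the page offset.
 */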
112 static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
115 * Interpret access control mode and mmu_idx.
116 *           AdE?     TLB?
117 *      AM  K S U E  K S U E
118 * UK    0  0 1 1 0  0 - - 0
119 * MK    1  0 1 1 0  1 - - !eu
120 * MSK   2  0 0 1 0  1 1 - !eu
121 * MUSK  3  0 0 0 0  1 1 1 !eu
122 * MUSUK 4  0 0 0 0  0 1 1 0
123 * USK   5  0 0 1 0  0 0 - 0
124 * -     6  - - - -  - - - -
125 * UUSK  7  0 0 0 0  0 0 0 0
127 int32_t adetlb_mask;
129 switch (mmu_idx) {
130 case 3 /* ERL */:
131 /* If EU is set, always unmapped */
132 if (eu) {
133 return 0;
135 /* fall through */
136 case MIPS_HFLAG_KM:
137 /* Never AdE, TLB mapped if AM={1,2,3} */
138 adetlb_mask = 0x70000000;
139 goto check_tlb;
141 case MIPS_HFLAG_SM:
142 /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
143 adetlb_mask = 0xc0380000;
144 goto check_ade;
146 case MIPS_HFLAG_UM:
147 /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
148 adetlb_mask = 0xe4180000;
149 /* fall through */
150 check_ade:
151 /* does this AM cause AdE in current execution mode */
152 if ((adetlb_mask << am) < 0) {
153 return TLBRET_BADADDR;
155 adetlb_mask <<= 8;
156 /* fall through */
157 check_tlb:
158 /* is this AM mapped in current execution mode */
159 return ((adetlb_mask << am) < 0);
160 default:
161 assert(0);
162 return TLBRET_BADADDR;
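/*
 * Sketch of how the adetlb_mask test above works (illustration, not part of
 * the original code): bit (31 - AM) of the constant encodes "causes AdE" for
 * the current mode, and the same test after "adetlb_mask <<= 8" encodes
 * "TLB mapped". E.g. in user mode with AM = 3 (MUSK): 0xe4180000 << 3 has
 * bit 31 clear, so no address error; after the << 8 the value is 0x18000000,
 * and 0x18000000 << 3 has bit 31 set, so the segment is TLB mapped.
 */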
166 static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical,
167 int *prot, target_ulong real_address,
168 int rw, int access_type, int mmu_idx,
169 unsigned int am, bool eu,
170 target_ulong segmask,
171 hwaddr physical_base)
173 int mapped = is_seg_am_mapped(am, eu, mmu_idx);
175 if (mapped < 0) {
176 /* is_seg_am_mapped can report TLBRET_BADADDR */
177 return mapped;
178 } else if (mapped) {
179 /* The segment is TLB mapped */
180 return env->tlb->map_address(env, physical, prot, real_address, rw,
181 access_type);
182 } else {
183 /* The segment is unmapped */
184 *physical = physical_base | (real_address & segmask);
185 *prot = PAGE_READ | PAGE_WRITE;
186 return TLBRET_MATCH;
190 static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical,
191 int *prot, target_ulong real_address,
192 int rw, int access_type, int mmu_idx,
193 uint16_t segctl, target_ulong segmask)
195 unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM;
196 bool eu = (segctl >> CP0SC_EU) & 1;
197 hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20;
199 return get_seg_physical_address(env, physical, prot, real_address, rw,
200 access_type, mmu_idx, am, eu, segmask,
201 pa & ~(hwaddr)segmask);
204 static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
205 int *prot, target_ulong real_address,
206 int rw, int access_type, int mmu_idx)
208 /* User mode can only access useg/xuseg */
209 #if defined(TARGET_MIPS64)
210 int user_mode = mmu_idx == MIPS_HFLAG_UM;
211 int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
212 int kernel_mode = !user_mode && !supervisor_mode;
213 int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
214 int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
215 int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
216 #endif
217 int ret = TLBRET_MATCH;
218 /* effective address (modified for KVM T&E kernel segments) */
219 target_ulong address = real_address;
221 #define USEG_LIMIT ((target_ulong)(int32_t)0x7FFFFFFFUL)
222 #define KSEG0_BASE ((target_ulong)(int32_t)0x80000000UL)
223 #define KSEG1_BASE ((target_ulong)(int32_t)0xA0000000UL)
224 #define KSEG2_BASE ((target_ulong)(int32_t)0xC0000000UL)
225 #define KSEG3_BASE ((target_ulong)(int32_t)0xE0000000UL)
227 #define KVM_KSEG0_BASE ((target_ulong)(int32_t)0x40000000UL)
228 #define KVM_KSEG2_BASE ((target_ulong)(int32_t)0x60000000UL)
230 if (mips_um_ksegs_enabled()) {
231 /* KVM T&E adds guest kernel segments in useg */
232 if (real_address >= KVM_KSEG0_BASE) {
233 if (real_address < KVM_KSEG2_BASE) {
234 /* kseg0 */
235 address += KSEG0_BASE - KVM_KSEG0_BASE;
236 } else if (real_address <= USEG_LIMIT) {
237 /* kseg2/3 */
238 address += KSEG2_BASE - KVM_KSEG2_BASE;
243 if (address <= USEG_LIMIT) {
244 /* useg */
245 uint16_t segctl;
247 if (address >= 0x40000000UL) {
248 segctl = env->CP0_SegCtl2;
249 } else {
250 segctl = env->CP0_SegCtl2 >> 16;
252 ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
253 access_type, mmu_idx, segctl,
254 0x3FFFFFFF);
255 #if defined(TARGET_MIPS64)
256 } else if (address < 0x4000000000000000ULL) {
257 /* xuseg */
258 if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
259 ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
260 } else {
261 ret = TLBRET_BADADDR;
263 } else if (address < 0x8000000000000000ULL) {
264 /* xsseg */
265 if ((supervisor_mode || kernel_mode) &&
266 SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
267 ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
268 } else {
269 ret = TLBRET_BADADDR;
271 } else if (address < 0xC000000000000000ULL) {
272 /* xkphys */
273 if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
274 /* KX/SX/UX bit to check for each xkphys EVA access mode */
275 static const uint8_t am_ksux[8] = {
276 [CP0SC_AM_UK] = (1u << CP0St_KX),
277 [CP0SC_AM_MK] = (1u << CP0St_KX),
278 [CP0SC_AM_MSK] = (1u << CP0St_SX),
279 [CP0SC_AM_MUSK] = (1u << CP0St_UX),
280 [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
281 [CP0SC_AM_USK] = (1u << CP0St_SX),
282 [6] = (1u << CP0St_KX),
283 [CP0SC_AM_UUSK] = (1u << CP0St_UX),
285 unsigned int am = CP0SC_AM_UK;
286 unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;
288 if (xr & (1 << ((address >> 59) & 0x7))) {
289 am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
291 /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
292 if (env->CP0_Status & am_ksux[am]) {
293 ret = get_seg_physical_address(env, physical, prot,
294 real_address, rw, access_type,
295 mmu_idx, am, false, env->PAMask,
296 0);
297 } else {
298 ret = TLBRET_BADADDR;
300 } else {
301 ret = TLBRET_BADADDR;
303 } else if (address < 0xFFFFFFFF80000000ULL) {
304 /* xkseg */
305 if (kernel_mode && KX &&
306 address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
307 ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
308 } else {
309 ret = TLBRET_BADADDR;
311 #endif
312 } else if (address < KSEG1_BASE) {
313 /* kseg0 */
314 ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
315 access_type, mmu_idx,
316 env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
317 } else if (address < KSEG2_BASE) {
318 /* kseg1 */
319 ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
320 access_type, mmu_idx,
321 env->CP0_SegCtl1, 0x1FFFFFFF);
322 } else if (address < KSEG3_BASE) {
323 /* sseg (kseg2) */
324 ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
325 access_type, mmu_idx,
326 env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
327 } else {
328 /* kseg3 */
329 /* XXX: debug segment is not emulated */
330 ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
331 access_type, mmu_idx,
332 env->CP0_SegCtl0, 0x1FFFFFFF);
334 return ret;
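/*
 * Summary derived from get_physical_address() above (for orientation only):
 * useg below 0x40000000 uses the high 16-bit half of CP0_SegCtl2 and the
 * upper useg range uses its low half; kseg0/kseg1 use the high/low halves of
 * CP0_SegCtl1; sseg (kseg2)/kseg3 use the high/low halves of CP0_SegCtl0.
 * Each 16-bit CFG half supplies the PA base, access mode (AM) and EU bit
 * decoded in get_segctl_physical_address().
 */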
337 void cpu_mips_tlb_flush(CPUMIPSState *env)
339 MIPSCPU *cpu = mips_env_get_cpu(env);
341 /* Flush qemu's TLB and discard all shadowed entries. */
342 tlb_flush(CPU(cpu));
343 env->tlb->tlb_in_use = env->tlb->nb_tlb;
346 /* Called for updates to CP0_Status. */
347 void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
349 int32_t tcstatus, *tcst;
350 uint32_t v = cpu->CP0_Status;
351 uint32_t cu, mx, asid, ksu;
352 uint32_t mask = ((1 << CP0TCSt_TCU3)
353 | (1 << CP0TCSt_TCU2)
354 | (1 << CP0TCSt_TCU1)
355 | (1 << CP0TCSt_TCU0)
356 | (1 << CP0TCSt_TMX)
357 | (3 << CP0TCSt_TKSU)
358 | (0xff << CP0TCSt_TASID));
360 cu = (v >> CP0St_CU0) & 0xf;
361 mx = (v >> CP0St_MX) & 0x1;
362 ksu = (v >> CP0St_KSU) & 0x3;
363 asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
365 tcstatus = cu << CP0TCSt_TCU0;
366 tcstatus |= mx << CP0TCSt_TMX;
367 tcstatus |= ksu << CP0TCSt_TKSU;
368 tcstatus |= asid;
370 if (tc == cpu->current_tc) {
371 tcst = &cpu->active_tc.CP0_TCStatus;
372 } else {
373 tcst = &cpu->tcs[tc].CP0_TCStatus;
376 *tcst &= ~mask;
377 *tcst |= tcstatus;
378 compute_hflags(cpu);
381 void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
383 uint32_t mask = env->CP0_Status_rw_bitmask;
384 target_ulong old = env->CP0_Status;
386 if (env->insn_flags & ISA_MIPS32R6) {
387 bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
388 #if defined(TARGET_MIPS64)
389 uint32_t ksux = (1 << CP0St_KX) & val;
390 ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
391 ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
392 val = (val & ~(7 << CP0St_UX)) | ksux;
393 #endif
394 if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
395 mask &= ~(3 << CP0St_KSU);
397 mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
400 env->CP0_Status = (old & ~mask) | (val & mask);
401 #if defined(TARGET_MIPS64)
402 if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
403 /* Access to at least one of the 64-bit segments has been disabled */
404 tlb_flush(CPU(mips_env_get_cpu(env)));
406 #endif
407 if (env->CP0_Config3 & (1 << CP0C3_MT)) {
408 sync_c0_status(env, env, env->current_tc);
409 } else {
410 compute_hflags(env);
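/*
 * Example of the R6 KX/SX/UX dependency enforced above (illustrative only):
 * if a guest writes Status with KX=1, SX=0, UX=1, the computed ksux keeps
 * only the KX bit, so the stored value has SX=0 and UX=0, because SX=0
 * forces UX off just as KX=0 would force SX off.
 */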
414 void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
416 uint32_t mask = 0x00C00300;
417 uint32_t old = env->CP0_Cause;
418 int i;
420 if (env->insn_flags & ISA_MIPS32R2) {
421 mask |= 1 << CP0Ca_DC;
423 if (env->insn_flags & ISA_MIPS32R6) {
424 mask &= ~((1 << CP0Ca_WP) & val);
427 env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);
429 if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
430 if (env->CP0_Cause & (1 << CP0Ca_DC)) {
431 cpu_mips_stop_count(env);
432 } else {
433 cpu_mips_start_count(env);
437 /* Set/reset software interrupts */
438 for (i = 0 ; i < 2 ; i++) {
439 if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
440 cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
444 #endif
446 static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
447 int rw, int tlb_error)
449 CPUState *cs = CPU(mips_env_get_cpu(env));
450 int exception = 0, error_code = 0;
452 if (rw == MMU_INST_FETCH) {
453 error_code |= EXCP_INST_NOTAVAIL;
456 switch (tlb_error) {
457 default:
458 case TLBRET_BADADDR:
459 /* Reference to kernel address from user mode or supervisor mode */
460 /* Reference to supervisor address from user mode */
461 if (rw == MMU_DATA_STORE) {
462 exception = EXCP_AdES;
463 } else {
464 exception = EXCP_AdEL;
466 break;
467 case TLBRET_NOMATCH:
468 /* No TLB match for a mapped address */
469 if (rw == MMU_DATA_STORE) {
470 exception = EXCP_TLBS;
471 } else {
472 exception = EXCP_TLBL;
474 error_code |= EXCP_TLB_NOMATCH;
475 break;
476 case TLBRET_INVALID:
477 /* TLB match with no valid bit */
478 if (rw == MMU_DATA_STORE) {
479 exception = EXCP_TLBS;
480 } else {
481 exception = EXCP_TLBL;
483 break;
484 case TLBRET_DIRTY:
485 /* TLB match but 'D' bit is cleared */
486 exception = EXCP_LTLBL;
487 break;
488 case TLBRET_XI:
489 /* Execute-Inhibit Exception */
490 if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
491 exception = EXCP_TLBXI;
492 } else {
493 exception = EXCP_TLBL;
495 break;
496 case TLBRET_RI:
497 /* Read-Inhibit Exception */
498 if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
499 exception = EXCP_TLBRI;
500 } else {
501 exception = EXCP_TLBL;
503 break;
505 /* Raise exception */
506 if (!(env->hflags & MIPS_HFLAG_DM)) {
507 env->CP0_BadVAddr = address;
509 env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
510 ((address >> 9) & 0x007ffff0);
511 env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
512 (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
513 (address & (TARGET_PAGE_MASK << 1));
514 #if defined(TARGET_MIPS64)
515 env->CP0_EntryHi &= env->SEGMask;
516 env->CP0_XContext =
517 /* PTEBase */ (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
518 /* R */ (extract64(address, 62, 2) << (env->SEGBITS - 9)) |
519 /* BadVPN2 */ (extract64(address, 13, env->SEGBITS - 13) << 4);
520 #endif
521 cs->exception_index = exception;
522 env->error_code = error_code;
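/*
 * Descriptive note (not in the original source): the CP0 state written above
 * (BadVAddr, Context with BadVPN2, EntryHi with the faulting VPN2 and ASID,
 * plus XContext on MIPS64) is what a software TLB refill or address error
 * handler typically reads to construct the replacement TLB entry or report
 * the fault.
 */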
525 #if !defined(CONFIG_USER_ONLY)
526 hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
528 MIPSCPU *cpu = MIPS_CPU(cs);
529 CPUMIPSState *env = &cpu->env;
530 hwaddr phys_addr;
531 int prot;
533 if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
534 cpu_mmu_index(env, false)) != 0) {
535 return -1;
537 return phys_addr;
539 #endif
541 #if !defined(CONFIG_USER_ONLY)
542 #if !defined(TARGET_MIPS64)
545 * Perform hardware page table walk
547 * Memory accesses are performed using the KERNEL privilege level.
548 * Synchronous exceptions detected on memory accesses cause a silent exit
549 * from page table walking, resulting in a TLB or XTLB Refill exception.
551 * Implementations are not required to support page table walk memory
552 * accesses from mapped memory regions. When an unsupported access is
553 * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill
554 * exception.
556 * Note that if an exception is caused by AddressTranslation or LoadMemory
557 * functions, the exception is not taken, a silent exit is taken,
558 * resulting in a TLB or XTLB Refill exception.
561 static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size,
562 uint64_t *pte)
564 if ((vaddr & ((entry_size >> 3) - 1)) != 0) {
565 return false;
567 if (entry_size == 64) {
568 *pte = cpu_ldq_code(env, vaddr);
569 } else {
570 *pte = cpu_ldl_code(env, vaddr);
572 return true;
575 static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry,
576 int entry_size, int ptei)
578 uint64_t result = entry;
579 uint64_t rixi;
580 if (ptei > entry_size) {
581 ptei -= 32;
583 result >>= (ptei - 2);
584 rixi = result & 3;
585 result >>= 2;
586 result |= rixi << CP0EnLo_XI;
587 return result;
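/*
 * Descriptive note for get_tlb_entry_layout() above (not in the original
 * source): the two bits at positions PTEI-2 and PTEI-1 of the in-memory PTE
 * are the RI/XI pair; the function extracts them and re-inserts them at the
 * XI position of QEMU's EntryLo layout, so the walked entry can be written
 * to the TLB like a normal EntryLo0/EntryLo1 value.
 */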
590 static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
591 int directory_index, bool *huge_page, bool *hgpg_directory_hit,
592 uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
594 int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
595 int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
596 int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
597 int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
598 int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
599 int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
600 int directory_shift = (ptew > 1) ? -1 :
601 (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
602 int leaf_shift = (ptew > 1) ? -1 :
603 (ptew == 1) ? native_shift + 1 : native_shift;
604 uint32_t direntry_size = 1 << (directory_shift + 3);
605 uint32_t leafentry_size = 1 << (leaf_shift + 3);
606 uint64_t entry;
607 uint64_t paddr;
608 int prot;
609 uint64_t lsb = 0;
610 uint64_t w = 0;
612 if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
613 ACCESS_INT, cpu_mmu_index(env, false)) !=
614 TLBRET_MATCH) {
615 /* wrong base address */
616 return 0;
618 if (!get_pte(env, *vaddr, direntry_size, &entry)) {
619 return 0;
622 if ((entry & (1 << psn)) && hugepg) {
623 *huge_page = true;
624 *hgpg_directory_hit = true;
625 entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
626 w = directory_index - 1;
627 if (directory_index & 0x1) {
628 /* Generate adjacent page from same PTE for odd TLB page */
629 lsb = (1 << w) >> 6;
630 *pw_entrylo0 = entry & ~lsb; /* even page */
631 *pw_entrylo1 = entry | lsb; /* odd page */
632 } else if (dph) {
633 int oddpagebit = 1 << leaf_shift;
634 uint64_t vaddr2 = *vaddr ^ oddpagebit;
635 if (*vaddr & oddpagebit) {
636 *pw_entrylo1 = entry;
637 } else {
638 *pw_entrylo0 = entry;
640 if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
641 ACCESS_INT, cpu_mmu_index(env, false)) !=
642 TLBRET_MATCH) {
643 return 0;
645 if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
646 return 0;
648 entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
649 if (*vaddr & oddpagebit) {
650 *pw_entrylo0 = entry;
651 } else {
652 *pw_entrylo1 = entry;
654 } else {
655 return 0;
657 return 1;
658 } else {
659 *vaddr = entry;
660 return 2;
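/*
 * Return-value convention of walk_directory() above, as used by the callers
 * below: 0 means the walk failed (fall back to the software refill handler),
 * 1 means a huge-page leaf was found and pw_entrylo0/pw_entrylo1 are already
 * filled in, and 2 means *vaddr now holds the next-level directory entry and
 * the walk continues.
 */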
664 static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw,
665 int mmu_idx)
667 int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
668 int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
669 int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
670 int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
671 int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
673 /* Initial values */
674 bool huge_page = false;
675 bool hgpg_bdhit = false;
676 bool hgpg_gdhit = false;
677 bool hgpg_udhit = false;
678 bool hgpg_mdhit = false;
680 int32_t pw_pagemask = 0;
681 target_ulong pw_entryhi = 0;
682 uint64_t pw_entrylo0 = 0;
683 uint64_t pw_entrylo1 = 0;
685 /* Native pointer size */
686 /* For the 32-bit architectures, this bit is fixed to 0. */
687 int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
689 /* Indices from PWField */
690 int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
691 int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
692 int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
693 int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
694 int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
696 /* Indices computed from faulting address */
697 int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
698 int uindex = (address >> pf_udw) & ((1 << udw) - 1);
699 int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
700 int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);
702 /* Other HTW configs */
703 int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
705 /* HTW Shift values (depend on entry size) */
706 int directory_shift = (ptew > 1) ? -1 :
707 (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
708 int leaf_shift = (ptew > 1) ? -1 :
709 (ptew == 1) ? native_shift + 1 : native_shift;
711 /* Offsets into tables */
712 int goffset = gindex << directory_shift;
713 int uoffset = uindex << directory_shift;
714 int moffset = mindex << directory_shift;
715 int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
716 int ptoffset1 = ptoffset0 | (1 << (leaf_shift));
718 uint32_t leafentry_size = 1 << (leaf_shift + 3);
720 /* Starting address - Page Table Base */
721 uint64_t vaddr = env->CP0_PWBase;
723 uint64_t dir_entry;
724 uint64_t paddr;
725 int prot;
726 int m;
728 if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
729 /* walker is unimplemented */
730 return false;
732 if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
733 /* walker is disabled */
734 return false;
736 if (!(gdw > 0 || udw > 0 || mdw > 0)) {
737 /* no structure to walk */
738 return false;
740 if ((directory_shift == -1) || (leaf_shift == -1)) {
741 return false;
744 /* Global Directory */
745 if (gdw > 0) {
746 vaddr |= goffset;
747 switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
748 &pw_entrylo0, &pw_entrylo1))
750 case 0:
751 return false;
752 case 1:
753 goto refill;
754 case 2:
755 default:
756 break;
760 /* Upper directory */
761 if (udw > 0) {
762 vaddr |= uoffset;
763 switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
764 &pw_entrylo0, &pw_entrylo1))
766 case 0:
767 return false;
768 case 1:
769 goto refill;
770 case 2:
771 default:
772 break;
776 /* Middle directory */
777 if (mdw > 0) {
778 vaddr |= moffset;
779 switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
780 &pw_entrylo0, &pw_entrylo1))
782 case 0:
783 return false;
784 case 1:
785 goto refill;
786 case 2:
787 default:
788 break;
792 /* Leaf Level Page Table - First half of PTE pair */
793 vaddr |= ptoffset0;
794 if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
795 ACCESS_INT, cpu_mmu_index(env, false)) !=
796 TLBRET_MATCH) {
797 return false;
799 if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
800 return false;
802 dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
803 pw_entrylo0 = dir_entry;
805 /* Leaf Level Page Table - Second half of PTE pair */
806 vaddr |= ptoffset1;
807 if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
808 ACCESS_INT, cpu_mmu_index(env, false)) !=
809 TLBRET_MATCH) {
810 return false;
812 if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
813 return false;
815 dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
816 pw_entrylo1 = dir_entry;
818 refill:
820 m = (1 << pf_ptw) - 1;
822 if (huge_page) {
823 switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
824 hgpg_mdhit)
826 case 4:
827 m = (1 << pf_gdw) - 1;
828 if (pf_gdw & 1) {
829 m >>= 1;
831 break;
832 case 2:
833 m = (1 << pf_udw) - 1;
834 if (pf_udw & 1) {
835 m >>= 1;
837 break;
838 case 1:
839 m = (1 << pf_mdw) - 1;
840 if (pf_mdw & 1) {
841 m >>= 1;
843 break;
846 pw_pagemask = m >> 12;
847 update_pagemask(env, pw_pagemask << 13, &pw_pagemask);
848 pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
850 target_ulong tmp_entryhi = env->CP0_EntryHi;
851 int32_t tmp_pagemask = env->CP0_PageMask;
852 uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
853 uint64_t tmp_entrylo1 = env->CP0_EntryLo1;
855 env->CP0_EntryHi = pw_entryhi;
856 env->CP0_PageMask = pw_pagemask;
857 env->CP0_EntryLo0 = pw_entrylo0;
858 env->CP0_EntryLo1 = pw_entrylo1;
861 * The hardware page walker inserts a page into the TLB in a manner
862 * identical to a TLBWR instruction as executed by the software refill
863 * handler.
865 r4k_helper_tlbwr(env);
867 env->CP0_EntryHi = tmp_entryhi;
868 env->CP0_PageMask = tmp_pagemask;
869 env->CP0_EntryLo0 = tmp_entrylo0;
870 env->CP0_EntryLo1 = tmp_entrylo1;
872 return true;
874 #endif
875 #endif
877 bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
878 MMUAccessType access_type, int mmu_idx,
879 bool probe, uintptr_t retaddr)
881 MIPSCPU *cpu = MIPS_CPU(cs);
882 CPUMIPSState *env = &cpu->env;
883 #if !defined(CONFIG_USER_ONLY)
884 hwaddr physical;
885 int prot;
886 int mips_access_type;
887 #endif
888 int ret = TLBRET_BADADDR;
890 /* data access */
891 #if !defined(CONFIG_USER_ONLY)
892 /* XXX: determine the correct access type by using cpu_restore_state() */
893 mips_access_type = ACCESS_INT;
894 ret = get_physical_address(env, &physical, &prot, address,
895 access_type, mips_access_type, mmu_idx);
896 switch (ret) {
897 case TLBRET_MATCH:
898 qemu_log_mask(CPU_LOG_MMU,
899 "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
900 " prot %d\n", __func__, address, physical, prot);
901 break;
902 default:
903 qemu_log_mask(CPU_LOG_MMU,
904 "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
905 ret);
906 break;
908 if (ret == TLBRET_MATCH) {
909 tlb_set_page(cs, address & TARGET_PAGE_MASK,
910 physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
911 mmu_idx, TARGET_PAGE_SIZE);
912 return true;
914 #if !defined(TARGET_MIPS64)
915 if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
917 * Memory reads during hardware page table walking are performed
918 * as if they were kernel-mode load instructions.
920 int mode = (env->hflags & MIPS_HFLAG_KSU);
921 bool ret_walker;
922 env->hflags &= ~MIPS_HFLAG_KSU;
923 ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx);
924 env->hflags |= mode;
925 if (ret_walker) {
926 ret = get_physical_address(env, &physical, &prot, address,
927 access_type, mips_access_type, mmu_idx);
928 if (ret == TLBRET_MATCH) {
929 tlb_set_page(cs, address & TARGET_PAGE_MASK,
930 physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
931 mmu_idx, TARGET_PAGE_SIZE);
932 return true;
936 #endif
937 if (probe) {
938 return false;
940 #endif
942 raise_mmu_exception(env, address, access_type, ret);
943 do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
946 #ifndef CONFIG_USER_ONLY
947 hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
949 hwaddr physical;
950 int prot;
951 int access_type;
952 int ret = 0;
954 /* data access */
955 access_type = ACCESS_INT;
956 ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
957 cpu_mmu_index(env, false));
958 if (ret != TLBRET_MATCH) {
959 raise_mmu_exception(env, address, rw, ret);
960 return -1LL;
961 } else {
962 return physical;
966 static const char * const excp_names[EXCP_LAST + 1] = {
967 [EXCP_RESET] = "reset",
968 [EXCP_SRESET] = "soft reset",
969 [EXCP_DSS] = "debug single step",
970 [EXCP_DINT] = "debug interrupt",
971 [EXCP_NMI] = "non-maskable interrupt",
972 [EXCP_MCHECK] = "machine check",
973 [EXCP_EXT_INTERRUPT] = "interrupt",
974 [EXCP_DFWATCH] = "deferred watchpoint",
975 [EXCP_DIB] = "debug instruction breakpoint",
976 [EXCP_IWATCH] = "instruction fetch watchpoint",
977 [EXCP_AdEL] = "address error load",
978 [EXCP_AdES] = "address error store",
979 [EXCP_TLBF] = "TLB refill",
980 [EXCP_IBE] = "instruction bus error",
981 [EXCP_DBp] = "debug breakpoint",
982 [EXCP_SYSCALL] = "syscall",
983 [EXCP_BREAK] = "break",
984 [EXCP_CpU] = "coprocessor unusable",
985 [EXCP_RI] = "reserved instruction",
986 [EXCP_OVERFLOW] = "arithmetic overflow",
987 [EXCP_TRAP] = "trap",
988 [EXCP_FPE] = "floating point",
989 [EXCP_DDBS] = "debug data break store",
990 [EXCP_DWATCH] = "data watchpoint",
991 [EXCP_LTLBL] = "TLB modify",
992 [EXCP_TLBL] = "TLB load",
993 [EXCP_TLBS] = "TLB store",
994 [EXCP_DBE] = "data bus error",
995 [EXCP_DDBL] = "debug data break load",
996 [EXCP_THREAD] = "thread",
997 [EXCP_MDMX] = "MDMX",
998 [EXCP_C2E] = "precise coprocessor 2",
999 [EXCP_CACHE] = "cache error",
1000 [EXCP_TLBXI] = "TLB execute-inhibit",
1001 [EXCP_TLBRI] = "TLB read-inhibit",
1002 [EXCP_MSADIS] = "MSA disabled",
1003 [EXCP_MSAFPE] = "MSA floating point",
1005 #endif
1007 target_ulong exception_resume_pc (CPUMIPSState *env)
1009 target_ulong bad_pc;
1010 target_ulong isa_mode;
1012 isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
1013 bad_pc = env->active_tc.PC | isa_mode;
1014 if (env->hflags & MIPS_HFLAG_BMASK) {
1015 /* If the exception was raised from a delay slot, come back to
1016 the jump. */
1017 bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
1020 return bad_pc;
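/*
 * Example (illustration only): if an exception is taken in the delay slot of
 * a standard 32-bit branch, MIPS_HFLAG_BMASK is set and bad_pc is moved back
 * by 4, so EPC ends up pointing at the branch itself, while the low ISA-mode
 * bit still records whether the faulting code was MIPS16/microMIPS.
 */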
1023 #if !defined(CONFIG_USER_ONLY)
1024 static void set_hflags_for_handler (CPUMIPSState *env)
1026 /* Exception handlers are entered in 32-bit mode. */
1027 env->hflags &= ~(MIPS_HFLAG_M16);
1028 /* ...except that microMIPS lets you choose. */
1029 if (env->insn_flags & ASE_MICROMIPS) {
1030 env->hflags |= (!!(env->CP0_Config3
1031 & (1 << CP0C3_ISA_ON_EXC))
1032 << MIPS_HFLAG_M16_SHIFT);
1036 static inline void set_badinstr_registers(CPUMIPSState *env)
1038 if (env->insn_flags & ISA_NANOMIPS32) {
1039 if (env->CP0_Config3 & (1 << CP0C3_BI)) {
1040 uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16;
1041 if ((instr & 0x10000000) == 0) {
1042 instr |= cpu_lduw_code(env, env->active_tc.PC + 2);
1044 env->CP0_BadInstr = instr;
1046 if ((instr & 0xFC000000) == 0x60000000) {
1047 instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16;
1048 env->CP0_BadInstrX = instr;
1051 return;
1054 if (env->hflags & MIPS_HFLAG_M16) {
1055 /* TODO: add BadInstr support for microMIPS */
1056 return;
1058 if (env->CP0_Config3 & (1 << CP0C3_BI)) {
1059 env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
1061 if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
1062 (env->hflags & MIPS_HFLAG_BMASK)) {
1063 env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
1066 #endif
1068 void mips_cpu_do_interrupt(CPUState *cs)
1070 #if !defined(CONFIG_USER_ONLY)
1071 MIPSCPU *cpu = MIPS_CPU(cs);
1072 CPUMIPSState *env = &cpu->env;
1073 bool update_badinstr = 0;
1074 target_ulong offset;
1075 int cause = -1;
1076 const char *name;
1078 if (qemu_loglevel_mask(CPU_LOG_INT)
1079 && cs->exception_index != EXCP_EXT_INTERRUPT) {
1080 if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
1081 name = "unknown";
1082 } else {
1083 name = excp_names[cs->exception_index];
1086 qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
1087 " %s exception\n",
1088 __func__, env->active_tc.PC, env->CP0_EPC, name);
1090 if (cs->exception_index == EXCP_EXT_INTERRUPT &&
1091 (env->hflags & MIPS_HFLAG_DM)) {
1092 cs->exception_index = EXCP_DINT;
1094 offset = 0x180;
1095 switch (cs->exception_index) {
1096 case EXCP_DSS:
1097 env->CP0_Debug |= 1 << CP0DB_DSS;
1098 /* Debug single step cannot be raised inside a delay slot and
1099 resume will always occur on the next instruction
1100 (but we assume the pc has always been updated during
1101 code translation). */
1102 env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
1103 goto enter_debug_mode;
1104 case EXCP_DINT:
1105 env->CP0_Debug |= 1 << CP0DB_DINT;
1106 goto set_DEPC;
1107 case EXCP_DIB:
1108 env->CP0_Debug |= 1 << CP0DB_DIB;
1109 goto set_DEPC;
1110 case EXCP_DBp:
1111 env->CP0_Debug |= 1 << CP0DB_DBp;
1112 /* Setup DExcCode - SDBBP instruction */
1113 env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) | 9 << CP0DB_DEC;
1114 goto set_DEPC;
1115 case EXCP_DDBS:
1116 env->CP0_Debug |= 1 << CP0DB_DDBS;
1117 goto set_DEPC;
1118 case EXCP_DDBL:
1119 env->CP0_Debug |= 1 << CP0DB_DDBL;
1120 set_DEPC:
1121 env->CP0_DEPC = exception_resume_pc(env);
1122 env->hflags &= ~MIPS_HFLAG_BMASK;
1123 enter_debug_mode:
1124 if (env->insn_flags & ISA_MIPS3) {
1125 env->hflags |= MIPS_HFLAG_64;
1126 if (!(env->insn_flags & ISA_MIPS64R6) ||
1127 env->CP0_Status & (1 << CP0St_KX)) {
1128 env->hflags &= ~MIPS_HFLAG_AWRAP;
1131 env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
1132 env->hflags &= ~(MIPS_HFLAG_KSU);
1133 /* EJTAG probe trap enable is not implemented... */
1134 if (!(env->CP0_Status & (1 << CP0St_EXL)))
1135 env->CP0_Cause &= ~(1U << CP0Ca_BD);
1136 env->active_tc.PC = env->exception_base + 0x480;
1137 set_hflags_for_handler(env);
1138 break;
1139 case EXCP_RESET:
1140 cpu_reset(CPU(cpu));
1141 break;
1142 case EXCP_SRESET:
1143 env->CP0_Status |= (1 << CP0St_SR);
1144 memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
1145 goto set_error_EPC;
1146 case EXCP_NMI:
1147 env->CP0_Status |= (1 << CP0St_NMI);
1148 set_error_EPC:
1149 env->CP0_ErrorEPC = exception_resume_pc(env);
1150 env->hflags &= ~MIPS_HFLAG_BMASK;
1151 env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
1152 if (env->insn_flags & ISA_MIPS3) {
1153 env->hflags |= MIPS_HFLAG_64;
1154 if (!(env->insn_flags & ISA_MIPS64R6) ||
1155 env->CP0_Status & (1 << CP0St_KX)) {
1156 env->hflags &= ~MIPS_HFLAG_AWRAP;
1159 env->hflags |= MIPS_HFLAG_CP0;
1160 env->hflags &= ~(MIPS_HFLAG_KSU);
1161 if (!(env->CP0_Status & (1 << CP0St_EXL)))
1162 env->CP0_Cause &= ~(1U << CP0Ca_BD);
1163 env->active_tc.PC = env->exception_base;
1164 set_hflags_for_handler(env);
1165 break;
1166 case EXCP_EXT_INTERRUPT:
1167 cause = 0;
1168 if (env->CP0_Cause & (1 << CP0Ca_IV)) {
1169 uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;
1171 if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
1172 offset = 0x200;
1173 } else {
1174 uint32_t vector = 0;
1175 uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;
1177 if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
1178 /* For VEIC mode, the external interrupt controller feeds
1179 * the vector through the CP0Cause IP lines. */
1180 vector = pending;
1181 } else {
1182 /* Vectored Interrupts
1183 * Mask with Status.IM7-IM0 to get enabled interrupts. */
1184 pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
1185 /* Find the highest-priority interrupt. */
1186 while (pending >>= 1) {
1187 vector++;
1190 offset = 0x200 + (vector * (spacing << 5));
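/* E.g. with IntCtl.VS = 1 (32-byte spacing) and vector 3 the handler entry
 * is at offset 0x200 + 3 * 32 = 0x260 from EBase (illustration only). */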
1193 goto set_EPC;
1194 case EXCP_LTLBL:
1195 cause = 1;
1196 update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
1197 goto set_EPC;
1198 case EXCP_TLBL:
1199 cause = 2;
1200 update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
1201 if ((env->error_code & EXCP_TLB_NOMATCH) &&
1202 !(env->CP0_Status & (1 << CP0St_EXL))) {
1203 #if defined(TARGET_MIPS64)
1204 int R = env->CP0_BadVAddr >> 62;
1205 int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
1206 int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
1208 if ((R != 0 || UX) && (R != 3 || KX) &&
1209 (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
1210 offset = 0x080;
1211 } else {
1212 #endif
1213 offset = 0x000;
1214 #if defined(TARGET_MIPS64)
1216 #endif
1218 goto set_EPC;
1219 case EXCP_TLBS:
1220 cause = 3;
1221 update_badinstr = 1;
1222 if ((env->error_code & EXCP_TLB_NOMATCH) &&
1223 !(env->CP0_Status & (1 << CP0St_EXL))) {
1224 #if defined(TARGET_MIPS64)
1225 int R = env->CP0_BadVAddr >> 62;
1226 int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
1227 int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
1229 if ((R != 0 || UX) && (R != 3 || KX) &&
1230 (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
1231 offset = 0x080;
1232 } else {
1233 #endif
1234 offset = 0x000;
1235 #if defined(TARGET_MIPS64)
1237 #endif
1239 goto set_EPC;
1240 case EXCP_AdEL:
1241 cause = 4;
1242 update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
1243 goto set_EPC;
1244 case EXCP_AdES:
1245 cause = 5;
1246 update_badinstr = 1;
1247 goto set_EPC;
1248 case EXCP_IBE:
1249 cause = 6;
1250 goto set_EPC;
1251 case EXCP_DBE:
1252 cause = 7;
1253 goto set_EPC;
1254 case EXCP_SYSCALL:
1255 cause = 8;
1256 update_badinstr = 1;
1257 goto set_EPC;
1258 case EXCP_BREAK:
1259 cause = 9;
1260 update_badinstr = 1;
1261 goto set_EPC;
1262 case EXCP_RI:
1263 cause = 10;
1264 update_badinstr = 1;
1265 goto set_EPC;
1266 case EXCP_CpU:
1267 cause = 11;
1268 update_badinstr = 1;
1269 env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
1270 (env->error_code << CP0Ca_CE);
1271 goto set_EPC;
1272 case EXCP_OVERFLOW:
1273 cause = 12;
1274 update_badinstr = 1;
1275 goto set_EPC;
1276 case EXCP_TRAP:
1277 cause = 13;
1278 update_badinstr = 1;
1279 goto set_EPC;
1280 case EXCP_MSAFPE:
1281 cause = 14;
1282 update_badinstr = 1;
1283 goto set_EPC;
1284 case EXCP_FPE:
1285 cause = 15;
1286 update_badinstr = 1;
1287 goto set_EPC;
1288 case EXCP_C2E:
1289 cause = 18;
1290 goto set_EPC;
1291 case EXCP_TLBRI:
1292 cause = 19;
1293 update_badinstr = 1;
1294 goto set_EPC;
1295 case EXCP_TLBXI:
1296 cause = 20;
1297 goto set_EPC;
1298 case EXCP_MSADIS:
1299 cause = 21;
1300 update_badinstr = 1;
1301 goto set_EPC;
1302 case EXCP_MDMX:
1303 cause = 22;
1304 goto set_EPC;
1305 case EXCP_DWATCH:
1306 cause = 23;
1307 /* XXX: TODO: manage deferred watch exceptions */
1308 goto set_EPC;
1309 case EXCP_MCHECK:
1310 cause = 24;
1311 goto set_EPC;
1312 case EXCP_THREAD:
1313 cause = 25;
1314 goto set_EPC;
1315 case EXCP_DSPDIS:
1316 cause = 26;
1317 goto set_EPC;
1318 case EXCP_CACHE:
1319 cause = 30;
1320 offset = 0x100;
1321 set_EPC:
1322 if (!(env->CP0_Status & (1 << CP0St_EXL))) {
1323 env->CP0_EPC = exception_resume_pc(env);
1324 if (update_badinstr) {
1325 set_badinstr_registers(env);
1327 if (env->hflags & MIPS_HFLAG_BMASK) {
1328 env->CP0_Cause |= (1U << CP0Ca_BD);
1329 } else {
1330 env->CP0_Cause &= ~(1U << CP0Ca_BD);
1332 env->CP0_Status |= (1 << CP0St_EXL);
1333 if (env->insn_flags & ISA_MIPS3) {
1334 env->hflags |= MIPS_HFLAG_64;
1335 if (!(env->insn_flags & ISA_MIPS64R6) ||
1336 env->CP0_Status & (1 << CP0St_KX)) {
1337 env->hflags &= ~MIPS_HFLAG_AWRAP;
1340 env->hflags |= MIPS_HFLAG_CP0;
1341 env->hflags &= ~(MIPS_HFLAG_KSU);
1343 env->hflags &= ~MIPS_HFLAG_BMASK;
1344 if (env->CP0_Status & (1 << CP0St_BEV)) {
1345 env->active_tc.PC = env->exception_base + 0x200;
1346 } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
1347 env->CP0_Config5 & (1 << CP0C5_CV))) {
1348 /* Force KSeg1 for cache errors */
1349 env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
1350 } else {
1351 env->active_tc.PC = env->CP0_EBase & ~0xfff;
1354 env->active_tc.PC += offset;
1355 set_hflags_for_handler(env);
1356 env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
1357 break;
1358 default:
1359 abort();
1361 if (qemu_loglevel_mask(CPU_LOG_INT)
1362 && cs->exception_index != EXCP_EXT_INTERRUPT) {
1363 qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
1364 " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
1365 __func__, env->active_tc.PC, env->CP0_EPC, cause,
1366 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
1367 env->CP0_DEPC);
1369 #endif
1370 cs->exception_index = EXCP_NONE;
1373 bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1375 if (interrupt_request & CPU_INTERRUPT_HARD) {
1376 MIPSCPU *cpu = MIPS_CPU(cs);
1377 CPUMIPSState *env = &cpu->env;
1379 if (cpu_mips_hw_interrupts_enabled(env) &&
1380 cpu_mips_hw_interrupts_pending(env)) {
1381 /* Raise it */
1382 cs->exception_index = EXCP_EXT_INTERRUPT;
1383 env->error_code = 0;
1384 mips_cpu_do_interrupt(cs);
1385 return true;
1388 return false;
1391 #if !defined(CONFIG_USER_ONLY)
1392 void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
1394 MIPSCPU *cpu = mips_env_get_cpu(env);
1395 CPUState *cs;
1396 r4k_tlb_t *tlb;
1397 target_ulong addr;
1398 target_ulong end;
1399 uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
1400 target_ulong mask;
1402 tlb = &env->tlb->mmu.r4k.tlb[idx];
1403 /* The qemu TLB is flushed when the ASID changes, so no need to
1404 flush these entries again. */
1405 if (tlb->G == 0 && tlb->ASID != ASID) {
1406 return;
1409 if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
1410 /* For tlbwr, we can shadow the discarded entry into
1411 a new (fake) TLB entry, as long as the guest can not
1412 tell that it's there. */
1413 env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
1414 env->tlb->tlb_in_use++;
1415 return;
1418 /* 1k pages are not supported. */
1419 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
1420 if (tlb->V0) {
1421 cs = CPU(cpu);
1422 addr = tlb->VPN & ~mask;
1423 #if defined(TARGET_MIPS64)
1424 if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1425 addr |= 0x3FFFFF0000000000ULL;
1427 #endif
1428 end = addr | (mask >> 1);
1429 while (addr < end) {
1430 tlb_flush_page(cs, addr);
1431 addr += TARGET_PAGE_SIZE;
1434 if (tlb->V1) {
1435 cs = CPU(cpu);
1436 addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
1437 #if defined(TARGET_MIPS64)
1438 if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
1439 addr |= 0x3FFFFF0000000000ULL;
1441 #endif
1442 end = addr | mask;
1443 while (addr - 1 < end) {
1444 tlb_flush_page(cs, addr);
1445 addr += TARGET_PAGE_SIZE;
1449 #endif
1451 void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
1452 uint32_t exception,
1453 int error_code,
1454 uintptr_t pc)
1456 CPUState *cs = CPU(mips_env_get_cpu(env));
1458 qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
1459 __func__, exception, error_code);
1460 cs->exception_index = exception;
1461 env->error_code = error_code;
1463 cpu_loop_exit_restore(cs, pc);
1466 static void mips_cpu_add_definition(gpointer data, gpointer user_data)
1468 ObjectClass *oc = data;
1469 CpuDefinitionInfoList **cpu_list = user_data;
1470 CpuDefinitionInfoList *entry;
1471 CpuDefinitionInfo *info;
1472 const char *typename;
1474 typename = object_class_get_name(oc);
1475 info = g_malloc0(sizeof(*info));
1476 info->name = g_strndup(typename,
1477 strlen(typename) - strlen("-" TYPE_MIPS_CPU));
1478 info->q_typename = g_strdup(typename);
1480 entry = g_malloc0(sizeof(*entry));
1481 entry->value = info;
1482 entry->next = *cpu_list;
1483 *cpu_list = entry;
1486 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
1488 CpuDefinitionInfoList *cpu_list = NULL;
1489 GSList *list;
1491 list = object_class_get_list(TYPE_MIPS_CPU, false);
1492 g_slist_foreach(list, mips_cpu_add_definition, &cpu_list);
1493 g_slist_free(list);
1495 return cpu_list;