#if !defined (__MIPS_CPU_H__)
#define CPUArchState struct CPUMIPSState
#include "qemu-common.h"
#include "mips-defs.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"
typedef struct r4k_tlb_t r4k_tlb_t;
    uint_fast16_t EHINV:1;
#if !defined(CONFIG_USER_ONLY)
typedef struct CPUMIPSTLBContext CPUMIPSTLBContext;
struct CPUMIPSTLBContext {
    int (*map_address) (struct CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    r4k_tlb_t tlb[MIPS_TLB_MAX];
#define MSA_WRLEN (128)
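/* MSA vector registers are 128 bits wide, i.e. 16 byte, 8 halfword, 4 word or
   2 doubleword elements, matching the wr_t element arrays below. */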
enum CPUMIPSMSADataFormat {
typedef union wr_t wr_t;
    int8_t  b[MSA_WRLEN/8];
    int16_t h[MSA_WRLEN/16];
    int32_t w[MSA_WRLEN/32];
    int64_t d[MSA_WRLEN/64];
typedef union fpr_t fpr_t;
    float64  fd;   /* ieee double precision */
    float32  fs[2];/* ieee single precision */
    uint64_t d;    /* binary double fixed-point */
    uint32_t w[2]; /* binary single fixed-point */
    /* FPU/MSA register mapping is not tested on big-endian hosts. */
    wr_t     wr;   /* vector data */
/* define FP_ENDIAN_IDX to access the same location
 * in the fpr_t union regardless of the host endianness
 */
#if defined(HOST_WORDS_BIGENDIAN)
# define FP_ENDIAN_IDX 1
#else
# define FP_ENDIAN_IDX 0
#endif
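/* Usage sketch: for a given fpr_t 'f', the low single-precision value can be
 * read as f.fs[FP_ENDIAN_IDX] and the low 32-bit fixed-point word as
 * f.w[FP_ENDIAN_IDX], on both big- and little-endian hosts. */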
typedef struct CPUMIPSFPUContext CPUMIPSFPUContext;
struct CPUMIPSFPUContext {
    /* Floating point registers */
    float_status fp_status;
    /* fpu implementation/revision register (fir) */
#define SET_FP_COND(num,env) do { ((env).fcr31) |= ((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
#define CLEAR_FP_COND(num,env) do { ((env).fcr31) &= ~((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
#define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | (((env).fcr31 >> 23) & 0x1))
#define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f)
#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
#define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f)
#define SET_FP_CAUSE(reg,v) do { (reg) = ((reg) & ~(0x3f << 12)) | ((v & 0x3f) << 12); } while(0)
#define SET_FP_ENABLE(reg,v) do { (reg) = ((reg) & ~(0x1f << 7)) | ((v & 0x1f) << 7); } while(0)
#define SET_FP_FLAGS(reg,v) do { (reg) = ((reg) & ~(0x1f << 2)) | ((v & 0x1f) << 2); } while(0)
#define UPDATE_FP_FLAGS(reg,v) do { (reg) |= ((v & 0x1f) << 2); } while(0)
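/*
 * Usage sketch: condition code 0 lives in FCR31 bit 23 and codes 1..7 in
 * bits 25..31, so after SET_FP_COND(cc, env->active_fpu) the test
 * (GET_FP_COND(env->active_fpu) >> cc) & 1 reads the code back from the
 * repacked 8-bit vector.  GET/SET_FP_CAUSE, _ENABLE and _FLAGS address the
 * FCSR Cause (bits 17..12), Enables (11..7) and Flags (6..2) fields.
 */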
#define FP_UNDERFLOW 2
#define FP_OVERFLOW 4
#define FP_INVALID 16
#define FP_UNIMPLEMENTED 32
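/* These are the per-exception bit masks used within the Cause, Enables and
 * Flags fields accessed by the macros above; ieee_ex_to_mips(), declared
 * later in this header, is expected to map softfloat exception flags onto
 * this encoding. */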
#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 2
typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
struct CPUMIPSMVPContext {
    int32_t CP0_MVPControl;
#define CP0MVPCo_CPA 3
#define CP0MVPCo_STLB 2
#define CP0MVPCo_VPC 1
#define CP0MVPCo_EVP 0
    int32_t CP0_MVPConf0;
#define CP0MVPC0_M 31
#define CP0MVPC0_TLBS 29
#define CP0MVPC0_GS 28
#define CP0MVPC0_PCP 27
#define CP0MVPC0_PTLBE 16
#define CP0MVPC0_TCA 15
#define CP0MVPC0_PVPE 10
#define CP0MVPC0_PTC 0
    int32_t CP0_MVPConf1;
#define CP0MVPC1_CIM 31
#define CP0MVPC1_CIF 30
#define CP0MVPC1_PCX 20
#define CP0MVPC1_PCP2 10
#define CP0MVPC1_PCP1 0
typedef struct mips_def_t mips_def_t;
#define MIPS_SHADOW_SET_MAX 16
#define MIPS_TC_MAX 5
#define MIPS_FPU_MAX 1
#define MIPS_DSP_ACC 4
#define MIPS_KSCRATCH_NUM 6
typedef struct TCState TCState;
    target_ulong gpr[32];
    target_ulong HI[MIPS_DSP_ACC];
    target_ulong LO[MIPS_DSP_ACC];
    target_ulong ACX[MIPS_DSP_ACC];
    target_ulong DSPControl;
    int32_t CP0_TCStatus;
#define CP0TCSt_TCU3 31
#define CP0TCSt_TCU2 30
#define CP0TCSt_TCU1 29
#define CP0TCSt_TCU0 28
#define CP0TCSt_TMX 27
#define CP0TCSt_RNST 23
#define CP0TCSt_TDS 21
#define CP0TCSt_DT 20
#define CP0TCSt_DA 15
#define CP0TCSt_TKSU 11
#define CP0TCSt_IXMT 10
#define CP0TCSt_TASID 0
#define CP0TCBd_CurTC 21
#define CP0TCBd_TBE 17
#define CP0TCBd_CurVPE 0
    target_ulong CP0_TCHalt;
    target_ulong CP0_TCContext;
    target_ulong CP0_TCSchedule;
    target_ulong CP0_TCScheFBack;
    int32_t CP0_Debug_tcstatus;
    target_ulong CP0_UserLocal;
#define MSACSR_FS_MASK (1 << MSACSR_FS)
#define MSACSR_NX_MASK (1 << MSACSR_NX)
#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF)
#define MSACSR_RM_MASK (0x3 << MSACSR_RM)
#define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \
                     MSACSR_FS_MASK)
    float_status msa_fp_status;
typedef struct CPUMIPSState CPUMIPSState;
struct CPUMIPSState {
    CPUMIPSFPUContext active_fpu;
    uint32_t current_fpu;
#if defined(TARGET_MIPS64)
# define PABITS_BASE 36
#else
# define PABITS_BASE 32
#endif
    target_ulong SEGMask;
#define PAMASK_BASE ((1ULL << PABITS_BASE) - 1)
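/* For example, with PABITS_BASE 36 this evaluates to 0xFFFFFFFFFULL, i.e. a
   36-bit physical address mask. */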
#define MSAIR_ProcID 8
    /* CP0_MVP* are per MVP registers. */
    int32_t CP0_VPEControl;
#define CP0VPECo_YSI 21
#define CP0VPECo_GSI 20
#define CP0VPECo_EXCPT 16
#define CP0VPECo_TE 15
#define CP0VPECo_TargTC 0
    int32_t CP0_VPEConf0;
#define CP0VPEC0_M 31
#define CP0VPEC0_XTC 21
#define CP0VPEC0_TCS 19
#define CP0VPEC0_SCS 18
#define CP0VPEC0_DSC 17
#define CP0VPEC0_ICS 16
#define CP0VPEC0_MVP 1
#define CP0VPEC0_VPA 0
    int32_t CP0_VPEConf1;
#define CP0VPEC1_NCX 20
#define CP0VPEC1_NCP2 10
#define CP0VPEC1_NCP1 0
    target_ulong CP0_YQMask;
    target_ulong CP0_VPESchedule;
    target_ulong CP0_VPEScheFBack;
#define CP0VPEOpt_IWX7 15
#define CP0VPEOpt_IWX6 14
#define CP0VPEOpt_IWX5 13
#define CP0VPEOpt_IWX4 12
#define CP0VPEOpt_IWX3 11
#define CP0VPEOpt_IWX2 10
#define CP0VPEOpt_IWX1 9
#define CP0VPEOpt_IWX0 8
#define CP0VPEOpt_DWX7 7
#define CP0VPEOpt_DWX6 6
#define CP0VPEOpt_DWX5 5
#define CP0VPEOpt_DWX4 4
#define CP0VPEOpt_DWX3 3
#define CP0VPEOpt_DWX2 2
#define CP0VPEOpt_DWX1 1
#define CP0VPEOpt_DWX0 0
    uint64_t CP0_EntryLo0;
    uint64_t CP0_EntryLo1;
#if defined(TARGET_MIPS64)
# define CP0EnLo_RI 63
# define CP0EnLo_XI 62
#else
# define CP0EnLo_RI 31
# define CP0EnLo_XI 30
#endif
    target_ulong CP0_Context;
    target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
    int32_t CP0_PageMask;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
#define CP0PG_ELPA 29
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
#define CP0SRSC0_M 31
#define CP0SRSC0_SRS3 20
#define CP0SRSC0_SRS2 10
#define CP0SRSC0_SRS1 0
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
#define CP0SRSC1_M 31
#define CP0SRSC1_SRS6 20
#define CP0SRSC1_SRS5 10
#define CP0SRSC1_SRS4 0
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
#define CP0SRSC2_M 31
#define CP0SRSC2_SRS9 20
#define CP0SRSC2_SRS8 10
#define CP0SRSC2_SRS7 0
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
#define CP0SRSC3_M 31
#define CP0SRSC3_SRS12 20
#define CP0SRSC3_SRS11 10
#define CP0SRSC3_SRS10 0
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
#define CP0SRSC4_SRS15 20
#define CP0SRSC4_SRS14 10
#define CP0SRSC4_SRS13 0
    target_ulong CP0_BadVAddr;
    uint32_t CP0_BadInstr;
    uint32_t CP0_BadInstrP;
    target_ulong CP0_EntryHi;
#define CP0EnHi_EHINV 10
#define CP0IntCtl_IPTI 29
#define CP0IntCtl_IPPCI 26
#define CP0IntCtl_VS 5
#define CP0SRSCtl_HSS 26
#define CP0SRSCtl_EICSS 18
#define CP0SRSCtl_ESS 12
#define CP0SRSCtl_PSS 6
#define CP0SRSCtl_CSS 0
#define CP0SRSMap_SSV7 28
#define CP0SRSMap_SSV6 24
#define CP0SRSMap_SSV5 20
#define CP0SRSMap_SSV4 16
#define CP0SRSMap_SSV3 12
#define CP0SRSMap_SSV2 8
#define CP0SRSMap_SSV1 4
#define CP0SRSMap_SSV0 0
#define CP0Ca_IP_mask 0x0000FF00
    target_ulong CP0_EPC;
#define CP0C3_CMCGR 29
#define CP0C3_MSAP 28
#define CP0C3_IPLW 21
#define CP0C3_MMAR 18
#define CP0C3_ISA_ON_EXC 16
#define CP0C3_ULRI 13
#define CP0C3_DSP2P 11
#define CP0C3_DSPP 10
    int32_t CP0_Config4_rw_bitmask;
#define CP0C4_KScrExist 16
#define CP0C4_MMUExtDef 14
#define CP0C4_FTLBPageSize 8
#define CP0C4_FTLBWays 4
#define CP0C4_FTLBSets 0
#define CP0C4_MMUSizeExt 0
    int32_t CP0_Config5_rw_bitmask;
#define CP0C5_MSAEn 27
#define CP0C5_NFExists 0
    /* XXX: Maybe make LLAddr per-TC? */
    target_ulong llnewval;
    uint64_t CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    target_ulong CP0_WatchLo[8];
    int32_t CP0_WatchHi[8];
    target_ulong CP0_XContext;
    int32_t CP0_Framemask;
#define CP0DB_LSNM 28
#define CP0DB_Doze 27
#define CP0DB_Halt 26
#define CP0DB_IBEP 24
#define CP0DB_DBEP 21
#define CP0DB_IEXI 20
    target_ulong CP0_DEPC;
    int32_t CP0_Performance0;
    target_ulong CP0_ErrorEPC;
    /* We waste some space so we can handle shadow registers like TCs. */
    TCState tcs[MIPS_SHADOW_SET_MAX];
    CPUMIPSFPUContext fpus[MIPS_FPU_MAX];
#define EXCP_TLB_NOMATCH 0x1
#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
    uint32_t hflags; /* CPU State */
    /* TMASK defines different execution modes */
#define MIPS_HFLAG_TMASK 0x75807FF
#define MIPS_HFLAG_MODE 0x00007 /* execution modes */
    /* The KSU flags must be the lowest bits in hflags. The flag order
       must be the same as defined for CP0 Status. This allows the bits
       to be used directly as the value of mmu_idx. */
#define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */
#define MIPS_HFLAG_UM 0x00002 /* user mode flag */
#define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */
#define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */
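    /* With this layout, kernel (0), supervisor (1) and user (2) line up with
       the MMU_MODE0/1/2_SUFFIX definitions further down, so cpu_mmu_index()
       can simply return hflags & MIPS_HFLAG_KSU. */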
#define MIPS_HFLAG_DM 0x00004 /* Debug mode */
#define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */
#define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */
#define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */
#define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */
    /* True if the MIPS IV COP1X instructions can be used. This also
       controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S
       and RSQRT.D. */
#define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */
#define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */
#define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */
#define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */
#define MIPS_HFLAG_M16_SHIFT 10
    /* If translation is interrupted between the branch instruction and
     * the delay slot, record what type of branch it is so that we can
     * resume translation properly. It might be possible to reduce
     * this from three bits to two. */
#define MIPS_HFLAG_BMASK_BASE 0x803800
#define MIPS_HFLAG_B 0x00800 /* Unconditional branch */
#define MIPS_HFLAG_BC 0x01000 /* Conditional branch */
#define MIPS_HFLAG_BL 0x01800 /* Likely branch */
#define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */
    /* Extra flags about the current pending branch. */
#define MIPS_HFLAG_BMASK_EXT 0x7C000
#define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */
#define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */
#define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */
#define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */
#define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */
#define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT)
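    /* For example, a translation interrupted at a conditional branch that
       needs a 16-bit delay slot would leave MIPS_HFLAG_BC | MIPS_HFLAG_BDS16
       set in hflags, telling the resumed translation how to complete the
       branch. */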
    /* MIPS DSP resources access. */
#define MIPS_HFLAG_DSP 0x080000 /* Enable access to MIPS DSP resources. */
#define MIPS_HFLAG_DSPR2 0x100000 /* Enable access to MIPS DSPR2 resources. */
    /* Extra flag about HWREna register. */
#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */
#define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */
#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */
#define MIPS_HFLAG_MSA 0x1000000 /* MSA enabled */
#define MIPS_HFLAG_FRE 0x2000000 /* FRE enabled */
#define MIPS_HFLAG_ELPA 0x4000000 /* Large physical addresses enabled */
    target_ulong btarget; /* Jump / branch target */
    target_ulong bcond;   /* Branch condition (if needed) */
    int SYNCI_Step; /* Address step size for SYNCI */
    int CCRes; /* Cycle count resolution/divisor */
    uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */
    uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
    int insn_flags; /* Supported instruction set */
    /* Fields from here on are preserved across CPU reset. */
    CPUMIPSMVPContext *mvp;
#if !defined(CONFIG_USER_ONLY)
    CPUMIPSTLBContext *tlb;
    const mips_def_t *cpu_model;
    QEMUTimer *timer; /* Internal timer */
#if !defined(CONFIG_USER_ONLY)
int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type);
int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, int rw, int access_type);
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);
void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                bool is_write, bool is_exec, int unused,
                                unsigned size);
void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
#define cpu_exec cpu_mips_exec
#define cpu_signal_handler cpu_mips_signal_handler
#define cpu_list mips_cpu_list
extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env);
extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
/* MMU modes definitions. We carefully match the indices with our
   hflags layout. */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _super
#define MMU_MODE2_SUFFIX _user
#define MMU_USER_IDX 2
static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
{
    return env->hflags & MIPS_HFLAG_KSU;
}
static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /* Note that the TCStatus IXMT field is initialized to zero,
           and only MT capable cores can set it to one. So we don't
           need to check for MT capabilities here. */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}
/* Check if there is a pending and not masked out interrupt */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    uint32_t pending;
    uint32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /* A MIPS configured with a vectorizing external interrupt controller
           will feed a vector into the Cause pending lines. The core treats
           the status lines as a vector level, not as individual masks. */
        r = pending > status;
    } else {
        /* A MIPS configured with compatibility or VInt (Vectored Interrupts)
           treats the pending lines as individual interrupt lines; the status
           lines are individual masks. */
        r = (pending & status) != 0;
    }

    return r;
}
#include "exec/cpu-all.h"

/* Memory access type:
 * may be needed for precise access rights control and precise exceptions.
 */
    /* 1 bit to define user level / supervisor access */
    /* 1 bit to indicate direction */
    /* Type of instruction that generated the access */
    ACCESS_CODE = 0x10,  /* Code fetch access */
    ACCESS_INT = 0x20,   /* Integer load/store access */
    ACCESS_FLOAT = 0x30, /* floating point load/store access */
    EXCP_EXT_INTERRUPT, /* 8 */
    EXCP_DWATCH, /* 24 */
    EXCP_LAST = EXCP_TLBRI,

/* Dummy exception for conditional stores. */
#define EXCP_SC 0x100

/*
 * This is an internally generated WAKE request line.
 * It is driven by the CPU itself. Raised when the MT
 * block wants to wake a VPE from an inactive state and
 * cleared when the VPE goes from active to inactive.
 */
#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
int cpu_mips_exec(CPUState *cpu);
void mips_tcg_init(void);
MIPSCPU *cpu_mips_init(const char *cpu_model);
int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);

#define cpu_init(cpu_model) CPU(cpu_mips_init(cpu_model))
/* TODO QOM'ify CPU reset and remove */
void cpu_state_reset(CPUMIPSState *s);

uint32_t cpu_mips_get_random (CPUMIPSState *env);
uint32_t cpu_mips_get_count (CPUMIPSState *env);
void cpu_mips_store_count (CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                              int mmu_idx);
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
                                   int rw);
#endif
target_ulong exception_resume_pc (CPUMIPSState *env);
extern unsigned int ieee_rm[];
int ieee_ex_to_mips(int xcpt);
static inline void restore_rounding_mode(CPUMIPSState *env)
{
    /* The low two bits of FCR31 hold the rounding mode; ieee_rm[] maps
       that encoding onto the softfloat rounding modes. */
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}
static inline void restore_flush_mode(CPUMIPSState *env)
{
    /* FCR31 bit 24 is the FS (flush-to-zero) bit. */
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0,
                      &env->active_fpu.fp_status);
}
static inline void restore_fp_status(CPUMIPSState *env)
{
    restore_rounding_mode(env);
    restore_flush_mode(env);
}
static inline void restore_msa_fp_status(CPUMIPSState *env)
{
    float_status *status = &env->active_tc.msa_fp_status;
    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;

    set_float_rounding_mode(ieee_rm[rounding_mode], status);
    set_flush_to_zero(flush_to_zero, status);
    set_flush_inputs_to_zero(flush_to_zero, status);
}
static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}
static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *pc = env->active_tc.PC;
    *cs_base = 0;
    *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
                            MIPS_HFLAG_HWRENA_ULR);
}
static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /* Now verify that there are active thread contexts in the VPE.

       This assumes the CPU model will internally reschedule threads
       if the active one goes to sleep. If there are no threads available
       the active one will be in a sleeping state, and we can turn off
       the QEMU vCPU. */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}
#include "exec/exec-all.h"
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 |
                     MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | MIPS_HFLAG_FRE |
                     MIPS_HFLAG_ELPA);
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS64R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS32R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSPR2) {
        /* The CPU implements the DSP R2 ASE, so allow access to the DSP
           and DSPR2 resources when Status.MX is set. */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /* The CPU implements the DSP ASE, so allow access to the DSP
           resources when Status.MX is set. */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) bit to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them. */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (env->insn_flags & ASE_MSA) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}
#ifndef CONFIG_USER_ONLY
static inline void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush(CPU(cpu), flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
/* Called for updates to CP0_Status. */
static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                     | (1 << CP0TCSt_TCU2)
                     | (1 << CP0TCSt_TCU1)
                     | (1 << CP0TCSt_TCU0)
                     | (1 << CP0TCSt_TMX)
                     | (3 << CP0TCSt_TKSU)
                     | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    asid = env->CP0_EntryHi & 0xff;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
    compute_hflags(cpu);
}
static inline void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    target_ulong old = env->CP0_Status;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
#if defined(TARGET_MIPS64)
        uint32_t ksux = (1 << CP0St_KX) & val;
        ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
        ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
        val = (val & ~(7 << CP0St_UX)) | ksux;
#endif
        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (old & ~mask) | (val & mask);
#if defined(TARGET_MIPS64)
    if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
        /* Access to at least one of the 64-bit segments has been disabled */
        cpu_mips_tlb_flush(env, 1);
    }
#endif
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    }

    compute_hflags(env);
}
static inline void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask &= ~((1 << CP0Ca_WP) & val);
    }

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(env);
        } else {
            cpu_mips_start_count(env);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0; i < 2; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                                        uint32_t exception,
                                                        int error_code,
                                                        uintptr_t pc)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    if (exception < EXCP_SC) {
        qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
                      __func__, exception, error_code);
    }
    cs->exception_index = exception;
    env->error_code = error_code;

    cpu_loop_exit_restore(cs, pc);
}
static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}
#endif /* !defined (__MIPS_CPU_H__) */