/* mips internal definitions and helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MIPS_INTERNAL_H
#define MIPS_INTERNAL_H
/* MMU types, the first four entries have the same layout as the
 * CP0C0_MT field.
 */

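/* Writable-bit masks: a set bit marks a bit of the corresponding CP0/CP1
 * register that guest software is allowed to modify on this CPU model.
 */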
    int32_t CP0_Config4_rw_bitmask;
    int32_t CP0_Config5_rw_bitmask;
    target_ulong CP0_LLAddr_rw_bitmask;
    int32_t CP0_Status_rw_bitmask;
    int32_t CP0_TCStatus_rw_bitmask;
    int32_t CP1_fcr31_rw_bitmask;
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
    target_ulong CP0_EBaseWG_rw_bitmask;
    enum mips_mmu_types mmu_type;

extern const struct mips_def_t mips_defs[];
extern const int mips_defs_number;

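/* Element widths for MIPS SIMD Architecture (MSA) vector operations. */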
enum CPUMIPSMSADataFormat {
    DF_BYTE = 0,
    DF_HALF,
    DF_WORD,
    DF_DOUBLE
};

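/* QOM CPUClass hook implementations for the MIPS CPU. */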
void mips_cpu_do_interrupt(CPUState *cpu);
bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
void mips_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr);

#if !defined(CONFIG_USER_ONLY)

typedef struct r4k_tlb_t r4k_tlb_t;

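/* EHINV: the entry has been explicitly invalidated (via EntryHi.EHINV or a
 * TLBINV/TLBINVF operation) and must not match any address.
 */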
    unsigned int EHINV:1;

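/* Per-MMU-model operations: address translation plus the TLB maintenance
 * helpers backing the tlbwi/tlbwr/tlbp/tlbr/tlbinv/tlbinvf instructions.
 */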
struct CPUMIPSTLBContext {
    int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};

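/* Translation back-ends for the supported MMU models: no MMU, a fixed
 * (static) mapping, and the R4000-style software-managed TLB.
 */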
int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type);
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, int rw, int access_type);
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, int rw, int access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra);
void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                bool is_write, bool is_exec, int unused,
                                unsigned size);
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  int rw);
#endif

#define cpu_signal_handler cpu_mips_signal_handler

#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_mips_cpu;
#endif

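/* True when CP0.Status and the debug/MT state allow hardware interrupts to
 * be taken: IE set, EXL/ERL clear, not in debug mode, and the current thread
 * context not masking interrupts.
 */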
static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /* Note that the TCStatus IXMT field is initialized to zero,
           and only MT capable cores can set it to one. So we don't
           need to check for MT capabilities here.  */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}

/* Check if there is a pending and not masked-out interrupt */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    int32_t pending;
    int32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /* A MIPS configured with a vectorizing external interrupt controller
           will feed a vector into the Cause pending lines. The core treats
           the status lines as a vector level, not as individual masks.  */
        r = pending > status;
    } else {
        /* A MIPS configured with compatibility or VInt (Vectored Interrupts)
           treats the pending lines as individual interrupt lines, the status
           lines are individual masks.  */
        r = (pending & status) != 0;
    }

    return r;
}

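/* Interrupt delivery typically combines the two predicates above; a minimal
 * sketch of such a caller (cs being the CPUState that owns env, not a
 * definition from this header):
 *
 *     if (cpu_mips_hw_interrupts_enabled(env) &&
 *         cpu_mips_hw_interrupts_pending(env)) {
 *         cs->exception_index = EXCP_EXT_INTERRUPT;
 *         mips_cpu_do_interrupt(cs);
 *     }
 */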

void mips_tcg_init(void);

/* TODO QOM'ify CPU reset and remove */
void cpu_state_reset(CPUMIPSState *s);
void cpu_mips_realize_env(CPUMIPSState *env);

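/* CP0 Random register and Count/Compare timer control. */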
uint32_t cpu_mips_get_random(CPUMIPSState *env);
uint32_t cpu_mips_get_count(CPUMIPSState *env);
void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
                              int mmu_idx);

uint32_t float_class_s(uint32_t arg, float_status *fst);
uint64_t float_class_d(uint64_t arg, float_status *fst);

extern unsigned int ieee_rm[];
int ieee_ex_to_mips(int xcpt);
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);

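/* The helpers below re-derive the softfloat rounding/flush settings from the
 * guest-visible FCR31 and MSACSR control registers; call them whenever those
 * registers change.
 */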

static inline void restore_rounding_mode(CPUMIPSState *env)
{
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}

static inline void restore_flush_mode(CPUMIPSState *env)
{
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
                      &env->active_fpu.fp_status);
}

static inline void restore_fp_status(CPUMIPSState *env)
{
    restore_rounding_mode(env);
    restore_flush_mode(env);
    restore_snan_bit_mode(env);
}

static inline void restore_msa_fp_status(CPUMIPSState *env)
{
    float_status *status = &env->active_tc.msa_fp_status;
    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;

    set_float_rounding_mode(ieee_rm[rounding_mode], status);
    set_flush_to_zero(flush_to_zero, status);
    set_flush_inputs_to_zero(flush_to_zero, status);
}

static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}

static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /* Now verify that there are active thread contexts in the VPE.

       This assumes the CPU model will internally reschedule threads
       if the active one goes to sleep. If there are no threads available
       the active one will be in a sleeping state, and we can turn off
       the entire VPE.  */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}

static inline int mips_vp_active(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;

    /* Check if the VP disabled other VPs (which means the VP is enabled) */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP */
    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        if ((&other_cpu->env != env) &&
            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
            return 0;
        }
    }
    return 1;
}

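/* Recompute the cached env->hflags after any change to the CP0 state that
 * influences them (Status, Config3/5, PageGrain, FCR0, ...).
 */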
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                     MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
                     MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS64R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS32R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSP_R3) {
        /*
         * Our cpu supports DSP R3 ASE, so enable
         * access to DSP R3 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                           MIPS_HFLAG_DSP_R3;
        }
    } else if (env->insn_flags & ASE_DSP_R2) {
        /*
         * Our cpu supports DSP R2 ASE, so enable
         * access to DSP R2 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /*
         * Our cpu supports DSP ASE, so enable
         * access to DSP resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them.  */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (env->insn_flags & ASE_MSA) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}

void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

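/* Raise a guest exception and unwind to the main loop; pc is the host
 * return address from which the guest state is restored.
 */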
void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                          int error_code, uintptr_t pc);

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}

#endif /* MIPS_INTERNAL_H */