/*
 * MIPS internal definitions and helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MIPS_INTERNAL_H
#define MIPS_INTERNAL_H

#include "exec/memattrs.h"

/*
 * MMU types, the first four entries have the same layout as the
 * CP0C0_MT field.
 */
enum mips_mmu_types {
    MMU_TYPE_NONE       = 0,
    MMU_TYPE_R4000      = 1,    /* Standard TLB */
    MMU_TYPE_BAT        = 2,    /* Block Address Translation */
    MMU_TYPE_FMT        = 3,    /* Fixed Mapping */
    MMU_TYPE_DVF        = 4,    /* Dual VTLB and FTLB */
    MMU_TYPE_R3000,
    MMU_TYPE_R6000,
    MMU_TYPE_R8000
};

struct mips_def_t {
    const char *name;
    int32_t CP0_PRid;
    int32_t CP0_Config0;
    int32_t CP0_Config1;
    int32_t CP0_Config2;
    int32_t CP0_Config3;
    int32_t CP0_Config4;
    int32_t CP0_Config4_rw_bitmask;
    int32_t CP0_Config5;
    int32_t CP0_Config5_rw_bitmask;
    int32_t CP0_Config6;
    int32_t CP0_Config6_rw_bitmask;
    int32_t CP0_Config7;
    int32_t CP0_Config7_rw_bitmask;
    target_ulong CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    int32_t SYNCI_Step;
    int32_t CCRes;
    int32_t CP0_Status_rw_bitmask;
    int32_t CP0_TCStatus_rw_bitmask;
    int32_t CP0_SRSCtl;
    int32_t CP1_fcr0;
    int32_t CP1_fcr31_rw_bitmask;
    int32_t CP1_fcr31;
    int32_t MSAIR;
    int32_t SEGBITS;
    int32_t PABITS;
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
    target_ulong CP0_EBaseWG_rw_bitmask;
    uint64_t insn_flags;
    enum mips_mmu_types mmu_type;
    int32_t SAARP;
};

extern const struct mips_def_t mips_defs[];
extern const int mips_defs_number;
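
/*
 * Illustrative sketch (not part of the upstream header), assuming a caller
 * that wants to resolve a CPU model name against the definition table
 * declared above.  The helper name is hypothetical; strcmp() and NULL are
 * assumed to be available via the usual qemu/osdep.h include in the .c file.
 */
static inline const struct mips_def_t *example_find_mips_def(const char *name)
{
    int i;

    for (i = 0; i < mips_defs_number; i++) {
        if (strcmp(name, mips_defs[i].name) == 0) {
            return &mips_defs[i];  /* matching CPU definition */
        }
    }
    return NULL;  /* unknown CPU model name */
}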

void mips_cpu_do_interrupt(CPUState *cpu);
bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
void mips_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int mips_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr);

#if !defined(CONFIG_USER_ONLY)

typedef struct r4k_tlb_t r4k_tlb_t;
struct r4k_tlb_t {
    target_ulong VPN;
    uint32_t PageMask;
    uint16_t ASID;
    uint32_t MMID;
    unsigned int G:1;
    unsigned int C0:3;
    unsigned int C1:3;
    unsigned int V0:1;
    unsigned int V1:1;
    unsigned int D0:1;
    unsigned int D1:1;
    unsigned int XI0:1;
    unsigned int XI1:1;
    unsigned int RI0:1;
    unsigned int RI1:1;
    unsigned int EHINV:1;
    uint64_t PFN[2];
};

struct CPUMIPSTLBContext {
    uint32_t nb_tlb;
    uint32_t tlb_in_use;
    int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, MMUAccessType access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};

int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, MMUAccessType access_type);
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, MMUAccessType access_type);
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, MMUAccessType access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra);
uint32_t cpu_mips_get_random(CPUMIPSState *env);

void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr);
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  MMUAccessType access_type);
#endif

#define cpu_signal_handler cpu_mips_signal_handler

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_mips_cpu;
#endif

static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /*
         * Note that the TCStatus IXMT field is initialized to zero,
         * and only MT capable cores can set it to one. So we don't
         * need to check for MT capabilities here.
         */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}

/* Check if there is a pending and not masked-out interrupt */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    int32_t pending;
    int32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /*
         * A MIPS configured with a vectorizing external interrupt controller
         * will feed a vector into the Cause pending lines. The core treats
         * the status lines as a vector level, not as individual masks.
         */
        r = pending > status;
    } else {
        /*
         * A MIPS configured with compatibility or VInt (Vectored Interrupts)
         * treats the pending lines as individual interrupt lines; the status
         * lines are individual masks.
         */
        r = (pending & status) != 0;
    }
    return r;
}
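
/*
 * Illustrative sketch (not part of the upstream header), assuming an
 * interrupt delivery path that combines the two checks above: a hardware
 * interrupt is taken only when interrupts are globally enabled and at
 * least one pending line is not masked.  The helper name is hypothetical.
 */
static inline bool example_should_take_hw_irq(CPUMIPSState *env)
{
    return cpu_mips_hw_interrupts_enabled(env) &&
           cpu_mips_hw_interrupts_pending(env);
}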

void mips_tcg_init(void);

void msa_reset(CPUMIPSState *env);

/* cp0_timer.c */
uint32_t cpu_mips_get_count(CPUMIPSState *env);
void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);
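
/*
 * Illustrative usage sketch (not part of the upstream header), assuming a
 * caller that emulates a guest write to the CP0 Count and Compare registers.
 * The helper name is hypothetical; only the two declared functions are real.
 */
static inline void example_write_count_compare(CPUMIPSState *env,
                                               uint32_t count,
                                               uint32_t compare)
{
    cpu_mips_store_count(env, count);      /* rebase the running Count value */
    cpu_mips_store_compare(env, compare);  /* re-arm the Compare timer IRQ   */
}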

/* helper.c */
void mmu_init(CPUMIPSState *env, const mips_def_t *def);

bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);

/* op_helper.c */
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);

static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}

static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /*
     * Now verify that there are active thread contexts in the VPE.
     *
     * This assumes the CPU model will internally reschedule threads
     * if the active one goes to sleep. If there are no threads available
     * the active one will be in a sleeping state, and we can turn off
     * the entire VPE.
     */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}

static inline int mips_vp_active(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;

    /* Check if the VP disabled other VPs (which means the VP is enabled). */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP. */
    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if ((&other_cpu->env != env) &&
            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
            return 0;
        }
    }
    return 1;
}

static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                     MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
                     MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) &
                       MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS_R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6. */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS_R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSP_R3) {
        /*
         * Our cpu supports DSP R3 ASE, so enable
         * access to DSP R3 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                           MIPS_HFLAG_DSP_R3;
        }
    } else if (env->insn_flags & ASE_DSP_R2) {
        /*
         * Our cpu supports DSP R2 ASE, so enable
         * access to DSP R2 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /*
         * Our cpu supports DSP ASE, so enable
         * access to DSP resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS_R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS_R1) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /*
         * All supported MIPS IV CPUs use the XX (CU3) bit to enable
         * and disable the MIPS IV extensions to the MIPS III ISA.
         * Some other MIPS IV CPUs ignore the bit, so the check here
         * would be too restrictive for them.
         */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (ase_msa_available(env)) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}
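
/*
 * Illustrative sketch (not part of the upstream header), assuming a caller
 * that changes one of the fields compute_hflags() reads (CP0_Status,
 * CP0_Config5, CP0_PageGrain, FCR0, ...): the cached hflags are expected to
 * be recomputed afterwards.  The helper name and the CU1 example are
 * hypothetical.
 */
static inline void example_set_status_bit(CPUMIPSState *env, uint32_t bit)
{
    env->CP0_Status |= bit;  /* e.g. 1 << CP0St_CU1 to grant FPU access */
    compute_hflags(env);     /* refresh the cached translation flags    */
}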

void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

const char *mips_exception_name(int32_t exception);

void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                          int error_code, uintptr_t pc);

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}
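
/*
 * Illustrative sketch (not part of the upstream header), assuming a helper
 * that rejects an instruction it cannot handle: it would bail out through
 * the wrapper above with a MIPS exception code from cpu.h and the return
 * address into the generated code, so the guest CPU state can be restored
 * before the exception is delivered.  The helper name is hypothetical;
 * EXCP_RI is the Reserved Instruction exception.
 */
static inline void example_reject_reserved_insn(CPUMIPSState *env,
                                                uintptr_t retaddr)
{
    do_raise_exception(env, EXCP_RI, retaddr);
}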

#endif