/* target-mips/cpu.h */
#if !defined (__MIPS_CPU_H__)
#define __MIPS_CPU_H__

//#define DEBUG_OP

#define ALIGNED_ONLY

#define ELF_MACHINE EM_MIPS

#define CPUArchState struct CPUMIPSState

#include "config.h"
#include "qemu-common.h"
#include "mips-defs.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"

struct CPUMIPSState;

typedef struct r4k_tlb_t r4k_tlb_t;
struct r4k_tlb_t {
    target_ulong VPN;
    uint32_t PageMask;
    uint_fast8_t ASID;
    uint_fast16_t G:1;
    uint_fast16_t C0:3;
    uint_fast16_t C1:3;
    uint_fast16_t V0:1;
    uint_fast16_t V1:1;
    uint_fast16_t D0:1;
    uint_fast16_t D1:1;
    uint_fast16_t XI0:1;
    uint_fast16_t XI1:1;
    uint_fast16_t RI0:1;
    uint_fast16_t RI1:1;
    uint_fast16_t EHINV:1;
    uint64_t PFN[2];
};

#if !defined(CONFIG_USER_ONLY)
typedef struct CPUMIPSTLBContext CPUMIPSTLBContext;
struct CPUMIPSTLBContext {
    uint32_t nb_tlb;
    uint32_t tlb_in_use;
    int (*map_address) (struct CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};
#endif

/* MSA Context */
#define MSA_WRLEN (128)

enum CPUMIPSMSADataFormat {
    DF_BYTE = 0,
    DF_HALF,
    DF_WORD,
    DF_DOUBLE
};

typedef union wr_t wr_t;
union wr_t {
    int8_t  b[MSA_WRLEN/8];
    int16_t h[MSA_WRLEN/16];
    int32_t w[MSA_WRLEN/32];
    int64_t d[MSA_WRLEN/64];
};

typedef union fpr_t fpr_t;
union fpr_t {
    float64  fd;   /* ieee double precision */
    float32  fs[2];/* ieee single precision */
    uint64_t d;    /* binary double fixed-point */
    uint32_t w[2]; /* binary single fixed-point */
    /* FPU/MSA register mapping is not tested on big-endian hosts. */
    wr_t     wr;   /* vector data */
};
/* define FP_ENDIAN_IDX to access the same location
 * in the fpr_t union regardless of the host endianness
 */
#if defined(HOST_WORDS_BIGENDIAN)
# define FP_ENDIAN_IDX 1
#else
# define FP_ENDIAN_IDX 0
#endif
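
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): FP_ENDIAN_IDX lets code pick the 32-bit element that
 * aliases the low word of the 64-bit register regardless of host byte
 * order, e.g.
 *
 *   static inline float32 get_even_single(const fpr_t *fpr)
 *   {
 *       return fpr->fs[FP_ENDIAN_IDX];      // low (even) single
 *   }
 *   // the other half of the pair is reached with fpr->fs[!FP_ENDIAN_IDX]
 */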
typedef struct CPUMIPSFPUContext CPUMIPSFPUContext;
struct CPUMIPSFPUContext {
    /* Floating point registers */
    fpr_t fpr[32];
    float_status fp_status;
    /* fpu implementation/revision register (fir) */
    uint32_t fcr0;
#define FCR0_FREP 29
#define FCR0_UFRP 28
#define FCR0_F64 22
#define FCR0_L 21
#define FCR0_W 20
#define FCR0_3D 19
#define FCR0_PS 18
#define FCR0_D 17
#define FCR0_S 16
#define FCR0_PRID 8
#define FCR0_REV 0
    /* fcsr */
    uint32_t fcr31;
#define SET_FP_COND(num,env)     do { ((env).fcr31) |= ((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
#define CLEAR_FP_COND(num,env)   do { ((env).fcr31) &= ~((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
#define GET_FP_COND(env)         ((((env).fcr31 >> 24) & 0xfe) | (((env).fcr31 >> 23) & 0x1))
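/*
 * Worked example (illustrative, not from the original header): condition
 * code 0 lives in fcr31 bit 23 and codes 1..7 in bits 25..31, so
 *
 *   SET_FP_COND(0, env->active_fpu);   // sets bit 23
 *   SET_FP_COND(3, env->active_fpu);   // sets bit 27
 *   GET_FP_COND(env->active_fpu);      // now yields 0x09 (cc3 | cc0)
 */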
#define GET_FP_CAUSE(reg)        (((reg) >> 12) & 0x3f)
#define GET_FP_ENABLE(reg)       (((reg) >> 7) & 0x1f)
#define GET_FP_FLAGS(reg)        (((reg) >> 2) & 0x1f)
#define SET_FP_CAUSE(reg,v)      do { (reg) = ((reg) & ~(0x3f << 12)) | ((v & 0x3f) << 12); } while(0)
#define SET_FP_ENABLE(reg,v)     do { (reg) = ((reg) & ~(0x1f << 7)) | ((v & 0x1f) << 7); } while(0)
#define SET_FP_FLAGS(reg,v)      do { (reg) = ((reg) & ~(0x1f << 2)) | ((v & 0x1f) << 2); } while(0)
#define UPDATE_FP_FLAGS(reg,v)   do { (reg) |= ((v & 0x1f) << 2); } while(0)
#define FP_INEXACT        1
#define FP_UNDERFLOW      2
#define FP_OVERFLOW       4
#define FP_DIV0           8
#define FP_INVALID        16
#define FP_UNIMPLEMENTED  32
};

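/*
 * Illustrative sketch (not part of the original header): a softfloat
 * exception, once translated to the FP_* bits above, is typically folded
 * into fcr31 with the accessor macros, roughly
 *
 *   int flags = ieee_ex_to_mips(get_float_exception_flags(
 *                                   &env->active_fpu.fp_status));
 *   SET_FP_CAUSE(env->active_fpu.fcr31, flags);
 *   if (flags & GET_FP_ENABLE(env->active_fpu.fcr31)) {
 *       ... raise the FP exception ...
 *   } else {
 *       UPDATE_FP_FLAGS(env->active_fpu.fcr31, flags);
 *   }
 */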
#define NB_MMU_MODES 3

typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
struct CPUMIPSMVPContext {
    int32_t CP0_MVPControl;
#define CP0MVPCo_CPA    3
#define CP0MVPCo_STLB   2
#define CP0MVPCo_VPC    1
#define CP0MVPCo_EVP    0
    int32_t CP0_MVPConf0;
#define CP0MVPC0_M      31
#define CP0MVPC0_TLBS   29
#define CP0MVPC0_GS     28
#define CP0MVPC0_PCP    27
#define CP0MVPC0_PTLBE  16
#define CP0MVPC0_TCA    15
#define CP0MVPC0_PVPE   10
#define CP0MVPC0_PTC    0
    int32_t CP0_MVPConf1;
#define CP0MVPC1_CIM    31
#define CP0MVPC1_CIF    30
#define CP0MVPC1_PCX    20
#define CP0MVPC1_PCP2   10
#define CP0MVPC1_PCP1   0
};

typedef struct mips_def_t mips_def_t;

#define MIPS_SHADOW_SET_MAX 16
#define MIPS_TC_MAX 5
#define MIPS_FPU_MAX 1
#define MIPS_DSP_ACC 4
#define MIPS_KSCRATCH_NUM 6

typedef struct TCState TCState;
struct TCState {
    target_ulong gpr[32];
    target_ulong PC;
    target_ulong HI[MIPS_DSP_ACC];
    target_ulong LO[MIPS_DSP_ACC];
    target_ulong ACX[MIPS_DSP_ACC];
    target_ulong DSPControl;
    int32_t CP0_TCStatus;
#define CP0TCSt_TCU3    31
#define CP0TCSt_TCU2    30
#define CP0TCSt_TCU1    29
#define CP0TCSt_TCU0    28
#define CP0TCSt_TMX     27
#define CP0TCSt_RNST    23
#define CP0TCSt_TDS     21
#define CP0TCSt_DT      20
#define CP0TCSt_DA      15
#define CP0TCSt_A       13
#define CP0TCSt_TKSU    11
#define CP0TCSt_IXMT    10
#define CP0TCSt_TASID   0
    int32_t CP0_TCBind;
#define CP0TCBd_CurTC   21
#define CP0TCBd_TBE     17
#define CP0TCBd_CurVPE  0
    target_ulong CP0_TCHalt;
    target_ulong CP0_TCContext;
    target_ulong CP0_TCSchedule;
    target_ulong CP0_TCScheFBack;
    int32_t CP0_Debug_tcstatus;
    target_ulong CP0_UserLocal;

    int32_t msacsr;

#define MSACSR_FS       24
#define MSACSR_FS_MASK  (1 << MSACSR_FS)
#define MSACSR_NX       18
#define MSACSR_NX_MASK  (1 << MSACSR_NX)
#define MSACSR_CEF      2
#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF)
#define MSACSR_RM       0
#define MSACSR_RM_MASK  (0x3 << MSACSR_RM)
#define MSACSR_MASK     (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \
                         MSACSR_FS_MASK)

    float_status msa_fp_status;
};

typedef struct CPUMIPSState CPUMIPSState;
struct CPUMIPSState {
    TCState active_tc;
    CPUMIPSFPUContext active_fpu;

    uint32_t current_tc;
    uint32_t current_fpu;

    uint32_t SEGBITS;
    uint32_t PABITS;
#if defined(TARGET_MIPS64)
# define PABITS_BASE 36
#else
# define PABITS_BASE 32
#endif
    target_ulong SEGMask;
    uint64_t PAMask;
#define PAMASK_BASE ((1ULL << PABITS_BASE) - 1)
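/*
 * Worked example (illustrative, not from the original header): with
 * PABITS_BASE = 36 on TARGET_MIPS64, PAMASK_BASE evaluates to
 * (1ULL << 36) - 1 = 0xFFFFFFFFFULL, a 36-bit physical address mask;
 * the 32-bit case yields 0xFFFFFFFFULL.
 */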
    int32_t msair;
#define MSAIR_ProcID    8
#define MSAIR_Rev       0

    int32_t CP0_Index;
    /* CP0_MVP* are per MVP registers. */
    int32_t CP0_Random;
    int32_t CP0_VPEControl;
#define CP0VPECo_YSI    21
#define CP0VPECo_GSI    20
#define CP0VPECo_EXCPT  16
#define CP0VPECo_TE     15
#define CP0VPECo_TargTC 0
    int32_t CP0_VPEConf0;
#define CP0VPEC0_M      31
#define CP0VPEC0_XTC    21
#define CP0VPEC0_TCS    19
#define CP0VPEC0_SCS    18
#define CP0VPEC0_DSC    17
#define CP0VPEC0_ICS    16
#define CP0VPEC0_MVP    1
#define CP0VPEC0_VPA    0
    int32_t CP0_VPEConf1;
#define CP0VPEC1_NCX    20
#define CP0VPEC1_NCP2   10
#define CP0VPEC1_NCP1   0
    target_ulong CP0_YQMask;
    target_ulong CP0_VPESchedule;
    target_ulong CP0_VPEScheFBack;
    int32_t CP0_VPEOpt;
#define CP0VPEOpt_IWX7  15
#define CP0VPEOpt_IWX6  14
#define CP0VPEOpt_IWX5  13
#define CP0VPEOpt_IWX4  12
#define CP0VPEOpt_IWX3  11
#define CP0VPEOpt_IWX2  10
#define CP0VPEOpt_IWX1  9
#define CP0VPEOpt_IWX0  8
#define CP0VPEOpt_DWX7  7
#define CP0VPEOpt_DWX6  6
#define CP0VPEOpt_DWX5  5
#define CP0VPEOpt_DWX4  4
#define CP0VPEOpt_DWX3  3
#define CP0VPEOpt_DWX2  2
#define CP0VPEOpt_DWX1  1
#define CP0VPEOpt_DWX0  0
    uint64_t CP0_EntryLo0;
    uint64_t CP0_EntryLo1;
#if defined(TARGET_MIPS64)
# define CP0EnLo_RI 63
# define CP0EnLo_XI 62
#else
# define CP0EnLo_RI 31
# define CP0EnLo_XI 30
#endif
    target_ulong CP0_Context;
    target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
    int32_t CP0_PageMask;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
#define CP0PG_RIE   31
#define CP0PG_XIE   30
#define CP0PG_ELPA  29
#define CP0PG_IEC   27
    int32_t CP0_Wired;
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
#define CP0SRSC0_M      31
#define CP0SRSC0_SRS3   20
#define CP0SRSC0_SRS2   10
#define CP0SRSC0_SRS1   0
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
#define CP0SRSC1_M      31
#define CP0SRSC1_SRS6   20
#define CP0SRSC1_SRS5   10
#define CP0SRSC1_SRS4   0
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
#define CP0SRSC2_M      31
#define CP0SRSC2_SRS9   20
#define CP0SRSC2_SRS8   10
#define CP0SRSC2_SRS7   0
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
#define CP0SRSC3_M      31
#define CP0SRSC3_SRS12  20
#define CP0SRSC3_SRS11  10
#define CP0SRSC3_SRS10  0
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
#define CP0SRSC4_SRS15  20
#define CP0SRSC4_SRS14  10
#define CP0SRSC4_SRS13  0
    int32_t CP0_HWREna;
    target_ulong CP0_BadVAddr;
    uint32_t CP0_BadInstr;
    uint32_t CP0_BadInstrP;
    int32_t CP0_Count;
    target_ulong CP0_EntryHi;
#define CP0EnHi_EHINV 10
    int32_t CP0_Compare;
    int32_t CP0_Status;
#define CP0St_CU3   31
#define CP0St_CU2   30
#define CP0St_CU1   29
#define CP0St_CU0   28
#define CP0St_RP    27
#define CP0St_FR    26
#define CP0St_RE    25
#define CP0St_MX    24
#define CP0St_PX    23
#define CP0St_BEV   22
#define CP0St_TS    21
#define CP0St_SR    20
#define CP0St_NMI   19
#define CP0St_IM    8
#define CP0St_KX    7
#define CP0St_SX    6
#define CP0St_UX    5
#define CP0St_KSU   3
#define CP0St_ERL   2
#define CP0St_EXL   1
#define CP0St_IE    0
    int32_t CP0_IntCtl;
#define CP0IntCtl_IPTI 29
#define CP0IntCtl_IPPCI 26
#define CP0IntCtl_VS 5
    int32_t CP0_SRSCtl;
#define CP0SRSCtl_HSS   26
#define CP0SRSCtl_EICSS 18
#define CP0SRSCtl_ESS   12
#define CP0SRSCtl_PSS   6
#define CP0SRSCtl_CSS   0
    int32_t CP0_SRSMap;
#define CP0SRSMap_SSV7  28
#define CP0SRSMap_SSV6  24
#define CP0SRSMap_SSV5  20
#define CP0SRSMap_SSV4  16
#define CP0SRSMap_SSV3  12
#define CP0SRSMap_SSV2  8
#define CP0SRSMap_SSV1  4
#define CP0SRSMap_SSV0  0
    int32_t CP0_Cause;
#define CP0Ca_BD   31
#define CP0Ca_TI   30
#define CP0Ca_CE   28
#define CP0Ca_DC   27
#define CP0Ca_PCI  26
#define CP0Ca_IV   23
#define CP0Ca_WP   22
#define CP0Ca_IP    8
#define CP0Ca_IP_mask 0x0000FF00
#define CP0Ca_EC    2
    target_ulong CP0_EPC;
    int32_t CP0_PRid;
    int32_t CP0_EBase;
    int32_t CP0_Config0;
#define CP0C0_M    31
#define CP0C0_K23  28
#define CP0C0_KU   25
#define CP0C0_MDU  20
#define CP0C0_MM   18
#define CP0C0_BM   16
#define CP0C0_BE   15
#define CP0C0_AT   13
#define CP0C0_AR   10
#define CP0C0_MT   7
#define CP0C0_VI   3
#define CP0C0_K0   0
    int32_t CP0_Config1;
#define CP0C1_M    31
#define CP0C1_MMU  25
#define CP0C1_IS   22
#define CP0C1_IL   19
#define CP0C1_IA   16
#define CP0C1_DS   13
#define CP0C1_DL   10
#define CP0C1_DA   7
#define CP0C1_C2   6
#define CP0C1_MD   5
#define CP0C1_PC   4
#define CP0C1_WR   3
#define CP0C1_CA   2
#define CP0C1_EP   1
#define CP0C1_FP   0
    int32_t CP0_Config2;
#define CP0C2_M    31
#define CP0C2_TU   28
#define CP0C2_TS   24
#define CP0C2_TL   20
#define CP0C2_TA   16
#define CP0C2_SU   12
#define CP0C2_SS   8
#define CP0C2_SL   4
#define CP0C2_SA   0
    int32_t CP0_Config3;
#define CP0C3_M          31
#define CP0C3_BPG        30
#define CP0C3_CMCGR      29
#define CP0C3_MSAP       28
#define CP0C3_BP         27
#define CP0C3_BI         26
#define CP0C3_IPLW       21
#define CP0C3_MMAR       18
#define CP0C3_MCU        17
#define CP0C3_ISA_ON_EXC 16
#define CP0C3_ISA        14
#define CP0C3_ULRI       13
#define CP0C3_RXI        12
#define CP0C3_DSP2P      11
#define CP0C3_DSPP       10
#define CP0C3_LPA        7
#define CP0C3_VEIC       6
#define CP0C3_VInt       5
#define CP0C3_SP         4
#define CP0C3_CDMM       3
#define CP0C3_MT         2
#define CP0C3_SM         1
#define CP0C3_TL         0
    int32_t CP0_Config4;
    int32_t CP0_Config4_rw_bitmask;
#define CP0C4_M            31
#define CP0C4_IE           29
#define CP0C4_KScrExist    16
#define CP0C4_MMUExtDef    14
#define CP0C4_FTLBPageSize 8
#define CP0C4_FTLBWays     4
#define CP0C4_FTLBSets     0
#define CP0C4_MMUSizeExt   0
    int32_t CP0_Config5;
    int32_t CP0_Config5_rw_bitmask;
#define CP0C5_M          31
#define CP0C5_K          30
#define CP0C5_CV         29
#define CP0C5_EVA        28
#define CP0C5_MSAEn      27
#define CP0C5_UFE        9
#define CP0C5_FRE        8
#define CP0C5_SBRI       6
#define CP0C5_MVH        5
#define CP0C5_LLB        4
#define CP0C5_UFR        2
#define CP0C5_NFExists   0
    int32_t CP0_Config6;
    int32_t CP0_Config7;
    /* XXX: Maybe make LLAddr per-TC? */
    uint64_t lladdr;
    target_ulong llval;
    target_ulong llnewval;
    target_ulong llreg;
    uint64_t CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    target_ulong CP0_WatchLo[8];
    int32_t CP0_WatchHi[8];
    target_ulong CP0_XContext;
    int32_t CP0_Framemask;
    int32_t CP0_Debug;
#define CP0DB_DBD  31
#define CP0DB_DM   30
#define CP0DB_LSNM 28
#define CP0DB_Doze 27
#define CP0DB_Halt 26
#define CP0DB_CNT  25
#define CP0DB_IBEP 24
#define CP0DB_DBEP 21
#define CP0DB_IEXI 20
#define CP0DB_VER  15
#define CP0DB_DEC  10
#define CP0DB_SSt  8
#define CP0DB_DINT 5
#define CP0DB_DIB  4
#define CP0DB_DDBS 3
#define CP0DB_DDBL 2
#define CP0DB_DBp  1
#define CP0DB_DSS  0
    target_ulong CP0_DEPC;
    int32_t CP0_Performance0;
    uint64_t CP0_TagLo;
    int32_t CP0_DataLo;
    int32_t CP0_TagHi;
    int32_t CP0_DataHi;
    target_ulong CP0_ErrorEPC;
    int32_t CP0_DESAVE;
    /* We waste some space so we can handle shadow registers like TCs. */
    TCState tcs[MIPS_SHADOW_SET_MAX];
    CPUMIPSFPUContext fpus[MIPS_FPU_MAX];
    /* QEMU */
    int error_code;
#define EXCP_TLB_NOMATCH   0x1
#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
    uint32_t hflags;    /* CPU State */
    /* TMASK defines different execution modes */
#define MIPS_HFLAG_TMASK  0x75807FF
#define MIPS_HFLAG_MODE   0x00007 /* execution modes                    */
    /* The KSU flags must be the lowest bits in hflags. The flag order
       must be the same as defined for CP0 Status. This allows the bits
       to be used directly as the value of mmu_idx. */
#define MIPS_HFLAG_KSU    0x00003 /* kernel/supervisor/user mode mask   */
#define MIPS_HFLAG_UM     0x00002 /* user mode flag                     */
#define MIPS_HFLAG_SM     0x00001 /* supervisor mode flag               */
#define MIPS_HFLAG_KM     0x00000 /* kernel mode flag                   */
#define MIPS_HFLAG_DM     0x00004 /* Debug mode                         */
#define MIPS_HFLAG_64     0x00008 /* 64-bit instructions enabled        */
#define MIPS_HFLAG_CP0    0x00010 /* CP0 enabled                        */
#define MIPS_HFLAG_FPU    0x00020 /* FPU enabled                        */
#define MIPS_HFLAG_F64    0x00040 /* 64-bit FPU enabled                 */
    /* True if the MIPS IV COP1X instructions can be used.  This also
       controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S
       and RSQRT.D. */
#define MIPS_HFLAG_COP1X  0x00080 /* COP1X instructions enabled         */
#define MIPS_HFLAG_RE     0x00100 /* Reversed endianness                */
#define MIPS_HFLAG_AWRAP  0x00200 /* 32-bit compatibility address wrapping */
#define MIPS_HFLAG_M16    0x00400 /* MIPS16 mode flag                   */
#define MIPS_HFLAG_M16_SHIFT 10
    /* If translation is interrupted between the branch instruction and
     * the delay slot, record what type of branch it is so that we can
     * resume translation properly.  It might be possible to reduce
     * this from three bits to two.  */
#define MIPS_HFLAG_BMASK_BASE  0x803800
#define MIPS_HFLAG_B      0x00800 /* Unconditional branch               */
#define MIPS_HFLAG_BC     0x01000 /* Conditional branch                 */
#define MIPS_HFLAG_BL     0x01800 /* Likely branch                      */
#define MIPS_HFLAG_BR     0x02000 /* branch to register (can't link TB) */
    /* Extra flags about the current pending branch.  */
#define MIPS_HFLAG_BMASK_EXT 0x7C000
#define MIPS_HFLAG_B16    0x04000 /* branch instruction was 16 bits     */
#define MIPS_HFLAG_BDS16  0x08000 /* branch requires 16-bit delay slot  */
#define MIPS_HFLAG_BDS32  0x10000 /* branch requires 32-bit delay slot  */
#define MIPS_HFLAG_BDS_STRICT  0x20000 /* Strict delay slot size */
#define MIPS_HFLAG_BX     0x40000 /* branch exchanges execution mode    */
#define MIPS_HFLAG_BMASK  (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT)
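/*
 * Illustrative sketch (not part of the original header): the combined mask
 * makes it cheap to test for and discard branch-related state in hflags,
 * e.g.
 *
 *   if (env->hflags & MIPS_HFLAG_BMASK) {      // branch/slot state pending
 *       env->hflags &= ~MIPS_HFLAG_BMASK;      // forget all of it at once
 *   }
 */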
    /* MIPS DSP resources access. */
#define MIPS_HFLAG_DSP   0x080000  /* Enable access to MIPS DSP resources. */
#define MIPS_HFLAG_DSPR2 0x100000  /* Enable access to MIPS DSPR2 resources. */
    /* Extra flag about HWREna register. */
#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */
#define MIPS_HFLAG_SBRI  0x400000 /* R6 SDBBP causes RI excpt. in user mode */
#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot                   */
#define MIPS_HFLAG_MSA   0x1000000
#define MIPS_HFLAG_FRE   0x2000000 /* FRE enabled */
#define MIPS_HFLAG_ELPA  0x4000000
    target_ulong btarget;        /* Jump / branch target               */
    target_ulong bcond;          /* Branch condition (if needed)       */

    int SYNCI_Step; /* Address step size for SYNCI */
    int CCRes; /* Cycle count resolution/divisor */
    uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */
    uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
    int insn_flags; /* Supported instruction set */

    CPU_COMMON

    /* Fields from here on are preserved across CPU reset. */
    CPUMIPSMVPContext *mvp;
#if !defined(CONFIG_USER_ONLY)
    CPUMIPSTLBContext *tlb;
#endif

    const mips_def_t *cpu_model;
    void *irq[8];
    QEMUTimer *timer; /* Internal timer */
};

#include "cpu-qom.h"

#if !defined(CONFIG_USER_ONLY)
int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                        target_ulong address, int rw, int access_type);
int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                           target_ulong address, int rw, int access_type);
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);

void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                bool is_write, bool is_exec, int unused,
                                unsigned size);
#endif

void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);

#define cpu_exec cpu_mips_exec
#define cpu_gen_code cpu_mips_gen_code
#define cpu_signal_handler cpu_mips_signal_handler
#define cpu_list mips_cpu_list

extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env);
extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);

/* MMU modes definitions. We carefully match the indices with our
   hflags layout. */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _super
#define MMU_MODE2_SUFFIX _user
#define MMU_USER_IDX 2
static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
{
    return env->hflags & MIPS_HFLAG_KSU;
}
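
/*
 * Illustrative note (not part of the original header): because the KSU bits
 * sit at the bottom of hflags, cpu_mmu_index() yields 0 in kernel mode,
 * 1 in supervisor mode and 2 in user mode, matching the MMU_MODE*_SUFFIX
 * definitions above, e.g.
 *
 *   int idx = cpu_mmu_index(env, false);   // 2 when MIPS_HFLAG_UM is set
 */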

static inline int cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    int32_t pending;
    int32_t status;
    int r;

    if (!(env->CP0_Status & (1 << CP0St_IE)) ||
        (env->CP0_Status & (1 << CP0St_EXL)) ||
        (env->CP0_Status & (1 << CP0St_ERL)) ||
        /* Note that the TCStatus IXMT field is initialized to zero,
           and only MT capable cores can set it to one. So we don't
           need to check for MT capabilities here.  */
        (env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT)) ||
        (env->hflags & MIPS_HFLAG_DM)) {
        /* Interrupts are disabled */
        return 0;
    }

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /* A MIPS configured with a vectorizing external interrupt controller
           will feed a vector into the Cause pending lines. The core treats
           the status lines as a vector level, not as individual masks.  */
        r = pending > status;
    } else {
        /* A MIPS configured with compatibility or VInt (Vectored Interrupts)
           treats the pending lines as individual interrupt lines; the status
           lines are individual masks.  */
        r = pending & status;
    }
    return r;
}

#include "exec/cpu-all.h"

/* Memory access type:
 * may be needed for precise access rights control and precise exceptions.
 */
enum {
    /* 1 bit to define user level / supervisor access */
    ACCESS_USER  = 0x00,
    ACCESS_SUPER = 0x01,
    /* 1 bit to indicate direction */
    ACCESS_STORE = 0x02,
    /* Type of instruction that generated the access */
    ACCESS_CODE  = 0x10, /* Code fetch access                */
    ACCESS_INT   = 0x20, /* Integer load/store access        */
    ACCESS_FLOAT = 0x30, /* floating point load/store access */
};
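/*
 * Illustrative sketch (not part of the original header): an access type is
 * built by OR-ing one value from each group, e.g. a supervisor-mode integer
 * store would be described as
 *
 *   int access_type = ACCESS_SUPER | ACCESS_STORE | ACCESS_INT;  // 0x23
 */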

/* Exceptions */
enum {
    EXCP_NONE          = -1,
    EXCP_RESET         = 0,
    EXCP_SRESET,
    EXCP_DSS,
    EXCP_DINT,
    EXCP_DDBL,
    EXCP_DDBS,
    EXCP_NMI,
    EXCP_MCHECK,
    EXCP_EXT_INTERRUPT, /* 8 */
    EXCP_DFWATCH,
    EXCP_DIB,
    EXCP_IWATCH,
    EXCP_AdEL,
    EXCP_AdES,
    EXCP_TLBF,
    EXCP_IBE,
    EXCP_DBp, /* 16 */
    EXCP_SYSCALL,
    EXCP_BREAK,
    EXCP_CpU,
    EXCP_RI,
    EXCP_OVERFLOW,
    EXCP_TRAP,
    EXCP_FPE,
    EXCP_DWATCH, /* 24 */
    EXCP_LTLBL,
    EXCP_TLBL,
    EXCP_TLBS,
    EXCP_DBE,
    EXCP_THREAD,
    EXCP_MDMX,
    EXCP_C2E,
    EXCP_CACHE, /* 32 */
    EXCP_DSPDIS,
    EXCP_MSADIS,
    EXCP_MSAFPE,
    EXCP_TLBXI,
    EXCP_TLBRI,

    EXCP_LAST = EXCP_TLBRI,
};

/* Dummy exception for conditional stores.  */
#define EXCP_SC 0x100

/*
 * This is an internally generated WAKE request line.
 * It is driven by the CPU itself. It is raised when the MT
 * block wants to wake a VPE from an inactive state and
 * cleared when the VPE goes from active to inactive.
 */
#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
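/*
 * Illustrative sketch (not part of the original header, using the generic
 * cpu_interrupt()/cpu_reset_interrupt() helpers): the wake request is
 * asserted and withdrawn on the target VPE's CPUState like any other
 * interrupt line, e.g.
 *
 *   cpu_interrupt(cs, CPU_INTERRUPT_WAKE);        // request a wake-up
 *   cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);  // drop the request
 */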

int cpu_mips_exec(CPUState *cpu);
void mips_tcg_init(void);
MIPSCPU *cpu_mips_init(const char *cpu_model);
int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);

#define cpu_init(cpu_model) CPU(cpu_mips_init(cpu_model))

/* TODO QOM'ify CPU reset and remove */
void cpu_state_reset(CPUMIPSState *s);

/* mips_timer.c */
uint32_t cpu_mips_get_random (CPUMIPSState *env);
uint32_t cpu_mips_get_count (CPUMIPSState *env);
void cpu_mips_store_count (CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

/* mips_int.c */
void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);

/* helper.c */
int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                              int mmu_idx);
#if !defined(CONFIG_USER_ONLY)
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
                                   int rw);
#endif
target_ulong exception_resume_pc (CPUMIPSState *env);

/* op_helper.c */
extern unsigned int ieee_rm[];
int ieee_ex_to_mips(int xcpt);

static inline void restore_rounding_mode(CPUMIPSState *env)
{
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}

static inline void restore_flush_mode(CPUMIPSState *env)
{
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0,
                      &env->active_fpu.fp_status);
}

static inline void restore_fp_status(CPUMIPSState *env)
{
    restore_rounding_mode(env);
    restore_flush_mode(env);
}

static inline void restore_msa_fp_status(CPUMIPSState *env)
{
    float_status *status = &env->active_tc.msa_fp_status;
    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;

    set_float_rounding_mode(ieee_rm[rounding_mode], status);
    set_flush_to_zero(flush_to_zero, status);
    set_flush_inputs_to_zero(flush_to_zero, status);
}

static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}

static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *pc = env->active_tc.PC;
    *cs_base = 0;
    *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
                            MIPS_HFLAG_HWRENA_ULR);
}

static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled.  */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated.  */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /* Now verify that there are active thread contexts in the VPE.

       This assumes the CPU model will internally reschedule threads
       if the active one goes to sleep. If there are no threads available
       the active one will be in a sleeping state, and we can turn off
       the entire VPE.  */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated.  */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state.  */
        active = 0;
    }

    return active;
}

#include "exec/exec-all.h"

static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 |
                     MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | MIPS_HFLAG_FRE |
                     MIPS_HFLAG_ELPA);
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS64R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6 */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS32R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSPR2) {
        /* The CPU implements the DSP R2 ASE, so grant access to the
           DSP and DSP R2 resources when Status.MX is set.  */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /* The CPU implements the DSP ASE, so grant access to the
           DSP resources when Status.MX is set.  */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /* All supported MIPS IV CPUs use the XX (CU3) to enable
           and disable the MIPS IV extensions to the MIPS III ISA.
           Some other MIPS IV CPUs ignore the bit, so the check here
           would be too restrictive for them.  */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (env->insn_flags & ASE_MSA) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}

#ifndef CONFIG_USER_ONLY
/* Called for updates to CP0_Status.  */
static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
{
    int32_t tcstatus, *tcst;
    uint32_t v = cpu->CP0_Status;
    uint32_t cu, mx, asid, ksu;
    uint32_t mask = ((1 << CP0TCSt_TCU3)
                      | (1 << CP0TCSt_TCU2)
                      | (1 << CP0TCSt_TCU1)
                      | (1 << CP0TCSt_TCU0)
                      | (1 << CP0TCSt_TMX)
                      | (3 << CP0TCSt_TKSU)
                      | (0xff << CP0TCSt_TASID));

    cu = (v >> CP0St_CU0) & 0xf;
    mx = (v >> CP0St_MX) & 0x1;
    ksu = (v >> CP0St_KSU) & 0x3;
    asid = env->CP0_EntryHi & 0xff;

    tcstatus = cu << CP0TCSt_TCU0;
    tcstatus |= mx << CP0TCSt_TMX;
    tcstatus |= ksu << CP0TCSt_TKSU;
    tcstatus |= asid;

    if (tc == cpu->current_tc) {
        tcst = &cpu->active_tc.CP0_TCStatus;
    } else {
        tcst = &cpu->tcs[tc].CP0_TCStatus;
    }

    *tcst &= ~mask;
    *tcst |= tcstatus;
    compute_hflags(cpu);
}

static inline void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;

        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (env->CP0_Status & ~mask) | (val & mask);
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    } else {
        compute_hflags(env);
    }
}

static inline void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = env->CP0_Cause;
    int i;

    if (env->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask &= ~((1 << CP0Ca_WP) & val);
    }

    env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);

    if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (env->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(env);
        } else {
            cpu_mips_start_count(env);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
#endif

static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
                                                        uint32_t exception,
                                                        int error_code,
                                                        uintptr_t pc)
{
    CPUState *cs = CPU(mips_env_get_cpu(env));

    if (exception < EXCP_SC) {
        qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
                      __func__, exception, error_code);
    }
    cs->exception_index = exception;
    env->error_code = error_code;

    cpu_loop_exit_restore(cs, pc);
}

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}

#endif /* !defined (__MIPS_CPU_H__) */