/*
 * MicroBlaze virtual CPU header
 *
 * Copyright (c) 2009 Edgar E. Iglesias
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef MICROBLAZE_CPU_H
#define MICROBLAZE_CPU_H

#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"

/* MicroBlaze is always in-order. */
#define TCG_GUEST_DEFAULT_MO TCG_MO_ALL

typedef struct CPUArchState CPUMBState;
#if !defined(CONFIG_USER_ONLY)
#include "mmu.h"
#endif

#define EXCP_MMU        1
#define EXCP_IRQ        2
#define EXCP_SYSCALL    3  /* user-only */
#define EXCP_HW_BREAK   4
#define EXCP_HW_EXCP    5

/* MicroBlaze-specific interrupt pending bits. */
#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3

/* Meanings of the MBCPU object's two inbound GPIO lines */
#define MB_CPU_IRQ 0
#define MB_CPU_FIR 1

/* Register aliases. R0 - R15 */
#define R_SP    1
#define SR_PC   0
#define SR_MSR  1
#define SR_EAR  3
#define SR_ESR  5
#define SR_FSR  7
#define SR_BTR  0xb
#define SR_EDR  0xd

/* MSR flags. */
#define MSR_BE  (1<<0)  /* 0x001 */
#define MSR_IE  (1<<1)  /* 0x002 */
#define MSR_C   (1<<2)  /* 0x004 */
#define MSR_BIP (1<<3)  /* 0x008 */
#define MSR_FSL (1<<4)  /* 0x010 */
#define MSR_ICE (1<<5)  /* 0x020 */
#define MSR_DZ  (1<<6)  /* 0x040 */
#define MSR_DCE (1<<7)  /* 0x080 */
#define MSR_EE  (1<<8)  /* 0x100 */
#define MSR_EIP (1<<9)  /* 0x200 */
#define MSR_PVR (1<<10) /* 0x400 */
#define MSR_CC  (1<<31)

/* Machine State Register (MSR) Fields */
#define MSR_UM  (1<<11) /* User Mode */
#define MSR_UMS (1<<12) /* User Mode Save */
#define MSR_VM  (1<<13) /* Virtual Mode */
#define MSR_VMS (1<<14) /* Virtual Mode Save */

#define MSR_KERNEL      (MSR_EE | MSR_VM)
//#define MSR_USER      (MSR_KERNEL | MSR_UM | MSR_IE)
#define MSR_KERNEL_VMS  (MSR_EE | MSR_VMS)
//#define MSR_USER_VMS  (MSR_KERNEL_VMS | MSR_UMS | MSR_IE)

/* Exception State Register (ESR) Fields */
#define ESR_DIZ (1<<11) /* Zone Protection */
#define ESR_W   (1<<11) /* Unaligned word access */
#define ESR_S   (1<<10) /* Store instruction */

#define ESR_ESS_FSL_OFFSET      5

#define ESR_ESS_MASK            (0x7f << 5)

#define ESR_EC_FSL              0
#define ESR_EC_UNALIGNED_DATA   1
#define ESR_EC_ILLEGAL_OP       2
#define ESR_EC_INSN_BUS         3
#define ESR_EC_DATA_BUS         4
#define ESR_EC_DIVZERO          5
#define ESR_EC_FPU              6
#define ESR_EC_PRIVINSN         7
#define ESR_EC_STACKPROT        7 /* Same as PRIVINSN. */
#define ESR_EC_DATA_STORAGE     8
#define ESR_EC_INSN_STORAGE     9
#define ESR_EC_DATA_TLB         10
#define ESR_EC_INSN_TLB         11
#define ESR_EC_MASK             31

/* Floating Point Status Register (FSR) Bits */
#define FSR_IO (1<<4) /* Invalid operation */
#define FSR_DZ (1<<3) /* Divide-by-zero */
#define FSR_OF (1<<2) /* Overflow */
#define FSR_UF (1<<1) /* Underflow */
#define FSR_DO (1<<0) /* Denormalized operand error */

/* Version reg. */
/* Basic PVR mask */
#define PVR0_PVR_FULL_MASK              0x80000000
#define PVR0_USE_BARREL_MASK            0x40000000
#define PVR0_USE_DIV_MASK               0x20000000
#define PVR0_USE_HW_MUL_MASK            0x10000000
#define PVR0_USE_FPU_MASK               0x08000000
#define PVR0_USE_EXC_MASK               0x04000000
#define PVR0_USE_ICACHE_MASK            0x02000000
#define PVR0_USE_DCACHE_MASK            0x01000000
#define PVR0_USE_MMU_MASK               0x00800000
#define PVR0_USE_BTC                    0x00400000
#define PVR0_ENDI_MASK                  0x00200000
#define PVR0_FAULT                      0x00100000
#define PVR0_VERSION_MASK               0x0000FF00
#define PVR0_USER1_MASK                 0x000000FF
#define PVR0_SPROT_MASK                 0x00000001

#define PVR0_VERSION_SHIFT              8

/* User 2 PVR mask */
#define PVR1_USER2_MASK                 0xFFFFFFFF

/* Configuration PVR masks */
#define PVR2_D_OPB_MASK                 0x80000000
#define PVR2_D_LMB_MASK                 0x40000000
#define PVR2_I_OPB_MASK                 0x20000000
#define PVR2_I_LMB_MASK                 0x10000000
#define PVR2_INTERRUPT_IS_EDGE_MASK     0x08000000
#define PVR2_EDGE_IS_POSITIVE_MASK      0x04000000
#define PVR2_D_PLB_MASK                 0x02000000 /* new */
#define PVR2_I_PLB_MASK                 0x01000000 /* new */
#define PVR2_INTERCONNECT               0x00800000 /* new */
#define PVR2_USE_EXTEND_FSL             0x00080000 /* new */
#define PVR2_USE_FSL_EXC                0x00040000 /* new */
#define PVR2_USE_MSR_INSTR              0x00020000
#define PVR2_USE_PCMP_INSTR             0x00010000
#define PVR2_AREA_OPTIMISED             0x00008000
#define PVR2_USE_BARREL_MASK            0x00004000
#define PVR2_USE_DIV_MASK               0x00002000
#define PVR2_USE_HW_MUL_MASK            0x00001000
#define PVR2_USE_FPU_MASK               0x00000800
#define PVR2_USE_MUL64_MASK             0x00000400
#define PVR2_USE_FPU2_MASK              0x00000200 /* new */
#define PVR2_USE_IPLBEXC                0x00000100
#define PVR2_USE_DPLBEXC                0x00000080
#define PVR2_OPCODE_0x0_ILL_MASK        0x00000040
#define PVR2_UNALIGNED_EXC_MASK         0x00000020
#define PVR2_ILL_OPCODE_EXC_MASK        0x00000010
#define PVR2_IOPB_BUS_EXC_MASK          0x00000008
#define PVR2_DOPB_BUS_EXC_MASK          0x00000004
#define PVR2_DIV_ZERO_EXC_MASK          0x00000002
#define PVR2_FPU_EXC_MASK               0x00000001

/* Debug and exception PVR masks */
#define PVR3_DEBUG_ENABLED_MASK         0x80000000
#define PVR3_NUMBER_OF_PC_BRK_MASK      0x1E000000
#define PVR3_NUMBER_OF_RD_ADDR_BRK_MASK 0x00380000
#define PVR3_NUMBER_OF_WR_ADDR_BRK_MASK 0x0000E000
#define PVR3_FSL_LINKS_MASK             0x00000380

/* ICache config PVR masks */
#define PVR4_USE_ICACHE_MASK            0x80000000
#define PVR4_ICACHE_ADDR_TAG_BITS_MASK  0x7C000000
#define PVR4_ICACHE_USE_FSL_MASK        0x02000000
#define PVR4_ICACHE_ALLOW_WR_MASK       0x01000000
#define PVR4_ICACHE_LINE_LEN_MASK       0x00E00000
#define PVR4_ICACHE_BYTE_SIZE_MASK      0x001F0000

/* DCache config PVR masks */
#define PVR5_USE_DCACHE_MASK            0x80000000
#define PVR5_DCACHE_ADDR_TAG_BITS_MASK  0x7C000000
#define PVR5_DCACHE_USE_FSL_MASK        0x02000000
#define PVR5_DCACHE_ALLOW_WR_MASK       0x01000000
#define PVR5_DCACHE_LINE_LEN_MASK       0x00E00000
#define PVR5_DCACHE_BYTE_SIZE_MASK      0x001F0000
#define PVR5_DCACHE_WRITEBACK_MASK      0x00004000

/* ICache base address PVR mask */
#define PVR6_ICACHE_BASEADDR_MASK       0xFFFFFFFF

/* ICache high address PVR mask */
#define PVR7_ICACHE_HIGHADDR_MASK       0xFFFFFFFF

/* DCache base address PVR mask */
#define PVR8_DCACHE_BASEADDR_MASK       0xFFFFFFFF

/* DCache high address PVR mask */
#define PVR9_DCACHE_HIGHADDR_MASK       0xFFFFFFFF

/* Target family PVR mask */
#define PVR10_TARGET_FAMILY_MASK        0xFF000000
#define PVR10_ASIZE_SHIFT               18

/* MMU description */
#define PVR11_USE_MMU                   0xC0000000
#define PVR11_MMU_ITLB_SIZE             0x38000000
#define PVR11_MMU_DTLB_SIZE             0x07000000
#define PVR11_MMU_TLB_ACCESS            0x00C00000
#define PVR11_MMU_ZONES                 0x003E0000
/* MSR Reset value PVR mask */
#define PVR11_MSR_RESET_VALUE_MASK      0x000007FF

#define C_PVR_NONE                      0
#define C_PVR_BASIC                     1
#define C_PVR_FULL                      2

/* CPU flags. */

/* Condition codes. */
#define CC_GE  5
#define CC_GT  4
#define CC_LE  3
#define CC_LT  2
#define CC_NE  1
#define CC_EQ  0

#define STREAM_EXCEPTION (1 << 0)
#define STREAM_ATOMIC    (1 << 1)
#define STREAM_TEST      (1 << 2)
#define STREAM_CONTROL   (1 << 3)
#define STREAM_NONBLOCK  (1 << 4)
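/*
 * These bits appear to mirror the modifier suffixes of the MicroBlaze
 * stream-link (FSL/AXI-Stream) get/put instructions -- e(xception),
 * a(tomic), t(est), c(ontrol), n(on-blocking) -- combined into the control
 * word that the translator hands to the stream helpers.
 */
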
#define TARGET_INSN_START_EXTRA_WORDS 1

/* use-non-secure property masks */
#define USE_NON_SECURE_M_AXI_DP_MASK 0x1
#define USE_NON_SECURE_M_AXI_IP_MASK 0x2
#define USE_NON_SECURE_M_AXI_DC_MASK 0x4
#define USE_NON_SECURE_M_AXI_IC_MASK 0x8

struct CPUArchState {
    uint32_t bvalue;   /* TCG temporary, only valid during a TB */
    uint32_t btarget;  /* Full resolved branch destination */

    uint32_t imm;
    uint32_t regs[32];
    uint32_t pc;
    uint32_t msr;      /* All bits of MSR except MSR[C] and MSR[CC] */
    uint32_t msr_c;    /* MSR[C], in low bit; other bits must be 0 */
    target_ulong ear;
    uint32_t esr;
    uint32_t fsr;
    uint32_t btr;
    uint32_t edr;
    float_status fp_status;
    /* Stack protectors. Yes, it's a hw feature. */
    uint32_t slr, shr;

    /* lwx/swx reserved address */
#define RES_ADDR_NONE 0xffffffff /* Use 0xffffffff to indicate no reservation */
    target_ulong res_addr;
    uint32_t res_val;
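    /*
     * Rough sketch of how these fields are assumed to be used (the details
     * live in the translator and helpers, not in this header): lwx records
     * the address of the load in res_addr and the loaded value in res_val;
     * a later swx performs the store only while the reservation is still
     * valid (res_addr is not RES_ADDR_NONE and matches the store address)
     * and reports success or failure through MSR[C].
     */
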
    /* Internal flags. */
#define IMM_FLAG        (1 << 0)
#define BIMM_FLAG       (1 << 1)
#define ESR_ESS_FLAG    (1 << 2)  /* indicates ESR_ESS_MASK is present */
/* MSR_EE              (1 << 8)  -- these 3 are not in iflags but tb_flags */
/* MSR_UM              (1 << 11) */
/* MSR_VM              (1 << 13) */
/* ESR_ESS_MASK        [11:5]    -- unwind into iflags for unaligned excp */
#define D_FLAG          (1 << 12) /* Bit in ESR. */
#define DRTI_FLAG       (1 << 16)
#define DRTE_FLAG       (1 << 17)
#define DRTB_FLAG       (1 << 18)

/* TB dependent CPUMBState. */
#define IFLAGS_TB_MASK  (D_FLAG | BIMM_FLAG | IMM_FLAG | \
                         DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)
#define MSR_TB_MASK     (MSR_UM | MSR_VM | MSR_EE)

    uint32_t iflags;

#if !defined(CONFIG_USER_ONLY)
    /* Unified MMU. */
    MicroBlazeMMU mmu;
#endif

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;
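    /*
     * end_reset_fields is an empty marker: everything declared before it is
     * presumably wiped on reset with a single
     * memset(env, 0, offsetof(CPUMBState, end_reset_fields)) in the CPU's
     * reset handler, while anything declared after it survives reset.
     */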
    /* These fields are preserved on reset. */
};

/*
 * Microblaze Configuration Settings
 *
 * Note that the structure is sorted by type and size to minimize holes.
 */
typedef struct {
    char *version;

    uint64_t addr_mask;

    uint32_t base_vectors;
    uint32_t pvr_user2;
    uint32_t pvr_regs[13];

    uint8_t addr_size;
    uint8_t use_fpu;
    uint8_t use_hw_mul;
    uint8_t pvr_user1;
    uint8_t pvr;
    uint8_t mmu;
    uint8_t mmu_tlb_access;
    uint8_t mmu_zones;

    bool stackprot;
    bool use_barrel;
    bool use_div;
    bool use_msr_instr;
    bool use_pcmp_instr;
    bool use_mmu;
    uint8_t use_non_secure;
    bool dcache_writeback;
    bool endi;
    bool dopb_bus_exception;
    bool iopb_bus_exception;
    bool illegal_opcode_exception;
    bool opcode_0_illegal;
    bool div_zero_exception;
    bool unaligned_exceptions;
} MicroBlazeCPUConfig;

/**
 * MicroBlazeCPU:
 * @env: #CPUMBState
 *
 * A MicroBlaze CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUMBState env;

    bool ns_axi_dp;
    bool ns_axi_ip;
    bool ns_axi_dc;
    bool ns_axi_ic;

    MicroBlazeCPUConfig cfg;
};

#ifndef CONFIG_USER_ONLY
void mb_cpu_do_interrupt(CPUState *cs);
bool mb_cpu_exec_interrupt(CPUState *cs, int int_req);
hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
#endif /* !CONFIG_USER_ONLY */
G_NORETURN void mb_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                           MMUAccessType access_type,
                                           int mmu_idx, uintptr_t retaddr);
void mb_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
int mb_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int mb_cpu_gdb_read_stack_protect(CPUArchState *cpu, GByteArray *buf, int reg);
int mb_cpu_gdb_write_stack_protect(CPUArchState *cpu, uint8_t *buf, int reg);

static inline uint32_t mb_cpu_read_msr(const CPUMBState *env)
{
    /* Replicate MSR[C] to MSR[CC]. */
    return env->msr | (env->msr_c * (MSR_C | MSR_CC));
}

static inline void mb_cpu_write_msr(CPUMBState *env, uint32_t val)
{
    env->msr_c = (val >> 2) & 1;
    /*
     * Clear both MSR[C] and MSR[CC] from the saved copy.
     * MSR_PVR is not writable and is always clear.
     */
    env->msr = val & ~(MSR_C | MSR_CC | MSR_PVR);
}
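/*
 * Because MSR[C] lives in the separate msr_c field, code that needs the
 * architectural MSR value is expected to go through the two helpers above
 * rather than touch env->msr directly.  Purely as an illustration (this
 * snippet is not part of the original header), enabling hardware exceptions
 * while preserving the carry bit would look like:
 *
 *     uint32_t msr = mb_cpu_read_msr(env);
 *     mb_cpu_write_msr(env, msr | MSR_EE);
 *
 * where mb_cpu_write_msr() re-derives msr_c from bit 2 of the written value.
 */
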
void mb_tcg_init(void);

#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU

/* MMU modes definitions */
#define MMU_NOMMU_IDX   0
#define MMU_KERNEL_IDX  1
#define MMU_USER_IDX    2
/* See NB_MMU_MODES in cpu-defs.h. */

#include "exec/cpu-all.h"

/* Ensure there is no overlap between the two masks. */
QEMU_BUILD_BUG_ON(MSR_TB_MASK & IFLAGS_TB_MASK);

static inline void cpu_get_tb_cpu_state(CPUMBState *env, vaddr *pc,
                                        uint64_t *cs_base, uint32_t *flags)
{
    *pc = env->pc;
    *flags = (env->iflags & IFLAGS_TB_MASK) | (env->msr & MSR_TB_MASK);
    *cs_base = (*flags & IMM_FLAG ? env->imm : 0);
}
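/*
 * The intent, as far as this header shows, is that translation blocks are
 * keyed on the mode bits that affect code generation (MSR_UM/MSR_VM/MSR_EE
 * plus the delay-slot and imm iflags), and that a pending IMM_FLAG also
 * folds the immediate-prefix value into cs_base, so a TB translated after
 * an imm instruction is never reused with a different prefix value.
 */
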
#if !defined(CONFIG_USER_ONLY)
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);

void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                               unsigned size, MMUAccessType access_type,
                               int mmu_idx, MemTxAttrs attrs,
                               MemTxResult response, uintptr_t retaddr);
#endif

static inline int cpu_mmu_index(CPUMBState *env, bool ifetch)
{
    MicroBlazeCPU *cpu = env_archcpu(env);

    /* Are we in nommu mode? */
    if (!(env->msr & MSR_VM) || !cpu->cfg.use_mmu) {
        return MMU_NOMMU_IDX;
    }

    if (env->msr & MSR_UM) {
        return MMU_USER_IDX;
    }
    return MMU_KERNEL_IDX;
}

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_mb_cpu;
#endif

#endif /* MICROBLAZE_CPU_H */