/*
 * SH4 emulation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef SH4_CPU_H
#define SH4_CPU_H

#include "qemu-common.h"
#include "cpu-qom.h"

#define TARGET_LONG_BITS 32
#define ALIGNED_ONLY

/* CPU Subtypes */
#define SH_CPU_SH7750     (1 << 0)
#define SH_CPU_SH7750S    (1 << 1)
#define SH_CPU_SH7750R    (1 << 2)
#define SH_CPU_SH7751     (1 << 3)
#define SH_CPU_SH7751R    (1 << 4)
#define SH_CPU_SH7785     (1 << 5)
#define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R)
#define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R)

#define CPUArchState struct CPUSH4State

#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define TARGET_PAGE_BITS 12 /* 4k XXXXX */

#define TARGET_PHYS_ADDR_SPACE_BITS 32
#define TARGET_VIRT_ADDR_SPACE_BITS 32

#define SR_MD 30
#define SR_RB 29
#define SR_BL 28
#define SR_FD 15
#define SR_M 9
#define SR_Q 8
#define SR_I3 7
#define SR_I2 6
#define SR_I1 5
#define SR_I0 4
#define SR_S 1
#define SR_T 0
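
/*
 * Illustrative sketch (not from the original header): the MD bit selects
 * privileged mode, so a privilege test on the in-register part of SR
 * looks like
 *
 *     bool privileged = (env->sr & (1u << SR_MD)) != 0;
 *
 * The M, Q and T bits are kept outside env->sr (see sr_m, sr_q and sr_t
 * in CPUSH4State) and are only visible through cpu_read_sr() below.
 */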

#define FPSCR_MASK         (0x003fffff)
#define FPSCR_FR           (1 << 21)
#define FPSCR_SZ           (1 << 20)
#define FPSCR_PR           (1 << 19)
#define FPSCR_DN           (1 << 18)
#define FPSCR_CAUSE_MASK   (0x3f << 12)
#define FPSCR_CAUSE_SHIFT  (12)
#define FPSCR_CAUSE_E      (1 << 17)
#define FPSCR_CAUSE_V      (1 << 16)
#define FPSCR_CAUSE_Z      (1 << 15)
#define FPSCR_CAUSE_O      (1 << 14)
#define FPSCR_CAUSE_U      (1 << 13)
#define FPSCR_CAUSE_I      (1 << 12)
#define FPSCR_ENABLE_MASK  (0x1f << 7)
#define FPSCR_ENABLE_SHIFT (7)
#define FPSCR_ENABLE_V     (1 << 11)
#define FPSCR_ENABLE_Z     (1 << 10)
#define FPSCR_ENABLE_O     (1 << 9)
#define FPSCR_ENABLE_U     (1 << 8)
#define FPSCR_ENABLE_I     (1 << 7)
#define FPSCR_FLAG_MASK    (0x1f << 2)
#define FPSCR_FLAG_SHIFT   (2)
#define FPSCR_FLAG_V       (1 << 6)
#define FPSCR_FLAG_Z       (1 << 5)
#define FPSCR_FLAG_O       (1 << 4)
#define FPSCR_FLAG_U       (1 << 3)
#define FPSCR_FLAG_I       (1 << 2)
#define FPSCR_RM_MASK      (0x03 << 0)
#define FPSCR_RM_NEAREST   (0 << 0)
#define FPSCR_RM_ZERO      (1 << 0)
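
/*
 * Illustrative sketch (not from the original header): the CAUSE, ENABLE
 * and FLAG groups are read with their mask/shift pairs, e.g.
 *
 *     int cause    = (env->fpscr & FPSCR_CAUSE_MASK) >> FPSCR_CAUSE_SHIFT;
 *     bool inexact = env->fpscr & FPSCR_FLAG_I;
 */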

#define DELAY_SLOT_MASK        0x7
#define DELAY_SLOT             (1 << 0)
#define DELAY_SLOT_CONDITIONAL (1 << 1)
#define DELAY_SLOT_RTE         (1 << 2)

typedef struct tlb_t {
    uint32_t vpn;       /* virtual page number */
    uint32_t ppn;       /* physical page number */
    uint32_t size;      /* mapped page size in bytes */
    uint8_t asid;       /* address space identifier */
    uint8_t v:1;        /* validity */
    uint8_t sz:2;       /* page size */
    uint8_t sh:1;       /* share status */
    uint8_t c:1;        /* cacheability */
    uint8_t pr:2;       /* protection key */
    uint8_t d:1;        /* dirty */
    uint8_t wt:1;       /* write through */
    uint8_t sa:3;       /* space attribute (PCMCIA) */
    uint8_t tc:1;       /* timing control */
} tlb_t;

#define UTLB_SIZE 64
#define ITLB_SIZE 4
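
/*
 * Simplified sketch of a UTLB probe (an illustration only, with assumed
 * variables asid and vpn; the real lookup in the MMU helpers also applies
 * per-entry page-size masking and the shared-page/ASID rules):
 *
 *     for (int i = 0; i < UTLB_SIZE; i++) {
 *         tlb_t *entry = &env->utlb[i];
 *         if (entry->v && entry->asid == asid && entry->vpn == vpn) {
 *             return entry;
 *         }
 *     }
 */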

#define NB_MMU_MODES 2
#define TARGET_INSN_START_EXTRA_WORDS 1

enum sh_features {
    SH_FEATURE_SH4A = 1,
    SH_FEATURE_BCR3_AND_BCR4 = 2,
};

typedef struct memory_content {
    uint32_t address;
    uint32_t value;
    struct memory_content *next;
} memory_content;

typedef struct CPUSH4State {
    uint32_t flags;        /* general execution flags */
    uint32_t gregs[24];    /* general registers */
    float32 fregs[32];     /* floating point registers */
    uint32_t sr;           /* status register (with T split out) */
    uint32_t sr_m;         /* M bit of status register */
    uint32_t sr_q;         /* Q bit of status register */
    uint32_t sr_t;         /* T bit of status register */
    uint32_t ssr;          /* saved status register */
    uint32_t spc;          /* saved program counter */
    uint32_t gbr;          /* global base register */
    uint32_t vbr;          /* vector base register */
    uint32_t sgr;          /* saved global register 15 */
    uint32_t dbr;          /* debug base register */
    uint32_t pc;           /* program counter */
    uint32_t delayed_pc;   /* target of delayed branch */
    uint32_t delayed_cond; /* condition of delayed branch */
    uint32_t mach;         /* multiply and accumulate high */
    uint32_t macl;         /* multiply and accumulate low */
    uint32_t pr;           /* procedure register */
    uint32_t fpscr;        /* floating point status/control register */
    uint32_t fpul;         /* floating point communication register */

    /* floating point status register */
    float_status fp_status;

    /* Those belong to the specific unit (SH7750) but are handled here */
    uint32_t mmucr;        /* MMU control register */
    uint32_t pteh;         /* page table entry high register */
    uint32_t ptel;         /* page table entry low register */
    uint32_t ptea;         /* page table entry assistance register */
    uint32_t ttb;          /* translation table base register */
    uint32_t tea;          /* TLB exception address register */
    uint32_t tra;          /* TRAPA exception register */
    uint32_t expevt;       /* exception event register */
    uint32_t intevt;       /* interrupt event register */

    tlb_t itlb[ITLB_SIZE]; /* instruction translation table */
    tlb_t utlb[UTLB_SIZE]; /* unified translation table */

    uint32_t ldst;

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    CPU_COMMON

    /* Fields from here on are preserved over CPU reset. */
    int id;                /* CPU model */

    /* The features that we should emulate. See sh_features above. */
    uint32_t features;

    void *intc_handle;
    int in_sleep;          /* SR_BL ignored during sleep */
    memory_content *movcal_backup;
    memory_content **movcal_backup_tail;
} CPUSH4State;

/**
 * SuperHCPU:
 * @env: #CPUSH4State
 *
 * A SuperH CPU.
 */
struct SuperHCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUSH4State env;
};

static inline SuperHCPU *sh_env_get_cpu(CPUSH4State *env)
{
    return container_of(env, SuperHCPU, env);
}

#define ENV_GET_CPU(e) CPU(sh_env_get_cpu(e))

#define ENV_OFFSET offsetof(SuperHCPU, env)

void superh_cpu_do_interrupt(CPUState *cpu);
bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
void superh_cpu_dump_state(CPUState *cpu, FILE *f,
                           fprintf_function cpu_fprintf, int flags);
hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int superh_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                    MMUAccessType access_type,
                                    int mmu_idx, uintptr_t retaddr);

void sh4_translate_init(void);
SuperHCPU *cpu_sh4_init(const char *cpu_model);
int cpu_sh4_signal_handler(int host_signum, void *pinfo,
                           void *puc);
int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                                int mmu_idx);

void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#if !defined(CONFIG_USER_ONLY)
void cpu_sh4_invalidate_tlb(CPUSH4State *s);
uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
                                       hwaddr addr);
void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value);
uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s,
                                       hwaddr addr);
void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value);
uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s,
                                       hwaddr addr);
void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value);
uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s,
                                       hwaddr addr);
void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
                                    uint32_t mem_value);
#endif

int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr);

void cpu_load_tlb(CPUSH4State *env);

#define cpu_init(cpu_model) CPU(cpu_sh4_init(cpu_model))

#define cpu_signal_handler cpu_sh4_signal_handler
#define cpu_list sh4_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
#define MMU_USER_IDX 1
static inline int cpu_mmu_index(CPUSH4State *env, bool ifetch)
{
    /* The instruction in a RTE delay slot is fetched in privileged
       mode, but executed in user mode.  */
    if (ifetch && (env->flags & DELAY_SLOT_RTE)) {
        return 0;
    } else {
        return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
    }
}

#include "exec/cpu-all.h"

/* Memory access type */
enum {
    /* Privilege */
    ACCESS_PRIV = 0x01,
    /* Direction */
    ACCESS_WRITE = 0x02,
    /* Type of instruction */
    ACCESS_CODE = 0x10,
    ACCESS_INT = 0x20
};

/* MMU control register */
#define MMUCR    0x1F000010
#define MMUCR_AT (1 << 0)
#define MMUCR_TI (1 << 2)
#define MMUCR_SV (1 << 8)
#define MMUCR_URC_BITS (6)
#define MMUCR_URC_OFFSET (10)
#define MMUCR_URC_SIZE (1 << MMUCR_URC_BITS)
#define MMUCR_URC_MASK (((MMUCR_URC_SIZE) - 1) << MMUCR_URC_OFFSET)
static inline int cpu_mmucr_urc(uint32_t mmucr)
{
    return ((mmucr & MMUCR_URC_MASK) >> MMUCR_URC_OFFSET);
}
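
/*
 * Sketch (an assumption about how URC is used, not stated in this header):
 * URC indexes the UTLB entry that the next LDTLB instruction will load,
 * roughly:
 *
 *     tlb_t *victim = &env->utlb[cpu_mmucr_urc(env->mmucr)];
 */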

/* PTEH : Page Translation Entry High register */
#define PTEH_ASID_BITS (8)
#define PTEH_ASID_SIZE (1 << PTEH_ASID_BITS)
#define PTEH_ASID_MASK (PTEH_ASID_SIZE - 1)
#define cpu_pteh_asid(pteh) ((pteh) & PTEH_ASID_MASK)
#define PTEH_VPN_BITS (22)
#define PTEH_VPN_OFFSET (10)
#define PTEH_VPN_SIZE (1 << PTEH_VPN_BITS)
#define PTEH_VPN_MASK (((PTEH_VPN_SIZE) - 1) << PTEH_VPN_OFFSET)
static inline int cpu_pteh_vpn(uint32_t pteh)
{
    return ((pteh & PTEH_VPN_MASK) >> PTEH_VPN_OFFSET);
}

/* PTEL : Page Translation Entry Low register */
#define PTEL_V (1 << 8)
#define cpu_ptel_v(ptel) (((ptel) & PTEL_V) >> 8)
#define PTEL_C (1 << 3)
#define cpu_ptel_c(ptel) (((ptel) & PTEL_C) >> 3)
#define PTEL_D (1 << 2)
#define cpu_ptel_d(ptel) (((ptel) & PTEL_D) >> 2)
#define PTEL_SH (1 << 1)
#define cpu_ptel_sh(ptel) (((ptel) & PTEL_SH) >> 1)
#define PTEL_WT (1 << 0)
#define cpu_ptel_wt(ptel) ((ptel) & PTEL_WT)

#define PTEL_SZ_HIGH_OFFSET (7)
#define PTEL_SZ_HIGH (1 << PTEL_SZ_HIGH_OFFSET)
#define PTEL_SZ_LOW_OFFSET (4)
#define PTEL_SZ_LOW (1 << PTEL_SZ_LOW_OFFSET)
static inline int cpu_ptel_sz(uint32_t ptel)
{
    int sz;
    sz = (ptel & PTEL_SZ_HIGH) >> PTEL_SZ_HIGH_OFFSET;
    sz <<= 1;
    sz |= (ptel & PTEL_SZ_LOW) >> PTEL_SZ_LOW_OFFSET;
    return sz;
}
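
/*
 * Sketch (drawn from the SH-4 MMU page sizes, not spelled out in this
 * header): the 2-bit code returned by cpu_ptel_sz() selects the page size,
 * 1 KiB, 4 KiB, 64 KiB or 1 MiB for codes 0 to 3, e.g.
 *
 *     static const uint32_t sh4_page_bytes[4] = {
 *         1024, 4096, 65536, 1048576
 *     };
 *     uint32_t bytes = sh4_page_bytes[cpu_ptel_sz(env->ptel)];
 */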

#define PTEL_PPN_BITS (19)
#define PTEL_PPN_OFFSET (10)
#define PTEL_PPN_SIZE (1 << PTEL_PPN_BITS)
#define PTEL_PPN_MASK (((PTEL_PPN_SIZE) - 1) << PTEL_PPN_OFFSET)
static inline int cpu_ptel_ppn(uint32_t ptel)
{
    return ((ptel & PTEL_PPN_MASK) >> PTEL_PPN_OFFSET);
}

#define PTEL_PR_BITS (2)
#define PTEL_PR_OFFSET (5)
#define PTEL_PR_SIZE (1 << PTEL_PR_BITS)
#define PTEL_PR_MASK (((PTEL_PR_SIZE) - 1) << PTEL_PR_OFFSET)
static inline int cpu_ptel_pr(uint32_t ptel)
{
    return ((ptel & PTEL_PR_MASK) >> PTEL_PR_OFFSET);
}

/* PTEA : Page Translation Entry Assistance register */
#define PTEA_SA_BITS (3)
#define PTEA_SA_SIZE (1 << PTEA_SA_BITS)
#define PTEA_SA_MASK (PTEA_SA_SIZE - 1)
#define cpu_ptea_sa(ptea) ((ptea) & PTEA_SA_MASK)
#define PTEA_TC (1 << 3)
#define cpu_ptea_tc(ptea) (((ptea) & PTEA_TC) >> 3)

#define TB_FLAG_PENDING_MOVCA (1 << 4)

static inline target_ulong cpu_read_sr(CPUSH4State *env)
{
    return env->sr | (env->sr_m << SR_M) |
                     (env->sr_q << SR_Q) |
                     (env->sr_t << SR_T);
}

static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
{
    env->sr_m = (sr >> SR_M) & 1;
    env->sr_q = (sr >> SR_Q) & 1;
    env->sr_t = (sr >> SR_T) & 1;
    env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
}
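
/*
 * Usage sketch (not from the original header): code that needs the full
 * architectural SR value goes through these accessors, e.g. blocking
 * exceptions by setting the BL bit:
 *
 *     cpu_write_sr(env, cpu_read_sr(env) | (1u << SR_BL));
 */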

static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *pc = env->pc;
    *cs_base = 0;
    *flags = (env->flags & DELAY_SLOT_MASK)                     /* Bits  0-2 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))   /* Bits 19-21 */
            | (env->sr & ((1u << SR_MD) | (1u << SR_RB)))       /* Bits 29-30 */
            | (env->sr & (1u << SR_FD))                         /* Bit  15 */
            | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit   4 */
}

#endif /* SH4_CPU_H */