target/microblaze/helper.c
/*
 *  MicroBlaze helper routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"
#if defined(CONFIG_USER_ONLY)

void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    cs->exception_index = -1;
    env->res_addr = RES_ADDR_NONE;
    env->regs[14] = env->pc;
}
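/*
 * User-only emulation has no MMU: any fault is simply reported back to
 * the user-mode cpu loop, which is expected to turn it into a signal.
 */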
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */
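/*
 * Choose the AXI secure attribute for an access: instruction fetches use
 * the ns_axi_ip property, all data accesses use ns_axi_dp.
 */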
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}
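/*
 * TCG softmmu TLB-fill hook: install a translation for @address if one
 * exists, return false on a miss when probing, otherwise raise the
 * appropriate guest MMU exception (in which case this does not return).
 */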
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }
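    /* Translate the access through the guest MMU's TLB model. */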
    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}
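/*
 * Exception entry: save the return address in the architected register
 * for the exception class, update MSR, and redirect execution to the
 * corresponding vector.
 */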
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
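    /*
     * Each exception class uses its own link register and vector offset:
     * hardware exceptions and MMU faults use r17 and base_vectors + 0x20,
     * interrupts use r14 and base_vectors + 0x10, and hardware breaks use
     * r16 and base_vectors + 0x18.
     */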
    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;
    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;
    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;
    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;
    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save previous mode, disable mmu, disable user-mode. */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}
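/*
 * Debug translation used by the gdbstub and monitor: walk the MMU model
 * without modifying the TLB or raising guest exceptions.
 */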
hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(env, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* Not mapped; no better answer for a debug access. */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}
#endif
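/*
 * cpu_exec hook: accept a pending hard interrupt only when MSR_IE is set,
 * no exception or break is already in progress, and we are not in the
 * middle of a delay-slot/imm sequence.
 */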
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
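/*
 * Alignment fault: recover the faulting pc and iflags, build an ESR that
 * describes the access, and raise a hardware exception.
 */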
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr, true);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}