/*
 *  MicroBlaze helper routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "exec/exec-all.h"
24 #include "qemu/host-utils.h"
27 #if defined(CONFIG_USER_ONLY)
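
/*
 * User-only emulation: there is no interrupt controller to service here.
 * Clear the pending exception, drop any lwx/swx reservation, and mirror
 * the hardware convention of leaving the interrupted pc in r14.
 */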
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    cs->exception_index = -1;
    env->res_addr = RES_ADDR_NONE;
    env->regs[14] = env->pc;
}
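
/*
 * No MMU is modelled for user-only emulation: any fault is simply reported
 * back to the cpu loop with a dummy exception number (0xaa) at the faulting
 * instruction.
 */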
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */
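
/*
 * Softmmu slow-path handler: install a TLB entry for @address or raise an
 * MMU exception.  With the MMU disabled the mapping is 1:1; on a translation
 * miss the fault details are recorded in ESR/EAR unless this is only a probe.
 */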
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss: when only probing, report the failure to the caller. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    /* Record the faulting address and an ESR matching the lookup error. */
    env->ear = address;
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* Raise the MMU exception and unwind to the main loop. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}
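
/*
 * Exception/interrupt entry.  Pick the vector from cs->exception_index,
 * save the return address in the register the architecture reserves for it
 * (r17 for exceptions, r14 for interrupts, r16 for breaks), set
 * MSR_EIP/MSR_BIP as appropriate, and drop to real, privileged mode while
 * saving the previous VM/UM bits in VMS/UMS.
 */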
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save previous mode, disable mmu, disable user-mode. */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      "         to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}
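
/*
 * Debug translation (gdbstub/monitor): walk the MMU without touching the
 * QEMU TLB and without raising a guest fault.
 */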
hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(env, false);
    unsigned int hit;

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* ???. */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}
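
/*
 * Hardware interrupt delivery gate: only take the IRQ when interrupts are
 * enabled, no exception or break is already in progress, and we are not in
 * the middle of a delay-slot or imm sequence.
 */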
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
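
/*
 * Unaligned access fault: build an ESR with the unaligned-data error code
 * (plus the access details recorded in iflags, when present) and raise a
 * hardware exception at the faulting instruction.
 */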
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr, true);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit_restore(cs, retaddr);
}
#endif
;