/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"
27 #if defined(CONFIG_USER_ONLY)
29 bool mb_cpu_tlb_fill(CPUState
*cs
, vaddr address
, int size
,
30 MMUAccessType access_type
, int mmu_idx
,
31 bool probe
, uintptr_t retaddr
)
33 cs
->exception_index
= 0xaa;
34 cpu_loop_exit_restore(cs
, retaddr
);
37 #else /* !CONFIG_USER_ONLY */
39 static bool mb_cpu_access_is_secure(MicroBlazeCPU
*cpu
,
40 MMUAccessType access_type
)
42 if (access_type
== MMU_INST_FETCH
) {
43 return !cpu
->ns_axi_ip
;
45 return !cpu
->ns_axi_dp
;
49 bool mb_cpu_tlb_fill(CPUState
*cs
, vaddr address
, int size
,
50 MMUAccessType access_type
, int mmu_idx
,
51 bool probe
, uintptr_t retaddr
)
53 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
54 CPUMBState
*env
= &cpu
->env
;
55 MicroBlazeMMULookup lu
;
58 MemTxAttrs attrs
= {};
60 attrs
.secure
= mb_cpu_access_is_secure(cpu
, access_type
);
62 if (mmu_idx
== MMU_NOMMU_IDX
) {
63 /* MMU disabled or not available. */
64 address
&= TARGET_PAGE_MASK
;
66 tlb_set_page_with_attrs(cs
, address
, address
, attrs
, prot
, mmu_idx
,
71 hit
= mmu_translate(cpu
, &lu
, address
, access_type
, mmu_idx
);
73 uint32_t vaddr
= address
& TARGET_PAGE_MASK
;
74 uint32_t paddr
= lu
.paddr
+ vaddr
- lu
.vaddr
;
76 qemu_log_mask(CPU_LOG_MMU
, "MMU map mmu=%d v=%x p=%x prot=%x\n",
77 mmu_idx
, vaddr
, paddr
, lu
.prot
);
78 tlb_set_page_with_attrs(cs
, vaddr
, paddr
, attrs
, lu
.prot
, mmu_idx
,
88 qemu_log_mask(CPU_LOG_MMU
, "mmu=%d miss v=%" VADDR_PRIx
"\n",
94 env
->esr
= access_type
== MMU_INST_FETCH
? 17 : 16;
95 env
->esr
|= (access_type
== MMU_DATA_STORE
) << 10;
98 env
->esr
= access_type
== MMU_INST_FETCH
? 19 : 18;
99 env
->esr
|= (access_type
== MMU_DATA_STORE
) << 10;
105 if (cs
->exception_index
== EXCP_MMU
) {
106 cpu_abort(cs
, "recursive faults\n");
110 cs
->exception_index
= EXCP_MMU
;
111 cpu_loop_exit_restore(cs
, retaddr
);
114 void mb_cpu_do_interrupt(CPUState
*cs
)
116 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
117 CPUMBState
*env
= &cpu
->env
;
118 uint32_t t
, msr
= mb_cpu_read_msr(env
);
121 /* IMM flag cannot propagate across a branch and into the dslot. */
122 assert((env
->iflags
& (D_FLAG
| IMM_FLAG
)) != (D_FLAG
| IMM_FLAG
));
123 /* BIMM flag cannot be set without D_FLAG. */
124 assert((env
->iflags
& (D_FLAG
| BIMM_FLAG
)) != BIMM_FLAG
);
125 /* RTI flags are private to translate. */
126 assert(!(env
->iflags
& (DRTI_FLAG
| DRTE_FLAG
| DRTB_FLAG
)));
128 switch (cs
->exception_index
) {
130 if (!(cpu
->cfg
.pvr_regs
[0] & PVR0_USE_EXC_MASK
)) {
131 qemu_log_mask(LOG_GUEST_ERROR
,
132 "Exception raised on system without exceptions!\n");
136 qemu_log_mask(CPU_LOG_INT
,
137 "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
138 env
->pc
, msr
, env
->iflags
);
140 /* Exception breaks branch + dslot sequence? */
143 if (env
->iflags
& D_FLAG
) {
145 env
->btr
= env
->btarget
;
148 /* Exception in progress. */
150 env
->regs
[17] = env
->pc
+ 4;
151 env
->pc
= cpu
->cfg
.base_vectors
+ 0x20;
155 qemu_log_mask(CPU_LOG_INT
,
156 "INT: MMU at pc=%08x msr=%08x "
157 "ear=%" PRIx64
" iflags=%x\n",
158 env
->pc
, msr
, env
->ear
, env
->iflags
);
160 /* Exception breaks branch + dslot sequence? */
163 if (env
->iflags
& D_FLAG
) {
165 env
->btr
= env
->btarget
;
166 /* Reexecute the branch. */
167 env
->regs
[17] = env
->pc
- (env
->iflags
& BIMM_FLAG
? 8 : 4);
168 } else if (env
->iflags
& IMM_FLAG
) {
169 /* Reexecute the imm. */
170 env
->regs
[17] = env
->pc
- 4;
172 env
->regs
[17] = env
->pc
;
175 /* Exception in progress. */
177 env
->pc
= cpu
->cfg
.base_vectors
+ 0x20;
181 assert(!(msr
& (MSR_EIP
| MSR_BIP
)));
182 assert(msr
& MSR_IE
);
183 assert(!(env
->iflags
& (D_FLAG
| IMM_FLAG
)));
185 qemu_log_mask(CPU_LOG_INT
,
186 "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
187 env
->pc
, msr
, env
->iflags
);
190 /* Disable interrupts. */
192 env
->regs
[14] = env
->pc
;
193 env
->pc
= cpu
->cfg
.base_vectors
+ 0x10;
197 assert(!(env
->iflags
& (D_FLAG
| IMM_FLAG
)));
199 qemu_log_mask(CPU_LOG_INT
,
200 "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
201 env
->pc
, msr
, env
->iflags
);
204 /* Break in progress. */
206 env
->regs
[16] = env
->pc
;
207 env
->pc
= cpu
->cfg
.base_vectors
+ 0x18;
211 cpu_abort(cs
, "unhandled exception type=%d\n", cs
->exception_index
);
215 /* Save previous mode, disable mmu, disable user-mode. */
216 t
= (msr
& (MSR_VM
| MSR_UM
)) << 1;
217 msr
&= ~(MSR_VMS
| MSR_UMS
| MSR_VM
| MSR_UM
);
219 mb_cpu_write_msr(env
, msr
);
221 env
->res_addr
= RES_ADDR_NONE
;
225 qemu_log_mask(CPU_LOG_INT
,
226 " to pc=%08x msr=%08x\n", env
->pc
, msr
);
227 } else if (env
->esr
& D_FLAG
) {
228 qemu_log_mask(CPU_LOG_INT
,
229 " to pc=%08x msr=%08x esr=%04x btr=%08x\n",
230 env
->pc
, msr
, env
->esr
, env
->btr
);
232 qemu_log_mask(CPU_LOG_INT
,
233 " to pc=%08x msr=%08x esr=%04x\n",
234 env
->pc
, msr
, env
->esr
);
238 hwaddr
mb_cpu_get_phys_page_attrs_debug(CPUState
*cs
, vaddr addr
,
241 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
242 CPUMBState
*env
= &cpu
->env
;
243 target_ulong vaddr
, paddr
= 0;
244 MicroBlazeMMULookup lu
;
245 int mmu_idx
= cpu_mmu_index(env
, false);
248 /* Caller doesn't initialize */
249 *attrs
= (MemTxAttrs
) {};
250 attrs
->secure
= mb_cpu_access_is_secure(cpu
, MMU_DATA_LOAD
);
252 if (mmu_idx
!= MMU_NOMMU_IDX
) {
253 hit
= mmu_translate(cpu
, &lu
, addr
, 0, 0);
255 vaddr
= addr
& TARGET_PAGE_MASK
;
256 paddr
= lu
.paddr
+ vaddr
- lu
.vaddr
;
258 paddr
= 0; /* ???. */
260 paddr
= addr
& TARGET_PAGE_MASK
;
265 bool mb_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
267 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
268 CPUMBState
*env
= &cpu
->env
;
270 if ((interrupt_request
& CPU_INTERRUPT_HARD
)
271 && (env
->msr
& MSR_IE
)
272 && !(env
->msr
& (MSR_EIP
| MSR_BIP
))
273 && !(env
->iflags
& (D_FLAG
| IMM_FLAG
))) {
274 cs
->exception_index
= EXCP_IRQ
;
275 mb_cpu_do_interrupt(cs
);
281 #endif /* !CONFIG_USER_ONLY */
283 void mb_cpu_do_unaligned_access(CPUState
*cs
, vaddr addr
,
284 MMUAccessType access_type
,
285 int mmu_idx
, uintptr_t retaddr
)
287 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
288 uint32_t esr
, iflags
;
290 /* Recover the pc and iflags from the corresponding insn_start. */
291 cpu_restore_state(cs
, retaddr
, true);
292 iflags
= cpu
->env
.iflags
;
294 qemu_log_mask(CPU_LOG_INT
,
295 "Unaligned access addr=" TARGET_FMT_lx
" pc=%x iflags=%x\n",
296 (target_ulong
)addr
, cpu
->env
.pc
, iflags
);
298 esr
= ESR_EC_UNALIGNED_DATA
;
299 if (likely(iflags
& ESR_ESS_FLAG
)) {
300 esr
|= iflags
& ESR_ESS_MASK
;
302 qemu_log_mask(LOG_UNIMP
, "Unaligned access without ESR_ESS_FLAG\n");
307 cs
->exception_index
= EXCP_HW_EXCP
;