/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#if defined(CONFIG_USER_ONLY)
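
/*
 * For user-only emulation there is no MMU or vectored exception model:
 * mb_cpu_do_interrupt() only records the return address and clears the
 * reservation, while mb_cpu_tlb_fill() hands the faulting access back
 * to the user-mode cpu loop with a placeholder exception number.
 */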
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    cs->exception_index = -1;
    env->res_addr = RES_ADDR_NONE;
    env->regs[14] = env->pc;
}
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}
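
/*
 * TLB fill for system emulation: translate the access through the
 * MicroBlaze MMU model and install the resulting mapping, or record
 * the fault in ESR/EAR and raise EXCP_MMU so the guest exception
 * handler can service the miss.
 */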
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);
    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }
    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }
    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }
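
    /*
     * The exception-cause values above follow the MicroBlaze ESR
     * encoding: 16/17 are data/instruction storage (protection)
     * exceptions, 18/19 are data/instruction TLB miss exceptions,
     * and the bit set via "<< 10" marks a store access.
     */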
    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}
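
/*
 * Exception entry: each exception class saves the return address in its
 * dedicated register and jumps to its vector (r14 and base_vectors + 0x10
 * for interrupts, r16 and + 0x18 for hardware breaks, r17 and + 0x20 for
 * hardware and MMU exceptions); the previous VM/UM mode bits are then
 * saved into VMS/UMS and the CPU drops to real, privileged mode.
 */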
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;
    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;
    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;
    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;
    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;
    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }
    /* Save previous mode, disable mmu, disable user-mode. */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;
    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}
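
/*
 * Debug-only address translation, used for debugger and monitor
 * accesses: it walks the MMU without raising exceptions and returns 0
 * when no mapping exists.
 */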
hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(env, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* ???. */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}
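
/*
 * Hard interrupts are only taken when MSR_IE is set, no exception or
 * break is already in progress (MSR_EIP/MSR_BIP clear), and the CPU is
 * not in the middle of a delay-slot or imm-prefix sequence.
 */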
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
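
/*
 * Unaligned accesses are turned into a hardware exception: the ESR is
 * built from ESR_EC_UNALIGNED_DATA plus the access details (ESR_ESS
 * bits) that the translator recorded in iflags, and EXCP_HW_EXCP is
 * raised.
 */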
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr, true);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit_restore(cs, retaddr);
}
#endif /* !CONFIG_USER_ONLY */