target/xtensa: extract MMU helpers
[qemu/ar7.git] / target/xtensa/op_helper.c
blob 1d3d87012c4a6f8336b448407b5db3746aca2e5f
/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"

#ifndef CONFIG_USER_ONLY

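/*
 * Unaligned access hook: an exception is raised only when the core is
 * configured with the Unaligned Exception option and without hardware
 * alignment handling; otherwise the access is allowed to proceed.
 */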
void xtensa_cpu_do_unaligned_access(CPUState *cs,
                                    vaddr addr, MMUAccessType access_type,
                                    int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
        !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        cpu_restore_state(CPU(cpu), retaddr, true);
        HELPER(exception_cause_vaddr)(env,
                                      env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}

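/*
 * Softmmu TLB refill: translate the virtual address with
 * xtensa_get_physical_addr(); on success install the mapping with
 * tlb_set_page(), otherwise report the returned MMU cause code as an
 * exception at the faulting guest PC.
 */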
void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
                                       &paddr, &page_size, &access);

    qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
                  __func__, vaddr, access_type, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr, true);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}

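/*
 * Bus transaction failures are reported as PIF address errors, with the
 * cause code chosen by access type: instruction fetch vs. load/store.
 */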
void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                      unsigned size, MMUAccessType access_type,
                                      int mmu_idx, MemTxAttrs attrs,
                                      MemTxResult response, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    cpu_restore_state(cs, retaddr, true);
    HELPER(exception_cause_vaddr)(env, env->pc,
                                  access_type == MMU_INST_FETCH ?
                                  INSTR_PIF_ADDR_ERROR_CAUSE :
                                  LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
                                  addr);
}

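/*
 * Drop any translated code that covers the given virtual address so that
 * breakpoint changes at that address take effect; a failed translation is
 * silently ignored.
 */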
static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
                                       &paddr, &page_size, &access);
    if (ret == 0) {
        tb_invalidate_phys_addr(&address_space_memory, paddr,
                                MEMTXATTRS_UNSPECIFIED);
    }
}

#endif

void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_YIELD) {
        env->yield_needed = 0;
    }
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}

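/*
 * Generic exception dispatch: if PS.EXCM is already set this becomes a
 * double exception (the PC goes to DEPC when the core has one, EPC1
 * otherwise); a first-level exception saves the PC to EPC1 and selects the
 * user or kernel vector from PS.UM. EXCCAUSE and PS.EXCM are updated in
 * both cases.
 */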
void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
                                   uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

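/*
 * Debug exception at the configured debug interrupt level: PC and PS are
 * saved to the level's EPC/EPS registers (hence the EPC1 + level - 1 and
 * EPS2 + level - 2 indexing into consecutive special registers), and
 * PS.INTLEVEL is raised to that level with PS.EXCM set.
 */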
void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}

void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}

#ifndef CONFIG_USER_ONLY

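/*
 * WAITI: set the new interrupt level in PS, re-check interrupts under the
 * iothread lock, and if nothing is pending halt the CPU via EXCP_HLT;
 * a pending interrupt just exits the CPU loop instead.
 */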
void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);

    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();

    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    cpu->halted = 1;
    HELPER(exception)(env, EXCP_HLT);
}

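/*
 * CCOUNT is derived from virtual time:
 *   CCOUNT = ccount_base + (now_ns - time_base_ns) * clock_freq_khz / 1e6
 * e.g. with clock_freq_khz = 40000 (40 MHz), 1 ms of virtual time adds
 * 40000 cycles. The numbers are only illustrative.
 */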
void HELPER(update_ccount)(CPUXtensaState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    env->ccount_time = now;
    env->sregs[CCOUNT] = env->ccount_base +
        (uint32_t)((now - env->time_base) *
                   env->config->clock_freq_khz / 1000000);
}

void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
{
    int i;

    HELPER(update_ccount)(env);
    env->ccount_base += v - env->sregs[CCOUNT];
    for (i = 0; i < env->config->nccompare; ++i) {
        HELPER(update_ccompare)(env, i);
    }
}

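/*
 * Reprogram the CCOMPARE[i] timer. The 32-bit subtraction below computes
 * the number of cycles until CCOUNT next equals CCOMPARE[i], with
 * wrap-around handled naturally: the "- 1 ... + 1" trick maps an exact
 * match to a full 2^32-cycle period instead of zero.
 */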
void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
{
    uint64_t dcc;

    HELPER(update_ccount)(env);
    dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
    timer_mod(env->ccompare[i].timer,
              env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
    env->yield_needed = 1;
}

void HELPER(check_interrupts)(CPUXtensaState *env)
{
    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();
}

/*
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
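/*
 * As used below, ATOMCTL is treated as 2-bit fields selected by the page's
 * cache attribute: bits [1:0] for bypass, [3:2] for write-through and
 * [5:4] for write-back (hence the cascading ">>= 2"). As far as this check
 * is concerned, a field value of 0 means "raise LOAD_STORE_ERROR_CAUSE";
 * isolate pages always raise it.
 */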
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
                                      xtensa_get_cring(env),
                                      &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
        (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }

    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                                          LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        HELPER(exception_cause_vaddr)(env, pc,
                                      LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}

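/*
 * MEMCTL writes: the "use ways" (and for the data cache, "alloc ways")
 * fields are clamped to the number of cache ways actually configured, and
 * the result is masked with the per-core memctl_mask before it lands in
 * the MEMCTL special register.
 */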
void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
        if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
            env->config->icache_ways) {
            v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
                          env->config->icache_ways);
        }
    }
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
                          env->config->dcache_ways);
        }
        if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
                          env->config->dcache_ways);
        }
    }
    env->sregs[MEMCTL] = v & env->config->memctl_mask;
}

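/*
 * Enabling or disabling an instruction breakpoint must invalidate any
 * already-translated code at the corresponding IBREAKA address, so the
 * loop below flushes every address whose enable bit changes.
 */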
void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}

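/*
 * DBREAKC encodes both the watched access kinds (SB/LB) and an address
 * mask in its low bits. Zero mask bits widen the watched region: the
 * watchpoint covers ~mask + 1 bytes starting at dbreaka & mask, so a mask
 * of 0xfffffffc covers 4 bytes. "(~mask + 1) & ~mask" is nonzero for a
 * non-contiguous mask, e.g. ~mask = 0x5: (0x5 + 1) & 0x5 = 0x4, which is
 * then reported and the mask is truncated after its first zero bit.
 */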
static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
                       uint32_t dbreakc)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
                              flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
                      dbreaka & mask, ~mask + 1);
    }
}

void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
        env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                CPUState *cs = CPU(xtensa_env_get_cpu(env));

                cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}

#endif

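/*
 * RER/WER access the external register interface through the dedicated
 * address_space_er. In user-only builds there is no such address space,
 * so RER reads as zero and WER is a no-op.
 */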
uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    return address_space_ldl(env->address_space_er, addr,
                             MEMTXATTRS_UNSPECIFIED, NULL);
#else
    return 0;
#endif
}

void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    address_space_stl(env->address_space_er, addr, data,
                      MEMTXATTRS_UNSPECIFIED, NULL);
#endif
}