/* qemu/ar7.git: target/xtensa/op_helper.c */
/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"
#include "fpu/softfloat.h"

#ifndef CONFIG_USER_ONLY

void xtensa_cpu_do_unaligned_access(CPUState *cs,
                                    vaddr addr, MMUAccessType access_type,
                                    int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
        !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        cpu_restore_state(CPU(cpu), retaddr, true);
        HELPER(exception_cause_vaddr)(env,
                                      env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}
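
/*
 * tlb_fill() is the softmmu fault hook for this target: it translates vaddr
 * with xtensa_get_physical_addr(); on success it installs the mapping in the
 * QEMU TLB, otherwise it restores the guest state and raises the exception
 * cause returned by the translation.
 */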

void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
                                       &paddr, &page_size, &access);

    qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
                  __func__, vaddr, access_type, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr, true);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}

void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                      unsigned size, MMUAccessType access_type,
                                      int mmu_idx, MemTxAttrs attrs,
                                      MemTxResult response, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    cpu_restore_state(cs, retaddr, true);
    HELPER(exception_cause_vaddr)(env, env->pc,
                                  access_type == MMU_INST_FETCH ?
                                  INSTR_PIF_ADDR_ERROR_CAUSE :
                                  LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
                                  addr);
}

static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
                                       &paddr, &page_size, &access);
    if (ret == 0) {
        tb_invalidate_phys_addr(&address_space_memory, paddr,
                                MEMTXATTRS_UNSPECIFIED);
    }
}

#else

static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    tb_invalidate_phys_addr(vaddr);
}

#endif

void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_YIELD) {
        env->yield_needed = 0;
    }
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}
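
/*
 * Generic exception entry: with PS.EXCM already set the exception becomes a
 * double exception (PC saved to DEPC, or to EPC1 on cores without DEPC);
 * otherwise PC is saved to EPC1 and the user or kernel vector is chosen
 * from PS.UM.
 */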

void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
                                   uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}
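
/*
 * Register window handling: env->regs[] holds the 16 AR registers that are
 * currently visible, while env->phys_regs[] holds the full physical register
 * file (env->config->nareg entries).  The window starts at WINDOW_BASE * 4
 * in the physical file and wraps around its end, so copies in either
 * direction may have to be split in two.
 */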

static void copy_window_from_phys(CPUXtensaState *env,
                                  uint32_t window, uint32_t phys, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->regs + window, env->phys_regs + phys,
               n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->regs + window, env->phys_regs + phys,
               n1 * sizeof(uint32_t));
        memcpy(env->regs + window + n1, env->phys_regs,
               (n - n1) * sizeof(uint32_t));
    }
}

static void copy_phys_from_window(CPUXtensaState *env,
                                  uint32_t phys, uint32_t window, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->phys_regs + phys, env->regs + window,
               n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->phys_regs + phys, env->regs + window,
               n1 * sizeof(uint32_t));
        memcpy(env->phys_regs, env->regs + window + n1,
               (n - n1) * sizeof(uint32_t));
    }
}

static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
{
    return a & (env->config->nareg / 4 - 1);
}

static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
{
    return 1 << windowbase_bound(a, env);
}

void xtensa_sync_window_from_phys(CPUXtensaState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}

void xtensa_sync_phys_from_window(CPUXtensaState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}

static void xtensa_rotate_window_abs(CPUXtensaState *env, uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta)
{
    xtensa_rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
}

void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
{
    xtensa_rotate_window_abs(env, v);
}

void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;

    env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - imm;
    xtensa_rotate_window(env, callinc);
    env->sregs[WINDOW_START] |=
        windowstart_bit(env->sregs[WINDOW_BASE], env);
}
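
/*
 * Window overflow check: rotate to the first live frame above the current
 * window (first set bit of WINDOW_START above WINDOW_BASE), remember the old
 * WINDOW_BASE in PS.OWB, set PS.EXCM and EPC1, and raise the window overflow
 * exception whose size (4/8/12) depends on the gap to the next live frame.
 */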

void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = xtensa_replicate_windowstart(env) >>
        (env->sregs[WINDOW_BASE] + 1);
    uint32_t n = ctz32(windowstart) + 1;

    assert(n <= w);

    xtensa_rotate_window(env, n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    switch (ctz32(windowstart >> n)) {
    case 0:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
        break;
    case 1:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
        break;
    default:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
        break;
    }
}

void HELPER(test_ill_retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    if (n == 0 || (m != 0 && m != n)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
                      "PS = %08x, m = %d, n = %d\n",
                      pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    }
}

void HELPER(test_underflow_retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;

    if (!(env->sregs[WINDOW_START] &
          windowstart_bit(env->sregs[WINDOW_BASE] - n, env))) {
        uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);

        xtensa_rotate_window(env, -n);
        /* window underflow */
        env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
            (windowbase << PS_OWB_SHIFT) | PS_EXCM;
        env->sregs[EPC1] = env->pc = pc;

        if (n == 1) {
            HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
        } else if (n == 2) {
            HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
        } else if (n == 3) {
            HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
        }
    }
}
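
/*
 * retw: the return PC keeps the upper two address bits of the current PC
 * and takes the lower 30 bits from a0; the caller's window is rotated back
 * in and the WINDOW_START bit of the frame being returned from is cleared.
 */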

uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

    xtensa_rotate_window(env, -n);
    env->sregs[WINDOW_START] &= ~windowstart_bit(windowbase, env);
    return ret_pc;
}

void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
{
    xtensa_rotate_window(env, imm4);
}

void xtensa_restore_owb(CPUXtensaState *env)
{
    xtensa_rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}

void HELPER(restore_owb)(CPUXtensaState *env)
{
    xtensa_restore_owb(env);
}

void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
{
    if ((env->sregs[WINDOW_START] &
         (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
          windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
          windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
        HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
    }
}

void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LBEG] = v;
    }
}

void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LEND] = v;
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
    }
}

void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}

#ifndef CONFIG_USER_ONLY

void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);

    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();

    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    cpu->halted = 1;
    HELPER(exception)(env, EXCP_HLT);
}

void HELPER(update_ccount)(CPUXtensaState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    env->ccount_time = now;
    env->sregs[CCOUNT] = env->ccount_base +
        (uint32_t)((now - env->time_base) *
                   env->config->clock_freq_khz / 1000000);
}

void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
{
    int i;

    HELPER(update_ccount)(env);
    env->ccount_base += v - env->sregs[CCOUNT];
    for (i = 0; i < env->config->nccompare; ++i) {
        HELPER(update_ccompare)(env, i);
    }
}
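
/*
 * Reprogram the CCOMPARE[i] timer: dcc is the number of CCOUNT ticks until
 * the compare value is reached (a full 2^32-tick period when CCOMPARE equals
 * CCOUNT), converted to nanoseconds using clock_freq_khz.
 */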

void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
{
    uint64_t dcc;

    HELPER(update_ccount)(env);
    dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
    timer_mod(env->ccompare[i].timer,
              env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
    env->yield_needed = 1;
}

void HELPER(check_interrupts)(CPUXtensaState *env)
{
    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();
}

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Attempt the memory load; we don't care about the result but
     * only the side-effects (ie any MMU or other exception)
     */
    cpu_ldub_code_ra(env, vaddr, GETPC());
}

/*
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
                                      xtensa_get_cring(env), &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
        (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }
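
    /*
     * ATOMCTL holds a 2-bit control field per cache attribute class; the
     * shifts below move the field that matches the page's attribute
     * (bypass, writethrough or writeback) into bits 1:0.  A value of 0
     * there means the atomic operation is not allowed and raises
     * LoadStoreError.
     */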
    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                                          LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        HELPER(exception_cause_vaddr)(env, pc,
                                      LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}

void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
        if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
            env->config->icache_ways) {
            /* clamp the use-ways fields to the number of configured ways */
            v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
                          env->config->icache_ways);
        }
    }
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
                          env->config->dcache_ways);
        }
        if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
                          env->config->dcache_ways);
        }
    }
    env->sregs[MEMCTL] = v & env->config->memctl_mask;
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
                              uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}
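
/*
 * TLB probe: when the lookup hits and the entry's ring passes the access
 * check, the result packs the probed page number with the way index and a
 * hit flag (0x10 for DTLB, 0x8 for ITLB).  A multi-hit raises an exception;
 * any other outcome returns 0.
 */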

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                              xtensa_tlb_entry *entry, bool dtlb,
                              unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                          unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}

void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}
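
/*
 * Data breakpoints are modelled with QEMU watchpoints: DBREAKC supplies a
 * byte-address mask (its inverse plus one gives the watched length) and the
 * load/store enable bits, while DBREAKA supplies the base address.
 */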

static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
                       uint32_t dbreakc)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
                              flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
                      dbreaka & mask, ~mask + 1);
    }
}

void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
        env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                CPUState *cs = CPU(xtensa_env_get_cpu(env));

                cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}
#endif

void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
{
    static const int rounding_mode[] = {
        float_round_nearest_even,
        float_round_to_zero,
        float_round_up,
        float_round_down,
    };

    env->uregs[FCR] = v & 0xfffff07f;
    set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
}

float32 HELPER(abs_s)(float32 v)
{
    return float32_abs(v);
}

float32 HELPER(neg_s)(float32 v)
{
    return float32_chs(v);
}

float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_add(a, b, &env->fp_status);
}

float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_sub(a, b, &env->fp_status);
}

float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_mul(a, b, &env->fp_status);
}

float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, 0,
                          &env->fp_status);
}

float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, float_muladd_negate_product,
                          &env->fp_status);
}

uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};

    set_float_rounding_mode(rounding_mode, &fp_status);
    return float32_to_int32(
        float32_scalbn(v, scale, &fp_status), &fp_status);
}
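
/*
 * Unsigned conversion: negative, non-NaN inputs are routed through the
 * signed conversion so the result wraps/saturates as a signed value rather
 * than taking the unsigned saturation path of float32_to_uint32().
 */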

uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};
    float32 res;

    set_float_rounding_mode(rounding_mode, &fp_status);

    res = float32_scalbn(v, scale, &fp_status);

    if (float32_is_neg(v) && !float32_is_any_nan(v)) {
        return float32_to_int32(res, &fp_status);
    } else {
        return float32_to_uint32(res, &fp_status);
    }
}

float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(int32_to_float32(v, &env->fp_status),
                          (int32_t)scale, &env->fp_status);
}

float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(uint32_to_float32(v, &env->fp_status),
                          (int32_t)scale, &env->fp_status);
}

static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
{
    if (v) {
        env->sregs[BR] |= br;
    } else {
        env->sregs[BR] &= ~br;
    }
}
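
/*
 * FP compare helpers write their result into the boolean register selected
 * by 'br'.  The o* variants are ordered comparisons (false when either
 * operand is a NaN), the u* variants also report true for unordered
 * operands, and un_s tests for unordered alone.
 */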

void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
}

void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
}

void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
}

void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
}

void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_less || v == float_relation_unordered, br);
}

void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
}

void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v != float_relation_greater, br);
}
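
/*
 * RER/WER access the external register file through a dedicated address
 * space; in user-mode emulation there is no such address space, so reads
 * return 0 and writes are dropped.
 */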

uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    return address_space_ldl(env->address_space_er, addr,
                             MEMTXATTRS_UNSPECIFIED, NULL);
#else
    return 0;
#endif
}

void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    address_space_stl(env->address_space_er, addr, data,
                      MEMTXATTRS_UNSPECIFIED, NULL);
#endif
}