/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

static struct XtensaConfigList *xtensa_cores;

static void xtensa_core_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);
    XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
    const XtensaConfig *config = data;

    xcc->config = config;

    /* Use num_core_regs to see only non-privileged registers in an unmodified
     * gdb. Use num_regs to see all registers. gdb modification is required
     * for that: reset bit 0 in the 'flags' field of the registers definitions
     * in the gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
     */
    cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
}

void xtensa_finalize_config(XtensaConfig *config)
{
    unsigned i, n = 0;

    if (config->gdb_regmap.num_regs) {
        return;
    }

    for (i = 0; config->gdb_regmap.reg[i].targno >= 0; ++i) {
        n += (config->gdb_regmap.reg[i].type != 6);
    }
    config->gdb_regmap.num_regs = n;
}

void xtensa_register_core(XtensaConfigList *node)
{
    TypeInfo type = {
        .parent = TYPE_XTENSA_CPU,
        .class_init = xtensa_core_class_init,
        .class_data = (void *)node->config,
    };

    node->next = xtensa_cores;
    xtensa_cores = node;
    type.name = g_strdup_printf("%s-" TYPE_XTENSA_CPU, node->config->name);
    type_register(&type);
    g_free((gpointer)type.name);
}

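/* Return the DEBUGCAUSE value for the first DBREAK data watchpoint that was
 * hit, or 0 if no hardware watchpoint triggered. */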
static uint32_t check_hw_breakpoints(CPUXtensaState *env)
{
    unsigned i;

    for (i = 0; i < env->config->ndbreak; ++i) {
        if (env->cpu_watchpoint[i] &&
                env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
            return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}

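/* Debug hook: turn a QEMU watchpoint hit flagged with BP_CPU into a guest
 * debug exception and restart execution of the current instruction. */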
void xtensa_breakpoint_handler(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            uint32_t cause;

            cs->watchpoint_hit = NULL;
            cause = check_hw_breakpoints(env);
            if (cause) {
                debug_exception_env(env, cause);
            }
            cpu_loop_exit_noexc(cs);
        }
    }
}

XtensaCPU *cpu_xtensa_init(const char *cpu_model)
{
    ObjectClass *oc;
    XtensaCPU *cpu;
    CPUXtensaState *env;

    oc = cpu_class_by_name(TYPE_XTENSA_CPU, cpu_model);
    if (oc == NULL) {
        return NULL;
    }

    cpu = XTENSA_CPU(object_new(object_class_get_name(oc)));
    env = &cpu->env;

    xtensa_irq_init(env);

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

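/* Apply VECBASE relocation to a configured vector address when the
 * relocatable vector option is enabled; otherwise return it unchanged. */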
static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*
 * Handle pending IRQ.
 * For the high priority interrupt jump to the corresponding interrupt vector.
 * For the level-1 interrupt convert it to either user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
            level <= env->config->nlevel &&
            (env->config->level_mask[level] &
             env->sregs[INTSET] &
             env->sregs[INTENABLE])) {
        CPUState *cs = CPU(xtensa_env_get_cpu(env));

        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                cs->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                cs->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

/* Called from cpu_handle_interrupt with BQL held */
void xtensa_cpu_do_interrupt(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                "pc = %08x, a0 = %08x, ps = %08x, "
                "intset = %08x, intenable = %08x, "
                "ccount = %08x\n",
                __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
                env->pc, env->regs[0], env->sregs[PS],
                env->sregs[INTSET], env->sregs[INTENABLE],
                env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (cs->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                __func__, cs->exception_index,
                env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
        if (env->config->exception_vector[cs->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[cs->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log_mask(CPU_LOG_INT, "%s(pc = %08x) bad exception_index: %d\n",
                    __func__, env->pc, cs->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                __func__, env->pc, cs->exception_index);
        break;
    }
    check_interrupts(env);
}

bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cs->exception_index = EXC_IRQ;
        xtensa_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

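/* Invalidate every entry of every TLB way: ASID 0 means "not mapped", and
 * each entry is marked as software-writable (variable). */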
static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

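/* Region protection/translation reset: way 0 identity-maps eight 512 MB
 * regions with cache-bypass attributes. */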
static void reset_tlb_region_way0(CPUXtensaState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

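/* Bring the MMU (or the region protection unit, when no full MMU is
 * configured) back to its reset state. */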
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

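/* Find the ring (0..3) that RASID currently assigns to the given ASID;
 * return 0xff when the ASID is not mapped to any ring. */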
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

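/* MMU translation proper: look the address up in the TLB and, on a miss,
 * optionally walk the page table via get_pte() and autorefill a TLB way. */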
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
            may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

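/* Fetch the PTE that maps vaddr: the PTE address is formed from PTEVADDR,
 * translated without touching the TLB, and loaded from physical memory. */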
static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
            &paddr, &page_size, &access, false);

    qemu_log_mask(CPU_LOG_MMU, "%s: trying autorefill(%08x) -> %08x\n",
                  __func__, vaddr, ret ? ~0 : paddr);

    if (ret == 0) {
        *pte = ldl_phys(cs->as, paddr);
    }
    return ret;
}

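/* Region protection/translation lookup: the entry is selected by the three
 * topmost bits of the virtual address (one 512 MB region per entry). */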
static int get_physical_addr_region(CPUXtensaState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                vaddr, is_write, mmu_idx, paddr, page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(
                env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

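/* Pretty-print the valid entries of one TLB (ITLB or DTLB), one block per
 * way, including the decoded access rights and cache mode. */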
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz >>= 20;
            sz_text = "MB";
        } else {
            sz >>= 10;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr       Paddr       ASID  Attr RWX Cache\n"
                            "\t----------  ----------  ----  ---- --- -------\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x  0x%08x  0x%02x  0x%02x %c%c%c %-7s\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-',
                        cache_text[cache_idx] ? cache_text[cache_idx] :
                            "Invalid");
            }
        }
    }
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}

void xtensa_runstall(CPUXtensaState *env, bool runstall)
{
    CPUState *cpu = CPU(xtensa_env_get_cpu(env));

    env->runstall = runstall;
    cpu->halted = runstall;
    if (runstall) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HALT);
    } else {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
    }
}