2 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of the Open Source and Linux Lab nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 #include "qemu/osdep.h"
29 #include "qemu/units.h"
31 #include "exec/exec-all.h"
32 #include "exec/gdbstub.h"
33 #include "qemu/host-utils.h"
34 #if !defined(CONFIG_USER_ONLY)
35 #include "hw/loader.h"
/* Head of the linked list of registered core configurations.
 * xtensa_register_core() prepends to it; xtensa_cpu_list() walks it. */
static struct XtensaConfigList *xtensa_cores;
40 static void xtensa_core_class_init(ObjectClass
*oc
, void *data
)
42 CPUClass
*cc
= CPU_CLASS(oc
);
43 XtensaCPUClass
*xcc
= XTENSA_CPU_CLASS(oc
);
44 const XtensaConfig
*config
= data
;
48 /* Use num_core_regs to see only non-privileged registers in an unmodified
49 * gdb. Use num_regs to see all registers. gdb modification is required
50 * for that: reset bit 0 in the 'flags' field of the registers definitions
51 * in the gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
53 cc
->gdb_num_core_regs
= config
->gdb_regmap
.num_regs
;
56 static void init_libisa(XtensaConfig
*config
)
61 config
->isa
= xtensa_isa_init(config
->isa_internal
, NULL
, NULL
);
62 assert(xtensa_isa_maxlength(config
->isa
) <= MAX_INSN_LENGTH
);
63 opcodes
= xtensa_isa_num_opcodes(config
->isa
);
64 config
->opcode_ops
= g_new(XtensaOpcodeOps
*, opcodes
);
66 for (i
= 0; i
< opcodes
; ++i
) {
67 const char *opc_name
= xtensa_opcode_name(config
->isa
, i
);
68 XtensaOpcodeOps
*ops
= NULL
;
70 assert(xtensa_opcode_num_operands(config
->isa
, i
) <= MAX_OPCODE_ARGS
);
71 if (!config
->opcode_translators
) {
72 ops
= xtensa_find_opcode_ops(&xtensa_core_opcodes
, opc_name
);
74 for (j
= 0; !ops
&& config
->opcode_translators
[j
]; ++j
) {
75 ops
= xtensa_find_opcode_ops(config
->opcode_translators
[j
],
82 "opcode translator not found for %s's opcode '%s'\n",
83 config
->name
, opc_name
);
86 config
->opcode_ops
[i
] = ops
;
90 void xtensa_finalize_config(XtensaConfig
*config
)
92 if (config
->isa_internal
) {
96 if (config
->gdb_regmap
.num_regs
== 0 ||
97 config
->gdb_regmap
.num_core_regs
== 0) {
100 unsigned n_core_regs
= 0;
102 for (i
= 0; config
->gdb_regmap
.reg
[i
].targno
>= 0; ++i
) {
103 if (config
->gdb_regmap
.reg
[i
].type
!= 6) {
105 if ((config
->gdb_regmap
.reg
[i
].flags
& 0x1) == 0) {
110 if (config
->gdb_regmap
.num_regs
== 0) {
111 config
->gdb_regmap
.num_regs
= n_regs
;
113 if (config
->gdb_regmap
.num_core_regs
== 0) {
114 config
->gdb_regmap
.num_core_regs
= n_core_regs
;
119 void xtensa_register_core(XtensaConfigList
*node
)
122 .parent
= TYPE_XTENSA_CPU
,
123 .class_init
= xtensa_core_class_init
,
124 .class_data
= (void *)node
->config
,
127 node
->next
= xtensa_cores
;
129 type
.name
= g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), node
->config
->name
);
130 type_register(&type
);
131 g_free((gpointer
)type
.name
);
134 static uint32_t check_hw_breakpoints(CPUXtensaState
*env
)
138 for (i
= 0; i
< env
->config
->ndbreak
; ++i
) {
139 if (env
->cpu_watchpoint
[i
] &&
140 env
->cpu_watchpoint
[i
]->flags
& BP_WATCHPOINT_HIT
) {
141 return DEBUGCAUSE_DB
| (i
<< DEBUGCAUSE_DBNUM_SHIFT
);
147 void xtensa_breakpoint_handler(CPUState
*cs
)
149 XtensaCPU
*cpu
= XTENSA_CPU(cs
);
150 CPUXtensaState
*env
= &cpu
->env
;
152 if (cs
->watchpoint_hit
) {
153 if (cs
->watchpoint_hit
->flags
& BP_CPU
) {
156 cs
->watchpoint_hit
= NULL
;
157 cause
= check_hw_breakpoints(env
);
159 debug_exception_env(env
, cause
);
161 cpu_loop_exit_noexc(cs
);
166 void xtensa_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
168 XtensaConfigList
*core
= xtensa_cores
;
169 cpu_fprintf(f
, "Available CPUs:\n");
170 for (; core
; core
= core
->next
) {
171 cpu_fprintf(f
, " %s\n", core
->config
->name
);
175 hwaddr
xtensa_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
177 #ifndef CONFIG_USER_ONLY
178 XtensaCPU
*cpu
= XTENSA_CPU(cs
);
183 if (xtensa_get_physical_addr(&cpu
->env
, false, addr
, 0, 0,
184 &paddr
, &page_size
, &access
) == 0) {
187 if (xtensa_get_physical_addr(&cpu
->env
, false, addr
, 2, 0,
188 &paddr
, &page_size
, &access
) == 0) {
197 #ifndef CONFIG_USER_ONLY
199 static uint32_t relocated_vector(CPUXtensaState
*env
, uint32_t vector
)
201 if (xtensa_option_enabled(env
->config
,
202 XTENSA_OPTION_RELOCATABLE_VECTOR
)) {
203 return vector
- env
->config
->vecbase
+ env
->sregs
[VECBASE
];
210 * Handle penging IRQ.
211 * For the high priority interrupt jump to the corresponding interrupt vector.
212 * For the level-1 interrupt convert it to either user, kernel or double
213 * exception with the 'level-1 interrupt' exception cause.
215 static void handle_interrupt(CPUXtensaState
*env
)
217 int level
= env
->pending_irq_level
;
219 if (level
> xtensa_get_cintlevel(env
) &&
220 level
<= env
->config
->nlevel
&&
221 (env
->config
->level_mask
[level
] &
223 env
->sregs
[INTENABLE
])) {
224 CPUState
*cs
= CPU(xtensa_env_get_cpu(env
));
227 env
->sregs
[EPC1
+ level
- 1] = env
->pc
;
228 env
->sregs
[EPS2
+ level
- 2] = env
->sregs
[PS
];
230 (env
->sregs
[PS
] & ~PS_INTLEVEL
) | level
| PS_EXCM
;
231 env
->pc
= relocated_vector(env
,
232 env
->config
->interrupt_vector
[level
]);
234 env
->sregs
[EXCCAUSE
] = LEVEL1_INTERRUPT_CAUSE
;
236 if (env
->sregs
[PS
] & PS_EXCM
) {
237 if (env
->config
->ndepc
) {
238 env
->sregs
[DEPC
] = env
->pc
;
240 env
->sregs
[EPC1
] = env
->pc
;
242 cs
->exception_index
= EXC_DOUBLE
;
244 env
->sregs
[EPC1
] = env
->pc
;
245 cs
->exception_index
=
246 (env
->sregs
[PS
] & PS_UM
) ? EXC_USER
: EXC_KERNEL
;
248 env
->sregs
[PS
] |= PS_EXCM
;
250 env
->exception_taken
= 1;
254 /* Called from cpu_handle_interrupt with BQL held */
255 void xtensa_cpu_do_interrupt(CPUState
*cs
)
257 XtensaCPU
*cpu
= XTENSA_CPU(cs
);
258 CPUXtensaState
*env
= &cpu
->env
;
260 if (cs
->exception_index
== EXC_IRQ
) {
261 qemu_log_mask(CPU_LOG_INT
,
262 "%s(EXC_IRQ) level = %d, cintlevel = %d, "
263 "pc = %08x, a0 = %08x, ps = %08x, "
264 "intset = %08x, intenable = %08x, "
266 __func__
, env
->pending_irq_level
, xtensa_get_cintlevel(env
),
267 env
->pc
, env
->regs
[0], env
->sregs
[PS
],
268 env
->sregs
[INTSET
], env
->sregs
[INTENABLE
],
270 handle_interrupt(env
);
273 switch (cs
->exception_index
) {
274 case EXC_WINDOW_OVERFLOW4
:
275 case EXC_WINDOW_UNDERFLOW4
:
276 case EXC_WINDOW_OVERFLOW8
:
277 case EXC_WINDOW_UNDERFLOW8
:
278 case EXC_WINDOW_OVERFLOW12
:
279 case EXC_WINDOW_UNDERFLOW12
:
284 qemu_log_mask(CPU_LOG_INT
, "%s(%d) "
285 "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
286 __func__
, cs
->exception_index
,
287 env
->pc
, env
->regs
[0], env
->sregs
[PS
], env
->sregs
[CCOUNT
]);
288 if (env
->config
->exception_vector
[cs
->exception_index
]) {
289 env
->pc
= relocated_vector(env
,
290 env
->config
->exception_vector
[cs
->exception_index
]);
291 env
->exception_taken
= 1;
293 qemu_log_mask(CPU_LOG_INT
, "%s(pc = %08x) bad exception_index: %d\n",
294 __func__
, env
->pc
, cs
->exception_index
);
302 qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
303 __func__
, env
->pc
, cs
->exception_index
);
306 check_interrupts(env
);
309 void xtensa_cpu_do_interrupt(CPUState
*cs
)
314 bool xtensa_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
316 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
317 cs
->exception_index
= EXC_IRQ
;
318 xtensa_cpu_do_interrupt(cs
);
324 #ifdef CONFIG_USER_ONLY
326 int xtensa_cpu_handle_mmu_fault(CPUState
*cs
, vaddr address
, int size
, int rw
,
329 XtensaCPU
*cpu
= XTENSA_CPU(cs
);
330 CPUXtensaState
*env
= &cpu
->env
;
332 qemu_log_mask(CPU_LOG_INT
,
333 "%s: rw = %d, address = 0x%08" VADDR_PRIx
", size = %d\n",
334 __func__
, rw
, address
, size
);
335 env
->sregs
[EXCVADDR
] = address
;
336 env
->sregs
[EXCCAUSE
] = rw
? STORE_PROHIBITED_CAUSE
: LOAD_PROHIBITED_CAUSE
;
337 cs
->exception_index
= EXC_USER
;
343 static void reset_tlb_mmu_all_ways(CPUXtensaState
*env
,
344 const xtensa_tlb
*tlb
, xtensa_tlb_entry entry
[][MAX_TLB_WAY_SIZE
])
348 for (wi
= 0; wi
< tlb
->nways
; ++wi
) {
349 for (ei
= 0; ei
< tlb
->way_size
[wi
]; ++ei
) {
350 entry
[wi
][ei
].asid
= 0;
351 entry
[wi
][ei
].variable
= true;
356 static void reset_tlb_mmu_ways56(CPUXtensaState
*env
,
357 const xtensa_tlb
*tlb
, xtensa_tlb_entry entry
[][MAX_TLB_WAY_SIZE
])
359 if (!tlb
->varway56
) {
360 static const xtensa_tlb_entry way5
[] = {
375 static const xtensa_tlb_entry way6
[] = {
390 memcpy(entry
[5], way5
, sizeof(way5
));
391 memcpy(entry
[6], way6
, sizeof(way6
));
394 for (ei
= 0; ei
< 8; ++ei
) {
395 entry
[6][ei
].vaddr
= ei
<< 29;
396 entry
[6][ei
].paddr
= ei
<< 29;
397 entry
[6][ei
].asid
= 1;
398 entry
[6][ei
].attr
= 3;
403 static void reset_tlb_region_way0(CPUXtensaState
*env
,
404 xtensa_tlb_entry entry
[][MAX_TLB_WAY_SIZE
])
408 for (ei
= 0; ei
< 8; ++ei
) {
409 entry
[0][ei
].vaddr
= ei
<< 29;
410 entry
[0][ei
].paddr
= ei
<< 29;
411 entry
[0][ei
].asid
= 1;
412 entry
[0][ei
].attr
= 2;
413 entry
[0][ei
].variable
= true;
417 void reset_mmu(CPUXtensaState
*env
)
419 if (xtensa_option_enabled(env
->config
, XTENSA_OPTION_MMU
)) {
420 env
->sregs
[RASID
] = 0x04030201;
421 env
->sregs
[ITLBCFG
] = 0;
422 env
->sregs
[DTLBCFG
] = 0;
423 env
->autorefill_idx
= 0;
424 reset_tlb_mmu_all_ways(env
, &env
->config
->itlb
, env
->itlb
);
425 reset_tlb_mmu_all_ways(env
, &env
->config
->dtlb
, env
->dtlb
);
426 reset_tlb_mmu_ways56(env
, &env
->config
->itlb
, env
->itlb
);
427 reset_tlb_mmu_ways56(env
, &env
->config
->dtlb
, env
->dtlb
);
429 reset_tlb_region_way0(env
, env
->itlb
);
430 reset_tlb_region_way0(env
, env
->dtlb
);
434 static unsigned get_ring(const CPUXtensaState
*env
, uint8_t asid
)
437 for (i
= 0; i
< 4; ++i
) {
438 if (((env
->sregs
[RASID
] >> i
* 8) & 0xff) == asid
) {
446 * Lookup xtensa TLB for the given virtual address.
449 * \param pwi: [out] way index
450 * \param pei: [out] entry index
451 * \param pring: [out] access ring
452 * \return 0 if ok, exception cause code otherwise
454 int xtensa_tlb_lookup(const CPUXtensaState
*env
, uint32_t addr
, bool dtlb
,
455 uint32_t *pwi
, uint32_t *pei
, uint8_t *pring
)
457 const xtensa_tlb
*tlb
= dtlb
?
458 &env
->config
->dtlb
: &env
->config
->itlb
;
459 const xtensa_tlb_entry (*entry
)[MAX_TLB_WAY_SIZE
] = dtlb
?
460 env
->dtlb
: env
->itlb
;
465 for (wi
= 0; wi
< tlb
->nways
; ++wi
) {
468 split_tlb_entry_spec_way(env
, addr
, dtlb
, &vpn
, wi
, &ei
);
469 if (entry
[wi
][ei
].vaddr
== vpn
&& entry
[wi
][ei
].asid
) {
470 unsigned ring
= get_ring(env
, entry
[wi
][ei
].asid
);
474 LOAD_STORE_TLB_MULTI_HIT_CAUSE
:
475 INST_TLB_MULTI_HIT_CAUSE
;
484 (dtlb
? LOAD_STORE_TLB_MISS_CAUSE
: INST_TLB_MISS_CAUSE
);
488 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
491 static unsigned mmu_attr_to_access(uint32_t attr
)
501 access
|= PAGE_WRITE
;
504 switch (attr
& 0xc) {
506 access
|= PAGE_CACHE_BYPASS
;
510 access
|= PAGE_CACHE_WB
;
514 access
|= PAGE_CACHE_WT
;
517 } else if (attr
== 13) {
518 access
|= PAGE_READ
| PAGE_WRITE
| PAGE_CACHE_ISOLATE
;
524 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
527 static unsigned region_attr_to_access(uint32_t attr
)
529 static const unsigned access
[16] = {
530 [0] = PAGE_READ
| PAGE_WRITE
| PAGE_CACHE_WT
,
531 [1] = PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
| PAGE_CACHE_WT
,
532 [2] = PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
| PAGE_CACHE_BYPASS
,
533 [3] = PAGE_EXEC
| PAGE_CACHE_WB
,
534 [4] = PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
| PAGE_CACHE_WB
,
535 [5] = PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
| PAGE_CACHE_WB
,
536 [14] = PAGE_READ
| PAGE_WRITE
| PAGE_CACHE_ISOLATE
,
539 return access
[attr
& 0xf];
543 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
544 * See ISA, A.2.14 The Cache Attribute Register
546 static unsigned cacheattr_attr_to_access(uint32_t attr
)
548 static const unsigned access
[16] = {
549 [0] = PAGE_READ
| PAGE_WRITE
| PAGE_CACHE_WT
,
550 [1] = PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
| PAGE_CACHE_WT
,
551 [2] = PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
| PAGE_CACHE_BYPASS
,
552 [3] = PAGE_EXEC
| PAGE_CACHE_WB
,
553 [4] = PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
| PAGE_CACHE_WB
,
554 [14] = PAGE_READ
| PAGE_WRITE
| PAGE_CACHE_ISOLATE
,
557 return access
[attr
& 0xf];
560 static bool is_access_granted(unsigned access
, int is_write
)
564 return access
& PAGE_READ
;
567 return access
& PAGE_WRITE
;
570 return access
& PAGE_EXEC
;
577 static int get_pte(CPUXtensaState
*env
, uint32_t vaddr
, uint32_t *pte
);
579 static int get_physical_addr_mmu(CPUXtensaState
*env
, bool update_tlb
,
580 uint32_t vaddr
, int is_write
, int mmu_idx
,
581 uint32_t *paddr
, uint32_t *page_size
, unsigned *access
,
584 bool dtlb
= is_write
!= 2;
590 const xtensa_tlb_entry
*entry
= NULL
;
591 xtensa_tlb_entry tmp_entry
;
592 int ret
= xtensa_tlb_lookup(env
, vaddr
, dtlb
, &wi
, &ei
, &ring
);
594 if ((ret
== INST_TLB_MISS_CAUSE
|| ret
== LOAD_STORE_TLB_MISS_CAUSE
) &&
595 may_lookup_pt
&& get_pte(env
, vaddr
, &pte
) == 0) {
596 ring
= (pte
>> 4) & 0x3;
598 split_tlb_entry_spec_way(env
, vaddr
, dtlb
, &vpn
, wi
, &ei
);
601 wi
= ++env
->autorefill_idx
& 0x3;
602 xtensa_tlb_set_entry(env
, dtlb
, wi
, ei
, vpn
, pte
);
603 env
->sregs
[EXCVADDR
] = vaddr
;
604 qemu_log_mask(CPU_LOG_MMU
, "%s: autorefill(%08x): %08x -> %08x\n",
605 __func__
, vaddr
, vpn
, pte
);
607 xtensa_tlb_set_entry_mmu(env
, &tmp_entry
, dtlb
, wi
, ei
, vpn
, pte
);
617 entry
= xtensa_tlb_get_entry(env
, dtlb
, wi
, ei
);
620 if (ring
< mmu_idx
) {
622 LOAD_STORE_PRIVILEGE_CAUSE
:
623 INST_FETCH_PRIVILEGE_CAUSE
;
626 *access
= mmu_attr_to_access(entry
->attr
) &
627 ~(dtlb
? PAGE_EXEC
: PAGE_READ
| PAGE_WRITE
);
628 if (!is_access_granted(*access
, is_write
)) {
631 STORE_PROHIBITED_CAUSE
:
632 LOAD_PROHIBITED_CAUSE
) :
633 INST_FETCH_PROHIBITED_CAUSE
;
636 *paddr
= entry
->paddr
| (vaddr
& ~xtensa_tlb_get_addr_mask(env
, dtlb
, wi
));
637 *page_size
= ~xtensa_tlb_get_addr_mask(env
, dtlb
, wi
) + 1;
642 static int get_pte(CPUXtensaState
*env
, uint32_t vaddr
, uint32_t *pte
)
644 CPUState
*cs
= CPU(xtensa_env_get_cpu(env
));
649 (env
->sregs
[PTEVADDR
] | (vaddr
>> 10)) & 0xfffffffc;
650 int ret
= get_physical_addr_mmu(env
, false, pt_vaddr
, 0, 0,
651 &paddr
, &page_size
, &access
, false);
653 qemu_log_mask(CPU_LOG_MMU
, "%s: trying autorefill(%08x) -> %08x\n",
654 __func__
, vaddr
, ret
? ~0 : paddr
);
657 *pte
= ldl_phys(cs
->as
, paddr
);
662 static int get_physical_addr_region(CPUXtensaState
*env
,
663 uint32_t vaddr
, int is_write
, int mmu_idx
,
664 uint32_t *paddr
, uint32_t *page_size
, unsigned *access
)
666 bool dtlb
= is_write
!= 2;
668 uint32_t ei
= (vaddr
>> 29) & 0x7;
669 const xtensa_tlb_entry
*entry
=
670 xtensa_tlb_get_entry(env
, dtlb
, wi
, ei
);
672 *access
= region_attr_to_access(entry
->attr
);
673 if (!is_access_granted(*access
, is_write
)) {
676 STORE_PROHIBITED_CAUSE
:
677 LOAD_PROHIBITED_CAUSE
) :
678 INST_FETCH_PROHIBITED_CAUSE
;
681 *paddr
= entry
->paddr
| (vaddr
& ~REGION_PAGE_MASK
);
682 *page_size
= ~REGION_PAGE_MASK
+ 1;
688 * Convert virtual address to physical addr.
689 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
691 * \return 0 if ok, exception cause code otherwise
693 int xtensa_get_physical_addr(CPUXtensaState
*env
, bool update_tlb
,
694 uint32_t vaddr
, int is_write
, int mmu_idx
,
695 uint32_t *paddr
, uint32_t *page_size
, unsigned *access
)
697 if (xtensa_option_enabled(env
->config
, XTENSA_OPTION_MMU
)) {
698 return get_physical_addr_mmu(env
, update_tlb
,
699 vaddr
, is_write
, mmu_idx
, paddr
, page_size
, access
, true);
700 } else if (xtensa_option_bits_enabled(env
->config
,
701 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION
) |
702 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION
))) {
703 return get_physical_addr_region(env
, vaddr
, is_write
, mmu_idx
,
704 paddr
, page_size
, access
);
707 *page_size
= TARGET_PAGE_SIZE
;
708 *access
= cacheattr_attr_to_access(
709 env
->sregs
[CACHEATTR
] >> ((vaddr
& 0xe0000000) >> 27));
714 static void dump_tlb(FILE *f
, fprintf_function cpu_fprintf
,
715 CPUXtensaState
*env
, bool dtlb
)
718 const xtensa_tlb
*conf
=
719 dtlb
? &env
->config
->dtlb
: &env
->config
->itlb
;
720 unsigned (*attr_to_access
)(uint32_t) =
721 xtensa_option_enabled(env
->config
, XTENSA_OPTION_MMU
) ?
722 mmu_attr_to_access
: region_attr_to_access
;
724 for (wi
= 0; wi
< conf
->nways
; ++wi
) {
725 uint32_t sz
= ~xtensa_tlb_get_addr_mask(env
, dtlb
, wi
) + 1;
727 bool print_header
= true;
729 if (sz
>= 0x100000) {
737 for (ei
= 0; ei
< conf
->way_size
[wi
]; ++ei
) {
738 const xtensa_tlb_entry
*entry
=
739 xtensa_tlb_get_entry(env
, dtlb
, wi
, ei
);
742 static const char * const cache_text
[8] = {
743 [PAGE_CACHE_BYPASS
>> PAGE_CACHE_SHIFT
] = "Bypass",
744 [PAGE_CACHE_WT
>> PAGE_CACHE_SHIFT
] = "WT",
745 [PAGE_CACHE_WB
>> PAGE_CACHE_SHIFT
] = "WB",
746 [PAGE_CACHE_ISOLATE
>> PAGE_CACHE_SHIFT
] = "Isolate",
748 unsigned access
= attr_to_access(entry
->attr
);
749 unsigned cache_idx
= (access
& PAGE_CACHE_MASK
) >>
753 print_header
= false;
754 cpu_fprintf(f
, "Way %u (%d %s)\n", wi
, sz
, sz_text
);
756 "\tVaddr Paddr ASID Attr RWX Cache\n"
757 "\t---------- ---------- ---- ---- --- -------\n");
760 "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
765 (access
& PAGE_READ
) ? 'R' : '-',
766 (access
& PAGE_WRITE
) ? 'W' : '-',
767 (access
& PAGE_EXEC
) ? 'X' : '-',
768 cache_text
[cache_idx
] ? cache_text
[cache_idx
] :
775 void dump_mmu(FILE *f
, fprintf_function cpu_fprintf
, CPUXtensaState
*env
)
777 if (xtensa_option_bits_enabled(env
->config
,
778 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION
) |
779 XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION
) |
780 XTENSA_OPTION_BIT(XTENSA_OPTION_MMU
))) {
782 cpu_fprintf(f
, "ITLB:\n");
783 dump_tlb(f
, cpu_fprintf
, env
, false);
784 cpu_fprintf(f
, "\nDTLB:\n");
785 dump_tlb(f
, cpu_fprintf
, env
, true);
787 cpu_fprintf(f
, "No TLB for this CPU core\n");
791 void xtensa_runstall(CPUXtensaState
*env
, bool runstall
)
793 CPUState
*cpu
= CPU(xtensa_env_get_cpu(env
));
795 env
->runstall
= runstall
;
796 cpu
->halted
= runstall
;
798 cpu_interrupt(cpu
, CPU_INTERRUPT_HALT
);
800 cpu_reset_interrupt(cpu
, CPU_INTERRUPT_HALT
);