/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Attempt the memory load; we don't care about the result but
     * only the side-effects (ie any MMU or other exception)
     */
    cpu_ldub_code_ra(env, vaddr, GETPC());
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

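/*
 * Informative note: RASID packs four 8-bit ASIDs, one per ring, with bits
 * 7:0 for ring 0 (forced to 1 above), bits 15:8 for ring 1, and so on;
 * get_ring() below matches ASIDs against these bytes, and reset_mmu()
 * initializes the register to 0x04030201.
 */
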
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                  bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

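/*
 * Worked example (informative): for way 4 the page-size field doubles the
 * shift, so mask 0xfff00000 (field value 0) means 1 MB pages, 0xffc00000
 * means 4 MB, 0xff000000 means 16 MB, and 0xfc000000 means 64 MB.
 */
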
/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
                              uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

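/*
 * Informative example: with 16 refill entries (is32 == false), probing
 * vaddr 0x2345f000 through way 0 yields *ei = (v >> 12) & 0x3 = 3 and
 * *vpn = 0x2345f000, since the refill ways translate 4 KB pages.
 */
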
/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

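/*
 * Informative note: in the TLB entry-specifier format the way index lives
 * in the low bits of v (4 bits for the DTLB, 3 for the ITLB), which is why
 * the MMU branch above masks with 0xf or 0x7 before splitting the rest.
 */
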
static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
        uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

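/*
 * Informative note on the single-hit return value above: it packs the
 * page-aligned vaddr with the way index plus a "hit" flag (0x10 for the
 * DTLB probe, 0x8 for the ITLB probe); a miss, or a hit on a ring more
 * privileged than the current one, returns 0.
 */
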
void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                              xtensa_tlb_entry *entry, bool dtlb,
                              unsigned wi, unsigned ei, uint32_t vpn,
                              uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

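/*
 * Informative note on the PTE layout assumed here: bits 3:0 are the
 * attributes, bits 5:4 the ring, and the high bits the PPN. The expression
 * (pte >> 1) & 0x18 turns the ring into a byte offset (ring * 8) into
 * RASID, so the entry records the ASID currently assigned to that ring.
 */
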
void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                          unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

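/*
 * Informative example: with the reset value RASID = 0x04030201,
 * get_ring(env, 1) == 0 and get_ring(env, 4) == 3, while an ASID mapped to
 * no ring (e.g. 5) yields 0xff and the matching TLB entry is ignored by
 * xtensa_tlb_lookup() below.
 */
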
/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
                      uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

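/*
 * Informative note: every way is probed at the entry index that the vaddr
 * selects within it; a second live hit (valid ASID mapped to some ring)
 * aborts the lookup with a multi-hit cause rather than silently preferring
 * one way.
 */
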
/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

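/*
 * Informative summary of the ATTR encoding handled above: values 0..11 are
 * readable pages where bit 0 grants exec and bit 1 grants write, while bits
 * 3:2 select bypass (0), write-back (1) or write-through (2) caching; 13 is
 * an isolate page (read/write only); everything else grants no access.
 */
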
/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

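/*
 * Informative note: on an autorefill the destination way is chosen
 * round-robin among the four refill ways via autorefill_idx, and EXCVADDR
 * is set to the faulting vaddr, which appears intended to leave the state a
 * hardware refill sequence would have left behind.
 */
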
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

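/*
 * Informative note on the walk above: the page table is a flat array of
 * 4-byte PTEs in virtual memory based at PTEVADDR, so the PTE address is
 * PTEVADDR | (vaddr >> 10), rounded down to 4 bytes. The translation of
 * pt_vaddr itself must already be in the TLB (may_lookup_pt is false),
 * which keeps the walk from recursing.
 */
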
static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

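/*
 * Informative note on the fallback above: with neither MMU nor region
 * protection configured, CACHEATTR supplies one 4-bit attribute per 512 MB
 * region ((vaddr & 0xe0000000) >> 27 is the region number times 4), decoded
 * by cacheattr_attr_to_access().
 */
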
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
                     CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                                "\tVaddr      Paddr      ASID Attr RWX Cache\n"
                                "\t---------- ---------- ---- ---- --- -------\n");
                }
                cpu_fprintf(f,
                            "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}