/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define XTENSA_MPU_SEGMENT_MASK         0x0000001f
#define XTENSA_MPU_ACC_RIGHTS_MASK      0x00000f00
#define XTENSA_MPU_ACC_RIGHTS_SHIFT     8
#define XTENSA_MPU_MEM_TYPE_MASK        0x001ff000
#define XTENSA_MPU_MEM_TYPE_SHIFT       12
#define XTENSA_MPU_ATTR_MASK            0x001fff00

#define XTENSA_MPU_PROBE_B              0x40000000
#define XTENSA_MPU_PROBE_V              0x80000000

#define XTENSA_MPU_SYSTEM_TYPE_DEVICE   0x0001
#define XTENSA_MPU_SYSTEM_TYPE_NC       0x0002
#define XTENSA_MPU_SYSTEM_TYPE_C        0x0003
#define XTENSA_MPU_SYSTEM_TYPE_MASK     0x0003

#define XTENSA_MPU_TYPE_SYS_C           0x0010
#define XTENSA_MPU_TYPE_SYS_W           0x0020
#define XTENSA_MPU_TYPE_SYS_R           0x0040
#define XTENSA_MPU_TYPE_CPU_C           0x0100
#define XTENSA_MPU_TYPE_CPU_W           0x0200
#define XTENSA_MPU_TYPE_CPU_R           0x0400
#define XTENSA_MPU_TYPE_CPU_CACHE       0x0800
#define XTENSA_MPU_TYPE_B               0x1000
#define XTENSA_MPU_TYPE_INT             0x2000

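/*
 * Illustrative layout of the WPTLB "attributes" operand implied by the
 * masks above (see the Xtensa ISA MPU option for the authoritative
 * definition): bits 0..4 select the segment, bits 8..11 carry the
 * access rights, and bits 12..20 the memory type of an MPU entry.
 */
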
void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Attempt the memory load; we don't care about the result but
     * only the side-effects (i.e. any MMU or other exception)
     */
    cpu_ldub_code_ra(env, vaddr, GETPC());
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(env_cpu(env));
    }
}

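/*
 * RASID holds one 8-bit ASID per ring: ring 0 in bits 0..7 through ring 3
 * in bits 24..31 (reset value 0x04030201 in reset_mmu() below). The helper
 * above forces the ring 0 ASID to 1, so ring 0 mappings stay valid.
 */
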
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*
 * Get bit mask for the virtual address bits translated by the TLB way
 */
static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                         bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

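/*
 * Worked example for the variable way 4: the page size field selects
 * 1 MB, 4 MB, 16 MB or 64 MB pages, so the translated-bits mask is
 * 0xfff00000 shifted left by 0, 2, 4 or 6 bits respectively.
 */
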
/*
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
                                     bool dtlb, uint32_t *vpn,
                                     uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    switch (wi) {
    case 0 ... 3:
        {
            bool is32 = (dtlb ?
                         env->config->dtlb.nrefillentries :
                         env->config->itlb.nrefillentries) == 32;
            *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
        }
        break;

    case 4:
        {
            uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
            *ei = (v >> eibase) & 0x3;
        }
        break;

    case 5:
        if (varway56) {
            uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
            *ei = (v >> eibase) & 0x3;
        } else {
            *ei = (v >> 27) & 0x1;
        }
        break;

    case 6:
        if (varway56) {
            uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
            *ei = (v >> eibase) & 0x7;
        } else {
            *ei = (v >> 28) & 0x1;
        }
        break;

    default:
        *ei = 0;
        break;
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

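/*
 * Worked example for the refill ways 0..3: the entry index comes from
 * address bits 12..14 when the TLB has 32 refill entries (8 per way)
 * and bits 12..13 when it has 16, while the VPN keeps every address
 * bit covered by the way's page mask.
 */
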
/*
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
                                              unsigned wi, unsigned ei)
{
    return dtlb ?
        env->dtlb[wi] + ei :
        env->itlb[wi] + ei;
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                                     xtensa_tlb_entry *entry, bool dtlb,
                                     unsigned wi, unsigned ei, uint32_t vpn,
                                     uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

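/*
 * PTE layout assumed by the helper above: the page number in the high
 * bits (trimmed by the way's address mask), the owning ring in bits
 * 4..5 ((pte >> 1) & 0x18 turns it into that ring's bit offset within
 * RASID), and the attributes in bits 0..3.
 */
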
static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                                 unsigned wi, unsigned ei,
                                 uint32_t vpn, uint32_t pte)
{
    CPUState *cs = env_cpu(env);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

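/*
 * The debug translation above tries the address first as a data access
 * (is_write == 0) and then as an instruction fetch (is_write == 2),
 * reporting ~0 when neither mapping exists.
 */
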
static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;

        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        unsigned i;

        env->sregs[MPUENB] = 0;
        env->sregs[MPUCFG] = env->config->n_mpu_fg_segments;
        env->sregs[CACHEADRDIS] = 0;
        assert(env->config->n_mpu_bg_segments > 0 &&
               env->config->mpu_bg[0].vaddr == 0);
        for (i = 1; i < env->config->n_mpu_bg_segments; ++i) {
            assert(env->config->mpu_bg[i].vaddr >=
                   env->config->mpu_bg[i - 1].vaddr);
        }
    } else {
        env->sregs[CACHEATTR] = 0x22222222;
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;

    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*
 * Lookup xtensa TLB for the given virtual address.
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
static int xtensa_tlb_lookup(const CPUXtensaState *env,
                             uint32_t addr, bool dtlb,
                             uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    unsigned nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;

        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);

            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);

    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        if (entry->variable && entry->asid) {
            tlb_flush_page(env_cpu(env), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

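/*
 * Probe result format, as produced above: on a hit, the page part of
 * the probed address (bits 12..31), the way index, and a hit flag in
 * bit 4 (DTLB) or bit 3 (ITLB); without the MMU option, the 512 MB
 * region base with bit 0 set.
 */
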
void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}

/*
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

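/*
 * Summary of the encoding handled above: attributes 0..11 are readable,
 * bit 0 adds execute, bit 1 adds write, and bits 2..3 pick the cache
 * mode (bypass, write-back or write-through); attribute 13 is the
 * cache-isolate mode.
 */
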
/*
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

struct attr_pattern {
    uint32_t mask;
    uint32_t value;
};

static int attr_pattern_match(uint32_t attr,
                              const struct attr_pattern *pattern,
                              size_t n)
{
    size_t i;

    for (i = 0; i < n; ++i) {
        if ((attr & pattern[i].mask) == pattern[i].value) {
            return 1;
        }
    }
    return 0;
}

static unsigned mpu_attr_to_cpu_cache(uint32_t attr)
{
    static const struct attr_pattern cpu_c[] = {
        { .mask = 0x18f, .value = 0x089 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x180, .value = 0x180 },
    };
    unsigned type = 0;

    if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) {
        type |= XTENSA_MPU_TYPE_CPU_CACHE;
        if (attr & 0x10) {
            type |= XTENSA_MPU_TYPE_CPU_C;
        }
        if (attr & 0x20) {
            type |= XTENSA_MPU_TYPE_CPU_W;
        }
        if (attr & 0x40) {
            type |= XTENSA_MPU_TYPE_CPU_R;
        }
    }
    return type;
}

static unsigned mpu_attr_to_type(uint32_t attr)
{
    static const struct attr_pattern device_type[] = {
        { .mask = 0x1f6, .value = 0x000 },
        { .mask = 0x1f6, .value = 0x006 },
    };
    static const struct attr_pattern sys_nc_type[] = {
        { .mask = 0x1fe, .value = 0x018 },
        { .mask = 0x1fe, .value = 0x01e },
        { .mask = 0x18f, .value = 0x089 },
    };
    static const struct attr_pattern sys_c_type[] = {
        { .mask = 0x1f8, .value = 0x010 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x1f0, .value = 0x030 },
        { .mask = 0x180, .value = 0x180 },
    };
    static const struct attr_pattern b[] = {
        { .mask = 0x1f7, .value = 0x001 },
        { .mask = 0x1f7, .value = 0x007 },
        { .mask = 0x1ff, .value = 0x019 },
        { .mask = 0x1ff, .value = 0x01f },
    };

    unsigned type = 0;

    attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT;
    if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE;
        if (attr & 0x8) {
            type |= XTENSA_MPU_TYPE_INT;
        }
    }
    if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_NC;
    }
    if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_C;
        if (attr & 0x1) {
            type |= XTENSA_MPU_TYPE_SYS_C;
        }
        if (attr & 0x2) {
            type |= XTENSA_MPU_TYPE_SYS_W;
        }
        if (attr & 0x4) {
            type |= XTENSA_MPU_TYPE_SYS_R;
        }
    }
    if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) {
        type |= XTENSA_MPU_TYPE_B;
    }
    type |= mpu_attr_to_cpu_cache(attr);

    return type;
}

static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring)
{
    static const unsigned access[2][16] = {
        [0] = {
            [4] = PAGE_READ,
            [5] = PAGE_READ | PAGE_EXEC,
            [6] = PAGE_READ | PAGE_WRITE,
            [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE,
            [10] = PAGE_READ | PAGE_WRITE,
            [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
        [1] = {
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [10] = PAGE_READ,
            [11] = PAGE_READ | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
    };
    unsigned rv;
    unsigned type;

    type = mpu_attr_to_cpu_cache(attr);
    rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >>
                           XTENSA_MPU_ACC_RIGHTS_SHIFT];

    if (type & XTENSA_MPU_TYPE_CPU_CACHE) {
        rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT;
    } else {
        rv |= PAGE_CACHE_BYPASS;
    }
    return rv;
}

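/*
 * The two rows above are indexed by (ring != 0): row 0 is the ring 0
 * (kernel) view of each access-rights value, row 1 the view of the
 * non-zero rings. Cacheability is then folded in from the memory-type
 * bits via mpu_attr_to_cpu_cache().
 */
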
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = env_cpu(env);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

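/*
 * Page table location derivation: one 4-byte PTE describes each 4 KB
 * page, so the PTE lives at PTEVADDR + (vaddr >> 12) * 4; that is the
 * (vaddr >> 10) OR-ed in above with the two low bits cleared.
 */
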
static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n,
                             uint32_t vaddr, unsigned *segment)
{
    unsigned nhits = 0;
    unsigned i;

    for (i = 0; i < n; ++i) {
        if (vaddr >= entry[i].vaddr &&
            (i == n - 1 || vaddr < entry[i + 1].vaddr)) {
            if (nhits++) {
                break;
            }
            *segment = i;
        }
    }
    return nhits;
}

void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v)
{
    v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1;

    if (v != env->sregs[MPUENB]) {
        env->sregs[MPUENB] = v;
        tlb_flush(env_cpu(env));
    }
}

void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
{
    unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
        env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
        env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
        tlb_flush(env_cpu(env));
    }
}

uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].vaddr |
            extract32(env->sregs[MPUENB], segment, 1);
    } else {
        return 0;
    }
}

uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].attr;
    } else {
        return 0;
    }
}

)(CPUXtensaState
*env
, uint32_t v
)
970 unsigned segment
= XTENSA_MPU_PROBE_B
;
973 nhits
= xtensa_mpu_lookup(env
->mpu_fg
, env
->config
->n_mpu_fg_segments
,
976 HELPER(exception_cause_vaddr
)(env
, env
->pc
,
977 LOAD_STORE_TLB_MULTI_HIT_CAUSE
, v
);
978 } else if (nhits
== 1 && (env
->sregs
[MPUENB
] & (1u << segment
))) {
979 return env
->mpu_fg
[segment
].attr
| segment
| XTENSA_MPU_PROBE_V
;
981 xtensa_mpu_lookup(env
->config
->mpu_bg
,
982 env
->config
->n_mpu_bg_segments
,
984 return env
->config
->mpu_bg
[bg_segment
].attr
| segment
;
static int get_physical_addr_mpu(CPUXtensaState *env,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access)
{
    unsigned nhits;
    unsigned segment;
    uint32_t attr;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              vaddr, &segment);
    if (nhits > 1) {
        return is_write < 2 ?
            LOAD_STORE_TLB_MULTI_HIT_CAUSE :
            INST_TLB_MULTI_HIT_CAUSE;
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        attr = env->mpu_fg[segment].attr;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          vaddr, &segment);
        attr = env->config->mpu_bg[segment].attr;
    }

    *access = mpu_attr_to_access(attr, mmu_idx);
    if (!is_access_granted(*access, is_write)) {
        return is_write < 2 ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }
    *paddr = vaddr;
    *page_size = env->config->mpu_align;
    return 0;
}

/*
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                   XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                   XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx,
                                     paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

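/*
 * In the no-MMU/MPU/region-protection case above, (vaddr & 0xe0000000)
 * >> 27 is the 512 MB region number multiplied by 4, selecting that
 * region's 4-bit nibble of the CACHEATTR register.
 */
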
static void dump_tlb(CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
                    qemu_printf("\tVaddr      Paddr      ASID Attr RWX Cache\n"
                                "\t---------- ---------- ---- ---- --- -------\n");
                }
                qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

static void dump_mpu(CPUXtensaState *env,
                     const xtensa_mpu_entry *entry, unsigned n)
{
    unsigned i;

    qemu_printf("\t%s Vaddr      Attr       Ring0 Ring1 System Type   CPU cache\n"
                "\t%s ---------- ---------- ----- ----- ------------- ---------\n",
                env ? "En" : "  ",
                env ? "--" : "  ");

    for (i = 0; i < n; ++i) {
        uint32_t attr = entry[i].attr;
        unsigned access0 = mpu_attr_to_access(attr, 0);
        unsigned access1 = mpu_attr_to_access(attr, 1);
        unsigned type = mpu_attr_to_type(attr);
        char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' ';

        qemu_printf("\t %c 0x%08x 0x%08x %c%c%c %c%c%c ",
                    env ?
                    ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ',
                    entry[i].vaddr, attr,
                    (access0 & PAGE_READ) ? 'R' : '-',
                    (access0 & PAGE_WRITE) ? 'W' : '-',
                    (access0 & PAGE_EXEC) ? 'X' : '-',
                    (access1 & PAGE_READ) ? 'R' : '-',
                    (access1 & PAGE_WRITE) ? 'W' : '-',
                    (access1 & PAGE_EXEC) ? 'X' : '-');

        switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) {
        case XTENSA_MPU_SYSTEM_TYPE_DEVICE:
            qemu_printf("Device %cB %3s\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_INT) ? "int" : "");
            break;

        case XTENSA_MPU_SYSTEM_TYPE_NC:
            qemu_printf("Sys NC %cB %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;

        case XTENSA_MPU_SYSTEM_TYPE_C:
            qemu_printf("Sys C %c%c%c %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;

        default:
            qemu_printf("Unknown\n");
            break;
        }
    }
}

void dump_mmu(CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
            XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        qemu_printf("ITLB:\n");
        dump_tlb(env, false);
        qemu_printf("\nDTLB:\n");
        dump_tlb(env, true);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        qemu_printf("Foreground map:\n");
        dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
        qemu_printf("\nBackground map:\n");
        dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
    } else {
        qemu_printf("No TLB for this CPU core\n");
    }
}