/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#define XTENSA_MPU_SEGMENT_MASK 0x0000001f
#define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00
#define XTENSA_MPU_ACC_RIGHTS_SHIFT 8
#define XTENSA_MPU_MEM_TYPE_MASK 0x001ff000
#define XTENSA_MPU_MEM_TYPE_SHIFT 12
#define XTENSA_MPU_ATTR_MASK 0x001fff00

#define XTENSA_MPU_PROBE_B 0x40000000
#define XTENSA_MPU_PROBE_V 0x80000000

#define XTENSA_MPU_SYSTEM_TYPE_DEVICE 0x0001
#define XTENSA_MPU_SYSTEM_TYPE_NC 0x0002
#define XTENSA_MPU_SYSTEM_TYPE_C 0x0003
#define XTENSA_MPU_SYSTEM_TYPE_MASK 0x0003

#define XTENSA_MPU_TYPE_SYS_C 0x0010
#define XTENSA_MPU_TYPE_SYS_W 0x0020
#define XTENSA_MPU_TYPE_SYS_R 0x0040
#define XTENSA_MPU_TYPE_CPU_C 0x0100
#define XTENSA_MPU_TYPE_CPU_W 0x0200
#define XTENSA_MPU_TYPE_CPU_R 0x0400
#define XTENSA_MPU_TYPE_CPU_CACHE 0x0800
#define XTENSA_MPU_TYPE_B 0x1000
#define XTENSA_MPU_TYPE_INT 0x2000
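
/*
 * As implied by the masks above, an MPU entry word keeps the segment number
 * in bits 4:0, the access rights in bits 11:8 and the memory type in bits
 * 20:12.  For example, for an entry word p the rights field is
 * (p & XTENSA_MPU_ACC_RIGHTS_MASK) >> XTENSA_MPU_ACC_RIGHTS_SHIFT.
 */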
void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Probe the memory; we don't care about the result but
     * only the side effects (i.e. any MMU or other exception)
     */
    probe_access(env, vaddr, 1, MMU_INST_FETCH,
                 cpu_mmu_index(env_cpu(env), true), GETPC());
}
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(env_cpu(env));
    }
}
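
/*
 * RASID packs four 8-bit ASIDs, one per ring; the OR with 0x1 above pins
 * the ring 0 ASID to 1, so e.g. writing 0x04030200 actually stores
 * 0x04030201.
 */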
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}
/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                         bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}
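
/*
 * Worked example: with a TLBCFG page size field of 0, way 4 gets the mask
 * 0xfff00000 << 0 == 0xfff00000, i.e. 1 MB pages; a field value of 1
 * shifts by 2 and yields 0xffc00000, i.e. 4 MB pages.
 */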
/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}
/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
                                     bool dtlb, uint32_t *vpn,
                                     uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    switch (wi) {
    case 0 ... 3:
        {
            bool is32 = (dtlb ?
                    env->config->dtlb.nrefillentries :
                    env->config->itlb.nrefillentries) == 32;
            *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
        }
        break;

    case 4:
        {
            uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
            *ei = (v >> eibase) & 0x3;
        }
        break;

    case 5:
        if (varway56) {
            uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
            *ei = (v >> eibase) & 0x3;
        } else {
            *ei = (v >> 27) & 0x1;
        }
        break;

    case 6:
        if (varway56) {
            uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
            *ei = (v >> eibase) & 0x7;
        } else {
            *ei = (v >> 28) & 0x1;
        }
        break;

    default:
        *ei = 0;
        break;
    }

    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}
/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static bool split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        if (*wi < (dtlb ? env->config->dtlb.nways : env->config->itlb.nways)) {
            split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
            return true;
        } else {
            return false;
        }
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
        return true;
    }
}
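
/*
 * Example of the decode above for an MMU-style DTLB address v: the way
 * index is the low nibble (v & 0xf) and the entry index comes from the VPN
 * bits selected by split_tlb_entry_spec_way(), e.g. bits 13:12 for a
 * 4-entry refill way.
 */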
static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
                                              unsigned wi, unsigned ei)
{
    const xtensa_tlb *tlb = dtlb ? &env->config->dtlb : &env->config->itlb;

    assert(wi < tlb->nways && ei < tlb->way_size[wi]);
    return dtlb ? env->dtlb[wi] + ei : env->itlb[wi] + ei;
}
static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        if (pwi) {
            *pwi = wi;
        }
        return xtensa_tlb_get_entry(env, dtlb, wi, ei);
    } else {
        return NULL;
    }
}
static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                                     xtensa_tlb_entry *entry, bool dtlb,
                                     unsigned wi, unsigned ei, uint32_t vpn,
                                     uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}
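
/*
 * The ASID assignment above picks one byte of RASID by ring: PTE bits 5:4
 * hold the ring, so ((pte >> 1) & 0x18) evaluates to ring * 8 and the
 * shift selects the corresponding ASID byte.
 */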
static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                                 unsigned wi, unsigned ei,
                                 uint32_t vpn, uint32_t pte)
{
    CPUState *cs = env_cpu(env);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}
static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}
static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;

        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}
static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        unsigned i;

        env->sregs[MPUENB] = 0;
        env->sregs[MPUCFG] = env->config->n_mpu_fg_segments;
        env->sregs[CACHEADRDIS] = 0;
        assert(env->config->n_mpu_bg_segments > 0 &&
               env->config->mpu_bg[0].vaddr == 0);
        for (i = 1; i < env->config->n_mpu_bg_segments; ++i) {
            assert(env->config->mpu_bg[i].vaddr >=
                   env->config->mpu_bg[i - 1].vaddr);
        }
    } else {
        env->sregs[CACHEATTR] = 0x22222222;
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}
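
/*
 * Note that the MMU reset value RASID == 0x04030201 assigns ASIDs 1..4 to
 * rings 0..3, which is what the get_ring() lookup below relies on.
 */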
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;

    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}
/*!
 * Lookup xtensa TLB for the given virtual address.
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
static int xtensa_tlb_lookup(const CPUXtensaState *env,
                             uint32_t addr, bool dtlb,
                             uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;

        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);

            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        if (entry) {
            return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
        } else {
            return 0;
        }
    } else {
        return v & REGION_PAGE_MASK;
    }
}
uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);

    if (entry) {
        return entry->paddr | entry->attr;
    } else {
        return 0;
    }
}
void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        if (entry && entry->variable && entry->asid) {
            tlb_flush_page(env_cpu(env), entry->vaddr);
            entry->asid = 0;
        }
    }
}
uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}
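
/*
 * As coded above, a successful probe returns the VPN together with the way
 * index and a hit flag in bit 4 (DTLB) or bit 3 (ITLB); without the MMU
 * option the result is simply the region base with bit 0 set.
 */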
void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
    }
}
/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}
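
/*
 * Worked example: attr == 7 decodes to PAGE_READ | PAGE_WRITE | PAGE_EXEC
 * with write-back caching (attr & 0xc == 4), which is why the reset entries
 * above use attr 7 for cached mappings and attr 3 for the cache-bypass
 * aliases.
 */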
/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}
/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}
struct attr_pattern {
    uint32_t mask;
    uint32_t value;
};

static int attr_pattern_match(uint32_t attr,
                              const struct attr_pattern *pattern,
                              size_t n)
{
    size_t i;

    for (i = 0; i < n; ++i) {
        if ((attr & pattern[i].mask) == pattern[i].value) {
            return 1;
        }
    }
    return 0;
}
static unsigned mpu_attr_to_cpu_cache(uint32_t attr)
{
    static const struct attr_pattern cpu_c[] = {
        { .mask = 0x18f, .value = 0x089 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x180, .value = 0x180 },
    };
    unsigned type = 0;

    if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) {
        type |= XTENSA_MPU_TYPE_CPU_CACHE;
        if (attr & 0x10) {
            type |= XTENSA_MPU_TYPE_CPU_C;
        }
        if (attr & 0x20) {
            type |= XTENSA_MPU_TYPE_CPU_W;
        }
        if (attr & 0x40) {
            type |= XTENSA_MPU_TYPE_CPU_R;
        }
    }
    return type;
}
static unsigned mpu_attr_to_type(uint32_t attr)
{
    static const struct attr_pattern device_type[] = {
        { .mask = 0x1f6, .value = 0x000 },
        { .mask = 0x1f6, .value = 0x006 },
    };
    static const struct attr_pattern sys_nc_type[] = {
        { .mask = 0x1fe, .value = 0x018 },
        { .mask = 0x1fe, .value = 0x01e },
        { .mask = 0x18f, .value = 0x089 },
    };
    static const struct attr_pattern sys_c_type[] = {
        { .mask = 0x1f8, .value = 0x010 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x1f0, .value = 0x030 },
        { .mask = 0x180, .value = 0x180 },
    };
    static const struct attr_pattern b[] = {
        { .mask = 0x1f7, .value = 0x001 },
        { .mask = 0x1f7, .value = 0x007 },
        { .mask = 0x1ff, .value = 0x019 },
        { .mask = 0x1ff, .value = 0x01f },
    };

    unsigned type = 0;

    attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT;
    if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE;
        if (attr & 0x8) {
            type |= XTENSA_MPU_TYPE_INT;
        }
    }
    if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_NC;
    }
    if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_C;
        if (attr & 0x1) {
            type |= XTENSA_MPU_TYPE_SYS_C;
        }
        if (attr & 0x2) {
            type |= XTENSA_MPU_TYPE_SYS_W;
        }
        if (attr & 0x4) {
            type |= XTENSA_MPU_TYPE_SYS_R;
        }
    }
    if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) {
        type |= XTENSA_MPU_TYPE_B;
    }
    type |= mpu_attr_to_cpu_cache(attr);

    return type;
}
static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring)
{
    static const unsigned access[2][16] = {
        [0] = {
            [4] = PAGE_READ,
            [5] = PAGE_READ | PAGE_EXEC,
            [6] = PAGE_READ | PAGE_WRITE,
            [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE,
            [10] = PAGE_READ | PAGE_WRITE,
            [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
        [1] = {
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [10] = PAGE_READ,
            [11] = PAGE_READ | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
    };
    unsigned rv;
    unsigned type;

    type = mpu_attr_to_cpu_cache(attr);
    rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >>
                           XTENSA_MPU_ACC_RIGHTS_SHIFT];

    if (type & XTENSA_MPU_TYPE_CPU_CACHE) {
        rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT;
    } else {
        rv |= PAGE_CACHE_BYPASS;
    }
    return rv;
}
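
/*
 * The first index above selects between ring 0 and any non-zero ring, so a
 * single 4-bit access rights value encodes both privileged and user
 * permissions.
 */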
static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}
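
/*
 * Throughout this file is_write doubles as an access type: 0 is a load,
 * 1 is a store and 2 is an instruction fetch (hence dtlb = is_write != 2
 * in the translation routines below).
 */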
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = env_cpu(env);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}
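
/*
 * The page table is a linear array of one 32-bit PTE per 4 KB page based at
 * PTEVADDR: (vaddr >> 12) * 4 == vaddr >> 10, and the final mask keeps the
 * access word-aligned.
 */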
static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}
static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n,
                             uint32_t vaddr, unsigned *segment)
{
    unsigned nhits = 0;
    unsigned i;

    for (i = 0; i < n; ++i) {
        if (vaddr >= entry[i].vaddr &&
            (i == n - 1 || vaddr < entry[i + 1].vaddr)) {
            if (nhits++) {
                break;
            }
            *segment = i;
        }
    }
    return nhits;
}
void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v)
{
    v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1;

    if (v != env->sregs[MPUENB]) {
        env->sregs[MPUENB] = v;
        tlb_flush(env_cpu(env));
    }
}
void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
{
    unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
        env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
        env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
        tlb_flush(env_cpu(env));
    }
}
uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].vaddr |
            extract32(env->sregs[MPUENB], segment, 1);
    } else {
        return 0;
    }
}
uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].attr;
    } else {
        return 0;
    }
}
uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
{
    unsigned nhits;
    unsigned segment = XTENSA_MPU_PROBE_B;
    unsigned bg_segment;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              v, &segment);
    if (nhits > 1) {
        HELPER(exception_cause_vaddr)(env, env->pc,
                                      LOAD_STORE_TLB_MULTI_HIT_CAUSE, v);
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          v, &bg_segment);
        return env->config->mpu_bg[bg_segment].attr | segment;
    }
    return 0;
}
static int get_physical_addr_mpu(CPUXtensaState *env,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access)
{
    unsigned nhits;
    unsigned segment;
    uint32_t attr;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              vaddr, &segment);
    if (nhits > 1) {
        return is_write < 2 ?
            LOAD_STORE_TLB_MULTI_HIT_CAUSE :
            INST_TLB_MULTI_HIT_CAUSE;
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        attr = env->mpu_fg[segment].attr;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          vaddr, &segment);
        attr = env->config->mpu_bg[segment].attr;
    }

    *access = mpu_attr_to_access(attr, mmu_idx);
    if (!is_access_granted(*access, is_write)) {
        return is_write < 2 ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }
    *paddr = vaddr;
    *page_size = env->config->mpu_align;
    return 0;
}
/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a pagewalk and change the xtensa autorefill TLB way
 * entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx,
                                     paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}
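
/*
 * A minimal usage sketch (mirroring xtensa_cpu_get_phys_page_debug()
 * above): callers pass is_write 0/1/2 and the current ring as mmu_idx,
 * then treat a non-zero return value as the exception cause to raise:
 *
 *     uint32_t paddr, page_size;
 *     unsigned access;
 *     int cause = xtensa_get_physical_addr(env, true, vaddr, 0, ring,
 *                                          &paddr, &page_size, &access);
 *     if (cause) {
 *         ... raise cause with EXCVADDR = vaddr ...
 *     }
 */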
static void dump_tlb(CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
                    qemu_printf("\tVaddr      Paddr      ASID Attr RWX Cache\n"
                                "\t---------- ---------- ---- ---- --- -------\n");
                }
                qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}
static void dump_mpu(CPUXtensaState *env,
                     const xtensa_mpu_entry *entry, unsigned n)
{
    unsigned i;

    qemu_printf("\t%s Vaddr      Attr       Ring0 Ring1 System Type   CPU cache\n"
                "\t%s ---------- ---------- ----- ----- ------------- ---------\n",
                env ? "En" : "  ",
                env ? "--" : "  ");

    for (i = 0; i < n; ++i) {
        uint32_t attr = entry[i].attr;
        unsigned access0 = mpu_attr_to_access(attr, 0);
        unsigned access1 = mpu_attr_to_access(attr, 1);
        unsigned type = mpu_attr_to_type(attr);
        char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' ';

        qemu_printf("\t %c 0x%08x 0x%08x %c%c%c   %c%c%c   ",
                    env ?
                    ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ',
                    entry[i].vaddr, attr,
                    (access0 & PAGE_READ) ? 'R' : '-',
                    (access0 & PAGE_WRITE) ? 'W' : '-',
                    (access0 & PAGE_EXEC) ? 'X' : '-',
                    (access1 & PAGE_READ) ? 'R' : '-',
                    (access1 & PAGE_WRITE) ? 'W' : '-',
                    (access1 & PAGE_EXEC) ? 'X' : '-');

        switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) {
        case XTENSA_MPU_SYSTEM_TYPE_DEVICE:
            qemu_printf("Device %cB %3s\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_INT) ? "int" : "");
            break;

        case XTENSA_MPU_SYSTEM_TYPE_NC:
            qemu_printf("Sys NC %cB %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;

        case XTENSA_MPU_SYSTEM_TYPE_C:
            qemu_printf("Sys C %c%c%c %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;

        default:
            qemu_printf("Unknown\n");
            break;
        }
    }
}
void dump_mmu(CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        qemu_printf("ITLB:\n");
        dump_tlb(env, false);
        qemu_printf("\nDTLB:\n");
        dump_tlb(env, true);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        qemu_printf("Foreground map:\n");
        dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
        qemu_printf("\nBackground map:\n");
        dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
    } else {
        qemu_printf("No TLB for this CPU core\n");
    }
}