/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"

#define XTENSA_MPU_SEGMENT_MASK 0x0000001f
#define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00
#define XTENSA_MPU_ACC_RIGHTS_SHIFT 8
#define XTENSA_MPU_MEM_TYPE_MASK 0x001ff000
#define XTENSA_MPU_MEM_TYPE_SHIFT 12
#define XTENSA_MPU_ATTR_MASK 0x001fff00

#define XTENSA_MPU_PROBE_B 0x40000000
#define XTENSA_MPU_PROBE_V 0x80000000

#define XTENSA_MPU_SYSTEM_TYPE_DEVICE 0x0001
#define XTENSA_MPU_SYSTEM_TYPE_NC 0x0002
#define XTENSA_MPU_SYSTEM_TYPE_C 0x0003
#define XTENSA_MPU_SYSTEM_TYPE_MASK 0x0003

#define XTENSA_MPU_TYPE_SYS_C 0x0010
#define XTENSA_MPU_TYPE_SYS_W 0x0020
#define XTENSA_MPU_TYPE_SYS_R 0x0040
#define XTENSA_MPU_TYPE_CPU_C 0x0100
#define XTENSA_MPU_TYPE_CPU_W 0x0200
#define XTENSA_MPU_TYPE_CPU_R 0x0400
#define XTENSA_MPU_TYPE_CPU_CACHE 0x0800
#define XTENSA_MPU_TYPE_B 0x1000
#define XTENSA_MPU_TYPE_INT 0x2000
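
/*
 * Taken together, the masks above describe the layout used by the MPU
 * helpers below: bits 0..4 hold the segment index, bits 8..11 the access
 * rights field and bits 12..20 the memory type field of an MPU entry word.
 * XTENSA_MPU_PROBE_B and XTENSA_MPU_PROBE_V only appear in the result of
 * the PPTLB probe helper.
 */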
void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Probe the memory; we don't care about the result but
     * only the side-effects (ie any MMU or other exception)
     */
    probe_access(env, vaddr, 1, MMU_INST_FETCH,
                 cpu_mmu_index(env, true), GETPC());
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(env_cpu(env));
    }
}
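
/*
 * RASID holds one 8-bit ASID per ring, ring 0 in the low byte and ring 3 in
 * the high byte.  The write above forces the ring 0 ASID to 1, and any
 * change to the register flushes the QEMU TLB, because the ASID-to-ring
 * mapping consulted by get_ring() below has changed.
 */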
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                         bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}
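
/*
 * For example, with the MMU option enabled, way 4 starts with the mask
 * 0xfff00000 (1 MB pages) and the mask is shifted left by two bits per
 * increment of the page size field in ITLBCFG/DTLBCFG, so page size code 1
 * yields 0xffc00000, i.e. 4 MB pages.
 */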
/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
                                     bool dtlb, uint32_t *vpn,
                                     uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}
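
/*
 * With the MMU option the low bits of a [RIWP]TLB entry specifier select
 * the way (bits 0..3 for the DTLB, 0..2 for the ITLB) and
 * split_tlb_entry_spec_way() extracts the entry index and VPN from the
 * remaining bits; without the MMU only the top three address bits (the
 * 512 MB region number) are significant.
 */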
static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
                                              unsigned wi, unsigned ei)
{
    return dtlb ?
        env->dtlb[wi] + ei :
        env->itlb[wi] + ei;
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                                     xtensa_tlb_entry *entry, bool dtlb,
                                     unsigned wi, unsigned ei, uint32_t vpn,
                                     uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}
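
/*
 * The ASID is not stored in the PTE itself: PTE bits 4..5 select a ring,
 * and ((pte >> 1) & 0x18) turns that ring number into a byte offset into
 * RASID, so the entry receives the ASID currently assigned to that ring.
 */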
static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                                 unsigned wi, unsigned ei,
                                 uint32_t vpn, uint32_t pte)
{
    CPUState *cs = env_cpu(env);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->vaddr = vpn;
        entry->attr = pte & 0xf;
    }
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;

        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        unsigned i;

        env->sregs[MPUENB] = 0;
        env->sregs[MPUCFG] = env->config->n_mpu_fg_segments;
        env->sregs[CACHEADRDIS] = 0;
        assert(env->config->n_mpu_bg_segments > 0 &&
               env->config->mpu_bg[0].vaddr == 0);
        for (i = 1; i < env->config->n_mpu_bg_segments; ++i) {
            assert(env->config->mpu_bg[i].vaddr >=
                   env->config->mpu_bg[i - 1].vaddr);
        }
    } else {
        env->sregs[CACHEATTR] = 0x22222222;
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;

    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Lookup xtensa TLB for the given virtual address.
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
static int xtensa_tlb_lookup(const CPUXtensaState *env,
                             uint32_t addr, bool dtlb,
                             uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;

        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);

            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);

    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        if (entry->variable && entry->asid) {
            tlb_flush_page(env_cpu(env), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}
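
/*
 * With the encoding above, attr = 7 for example grants read, write and
 * execute with write-back caching, while attr = 13 is the cache-isolate
 * mode and grants read/write access only.
 */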
/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
         [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
         [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
         [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
         [3] = PAGE_EXEC | PAGE_CACHE_WB,
         [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
         [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}
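
/*
 * When neither MMU, MPU nor region protection is configured, reset_mmu()
 * programs CACHEATTR to 0x22222222, so every 512 MB region initially uses
 * attribute 2: read/write/execute with the cache bypassed.
 */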
struct attr_pattern {
    uint32_t mask;
    uint32_t value;
};

static int attr_pattern_match(uint32_t attr,
                              const struct attr_pattern *pattern,
                              size_t n)
{
    size_t i;

    for (i = 0; i < n; ++i) {
        if ((attr & pattern[i].mask) == pattern[i].value) {
            return 1;
        }
    }
    return 0;
}

static unsigned mpu_attr_to_cpu_cache(uint32_t attr)
{
    static const struct attr_pattern cpu_c[] = {
        { .mask = 0x18f, .value = 0x089 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x180, .value = 0x180 },
    };

    unsigned type = 0;

    if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) {
        type |= XTENSA_MPU_TYPE_CPU_CACHE;
        if (attr & 0x010) {
            type |= XTENSA_MPU_TYPE_CPU_C;
        }
        if (attr & 0x020) {
            type |= XTENSA_MPU_TYPE_CPU_W;
        }
        if (attr & 0x040) {
            type |= XTENSA_MPU_TYPE_CPU_R;
        }
    }
    return type;
}

static unsigned mpu_attr_to_type(uint32_t attr)
{
    static const struct attr_pattern device_type[] = {
        { .mask = 0x1f6, .value = 0x000 },
        { .mask = 0x1f6, .value = 0x006 },
    };
    static const struct attr_pattern sys_nc_type[] = {
        { .mask = 0x1fe, .value = 0x018 },
        { .mask = 0x1fe, .value = 0x01e },
        { .mask = 0x18f, .value = 0x089 },
    };
    static const struct attr_pattern sys_c_type[] = {
        { .mask = 0x1f8, .value = 0x010 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x1f0, .value = 0x030 },
        { .mask = 0x180, .value = 0x180 },
    };
    static const struct attr_pattern b[] = {
        { .mask = 0x1f7, .value = 0x001 },
        { .mask = 0x1f7, .value = 0x007 },
        { .mask = 0x1ff, .value = 0x019 },
        { .mask = 0x1ff, .value = 0x01f },
    };

    unsigned type = 0;

    attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT;
    if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE;
        if (attr & 0x80) {
            type |= XTENSA_MPU_TYPE_INT;
        }
    }
    if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_NC;
    }
    if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_C;
        if (attr & 0x1) {
            type |= XTENSA_MPU_TYPE_SYS_C;
        }
        if (attr & 0x2) {
            type |= XTENSA_MPU_TYPE_SYS_W;
        }
        if (attr & 0x4) {
            type |= XTENSA_MPU_TYPE_SYS_R;
        }
    }
    if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) {
        type |= XTENSA_MPU_TYPE_B;
    }
    type |= mpu_attr_to_cpu_cache(attr);

    return type;
}

static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring)
{
    static const unsigned access[2][16] = {
        [0] = {
            [5] = PAGE_READ | PAGE_EXEC,
            [6] = PAGE_READ | PAGE_WRITE,
            [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [9] = PAGE_READ | PAGE_WRITE,
            [10] = PAGE_READ | PAGE_WRITE,
            [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
        [1] = {
            [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [11] = PAGE_READ | PAGE_EXEC,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
    };
    unsigned rv;
    unsigned type;

    type = mpu_attr_to_cpu_cache(attr);
    rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >>
                           XTENSA_MPU_ACC_RIGHTS_SHIFT];

    if (type & XTENSA_MPU_TYPE_CPU_CACHE) {
        rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT;
    } else {
        rv |= PAGE_CACHE_BYPASS;
    }
    return rv;
}

static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}
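
/*
 * Throughout this file is_write encodes the access type rather than a
 * boolean: 0 is a load, 1 is a store and 2 is an instruction fetch, which
 * is also why "dtlb = is_write != 2" is used when picking a TLB.
 */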
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = env_cpu(env);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}
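
/*
 * The page table is a linear array of 4-byte PTEs based at PTEVADDR, so the
 * PTE address works out to PTEVADDR + (vaddr >> 12) * 4, which is what
 * (PTEVADDR | (vaddr >> 10)) & 0xfffffffc evaluates to as long as PTEVADDR
 * is 4 MB aligned (otherwise the OR would not behave like an add).
 */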
static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n,
                             uint32_t vaddr, unsigned *segment)
{
    unsigned nhits = 0;
    unsigned i;

    for (i = 0; i < n; ++i) {
        if (vaddr >= entry[i].vaddr &&
            (i == n - 1 || vaddr < entry[i + 1].vaddr)) {
            *segment = i;
            ++nhits;
        }
    }
    return nhits;
}

void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v)
{
    v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1;

    if (v != env->sregs[MPUENB]) {
        env->sregs[MPUENB] = v;
        tlb_flush(env_cpu(env));
    }
}

void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
{
    unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
        env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
        env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
        tlb_flush(env_cpu(env));
    }
}

uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].vaddr |
            extract32(env->sregs[MPUENB], segment, 1);
    } else {
        return 0;
    }
}

uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].attr;
    } else {
        return 0;
    }
}

uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
{
    unsigned nhits;
    unsigned segment = XTENSA_MPU_PROBE_B;
    unsigned bg_segment;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              v, &segment);
    if (nhits > 1) {
        HELPER(exception_cause_vaddr)(env, env->pc,
                                      LOAD_STORE_TLB_MULTI_HIT_CAUSE, v);
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          v, &bg_segment);
        return env->config->mpu_bg[bg_segment].attr | segment;
    }
    return 0;
}

static int get_physical_addr_mpu(CPUXtensaState *env,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access)
{
    unsigned nhits;
    unsigned segment;
    uint32_t attr;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              vaddr, &segment);
    if (nhits > 1) {
        return is_write < 2 ?
            LOAD_STORE_TLB_MULTI_HIT_CAUSE :
            INST_TLB_MULTI_HIT_CAUSE;
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        attr = env->mpu_fg[segment].attr;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          vaddr, &segment);
        attr = env->config->mpu_bg[segment].attr;
    }

    *access = mpu_attr_to_access(attr, mmu_idx);
    if (!is_access_granted(*access, is_write)) {
        return is_write < 2 ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }
    *paddr = vaddr;
    *page_size = env->config->mpu_align;
    return 0;
}

/*!
 * Convert virtual address to physical addr.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx,
                                     paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

static void dump_tlb(CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
                    qemu_printf("\tVaddr      Paddr      ASID Attr RWX Cache\n"
                                "\t---------- ---------- ---- ---- --- -------\n");
                }
                qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

static void dump_mpu(CPUXtensaState *env,
                     const xtensa_mpu_entry *entry, unsigned n)
{
    unsigned i;

    qemu_printf("\t%s Vaddr      Attr       Ring0 Ring1 System Type   CPU cache\n"
                "\t%s ---------- ---------- ----- ----- ------------- ---------\n",
                env ? "En" : "  ",
                env ? "--" : "  ");

    for (i = 0; i < n; ++i) {
        uint32_t attr = entry[i].attr;
        unsigned access0 = mpu_attr_to_access(attr, 0);
        unsigned access1 = mpu_attr_to_access(attr, 1);
        unsigned type = mpu_attr_to_type(attr);
        char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' ';

        qemu_printf("\t %c 0x%08x 0x%08x %c%c%c %c%c%c ",
                    env ?
                    ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ',
                    entry[i].vaddr, attr,
                    (access0 & PAGE_READ) ? 'R' : '-',
                    (access0 & PAGE_WRITE) ? 'W' : '-',
                    (access0 & PAGE_EXEC) ? 'X' : '-',
                    (access1 & PAGE_READ) ? 'R' : '-',
                    (access1 & PAGE_WRITE) ? 'W' : '-',
                    (access1 & PAGE_EXEC) ? 'X' : '-');

        switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) {
        case XTENSA_MPU_SYSTEM_TYPE_DEVICE:
            qemu_printf("Device %cB %3s\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_INT) ? "int" : "");
            break;

        case XTENSA_MPU_SYSTEM_TYPE_NC:
            qemu_printf("Sys NC %cB %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;

        case XTENSA_MPU_SYSTEM_TYPE_C:
            qemu_printf("Sys C %c%c%c %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;

        default:
            qemu_printf("Unknown\n");
            break;
        }
    }
}

void dump_mmu(CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        qemu_printf("ITLB:\n");
        dump_tlb(env, false);
        qemu_printf("\nDTLB:\n");
        dump_tlb(env, true);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        qemu_printf("Foreground map:\n");
        dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
        qemu_printf("\nBackground map:\n");
        dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
    } else {
        qemu_printf("No TLB for this CPU core\n");
    }
}