/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"

static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
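
/*
 * Worked example (illustrative, not part of the original source): the 'A'
 * field occupies bits [4:3] of a pmpcfg byte. For cfg = 0x0f (R|W|X with
 * A = 01), (0x0f >> 3) & 0x3 = 1, i.e. PMP_AMATCH_TOR.
 */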

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    /* mseccfg.RLB is set */
    if (MSECCFG_RLB_ISSET(env)) {
        return 0;
    }

    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}

/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds checks and relevant lock bit.
 */
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_cpu_cfg(env)->ext_smepmp) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
            /* If !mseccfg.MML then ignore writes with encoding RW=01 */
            if ((val & PMP_WRITE) && !(val & PMP_READ) &&
                !MSECCFG_MML_ISSET(env)) {
                return false;
            }
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule_addr(env, pmp_index);
            return true;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }

    return false;
}

void pmp_unlock_entries(CPURISCVState *env)
{
    uint32_t pmp_num = pmp_get_num_rules(env);
    uint32_t i;

    for (i = 0; i < pmp_num; i++) {
        env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
    }
}

static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}
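
/*
 * Worked example (illustrative): a pmpaddr value ending in binary ...01
 * encodes a 16-byte NAPOT range. For a = 0x20001,
 * a = (0x20001 << 2) | 0x3 = 0x80007, so *sa = a & (a + 1) = 0x80000 and
 * *ea = a | (a + 1) = 0x8000f, i.e. the 16-byte range 0x80000-0x8000f.
 */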

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    hwaddr sa = 0u;
    hwaddr ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}
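
/*
 * Worked example (illustrative): with a_field = PMP_AMATCH_NA4 and
 * pmpaddr = 0x20000, sa = 0x20000 << 2 = 0x80000 and ea = 0x80003,
 * i.e. a single 4-byte region at 0x80000.
 */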

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        ret = false;
        *allowed_privs = 0;
        return ret;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}

/*
 * Check if the address has required RWX privs to complete desired operation
 * Return true if a pmp rule match or default match
 * Return false if no match
 */
bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    int pmp_size = 0;
    hwaddr s = 0;
    hwaddr e = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }
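
    /*
     * Worked example (illustrative): TARGET_PAGE_MASK is negative, e.g.
     * ~0xfff for 4 KiB pages, so with addr = 0x80000f00 the expression
     * -(addr | TARGET_PAGE_MASK) evaluates to 0x100: the 256 bytes from
     * addr up to the end of its page.
     */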

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            *allowed_privs = 0;
            return false;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * Smepmp spec.
         */
        const uint8_t smepmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
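
        /*
         * Illustrative mapping: the expression above packs the cfg bits as
         * L:R:W:X into a 4-bit index (L in bit 3, X in bit 0). For example,
         * cfg = PMP_LOCK | PMP_EXEC gives smepmp_operation = 0b1001 = 9,
         * which the Smepmp truth table treats as an M-mode-only executable
         * region.
         */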

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If mseccfg.MML Bit is not set, do pmp priv check
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
                 */
                if (mode == PRV_M) {
                    switch (smepmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (smepmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If matching address range was found, the protection bits
             * defined with PMP must be used. We shouldn't fallback on
             * finding default privileges.
             */
            return (privs & *allowed_privs) == privs;
        }
    }

    /* No rule matched */
    return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
    bool modified = false;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If PMP permission of any addr has been changed, flush TLB pages. */
    if (modified) {
        pmp_update_rule_nums(env);
        tlb_flush(env_cpu(env));
    }
}
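
/*
 * Usage note (illustrative): pmpcfg_nums is 2 << mxl, i.e. 4 packed 8-bit
 * configs per CSR on RV32 (mxl = 1) and 8 on RV64 (mxl = 2, where only
 * even-numbered pmpcfg CSRs exist). A write to pmpcfg2 on RV64
 * (reg_index = 2) therefore updates PMP entries 8 through 15.
 */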

/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}

/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    bool is_next_cfg_tor = false;

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

            if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
                env->pmp_state.pmp[addr_index].addr_reg = val;
                pmp_update_rule_addr(env, addr_index);
                if (is_next_cfg_tor) {
                    pmp_update_rule_addr(env, addr_index + 1);
                }
                tlb_flush(env_cpu(env));
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}

/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it's already 0 and if any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->ext_smepmp) {
        /* Sticky bits */
        val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
        if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
            tlb_flush(env_cpu(env));
        }
    } else {
        val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
    }

    env->mseccfg = val;
}
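
/*
 * Usage note (illustrative): because MMWP and MML are sticky, a guest that
 * executes a write of MSECCFG_MML and later writes 0 to mseccfg still reads
 * back MML as set; only the non-sticky bits change.
 */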

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions cover only part of the TLB page, which
 * may split the page into regions with different permissions.
 * For example, if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region covers only part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
    hwaddr pmp_sa;
    hwaddr pmp_ea;
    hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will not
     * be split into regions with different permissions by PMP so we set the
     * size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (whole or part of) the TLB
         * page really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it covers only part of the TLB page, set the size to 1 since
         * the allowed permissions of the region may differ from other
         * regions of the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP so we set the size
     * to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}