/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * PMP (Physical Memory Protection) is as yet unused and needs testing.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"
static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
    uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
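/*
 * For reference, the 2-bit A field encodings from the RISC-V privileged
 * spec: 0 = OFF (entry disabled), 1 = TOR (top of range), 2 = NA4
 * (naturally aligned 4-byte region), 3 = NAPOT (naturally aligned
 * power-of-two region of at least 8 bytes).
 */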
/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    /*
     * In TOR mode, need to check the lock bit of the next pmp
     * (if there is a next)
     */
    const uint8_t a_field =
        pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
    if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
        (PMP_AMATCH_TOR == a_field)) {
        return 1;
    }

    return 0;
}
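/*
 * Illustrative example (hypothetical register values): if entry 1 is a
 * locked TOR rule, entry 0 is also treated as locked, because pmpaddr0
 * supplies the bottom of entry 1's range; letting pmpaddr0 change would
 * silently move a locked region.
 */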
/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}
/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}
/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds checks and relevant lock bit.
 */
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, pmp_index)) {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule(env, pmp_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }
}
static void pmp_decode_napot(target_ulong a, target_ulong *sa,
                             target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     */
    if (a == -1) {
        /* All-ones encodes the largest range; decode it directly to
         * avoid an out-of-range shift below. */
        *sa = 0u;
        *ea = -1;
        return;
    } else {
        target_ulong t1 = ctz64(~a);
        target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
        target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
        *sa = base;
        *ea = base + range;
    }
}
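/*
 * Worked example with a hypothetical value: pmpaddr = 0x13 (binary
 * ...10011) has two trailing ones, so t1 = 2; base = (0x13 & ~0x3) << 2
 * = 0x40 and range = (1 << 5) - 1 = 31, giving the 32-byte region
 * [0x40, 0x5f].  This matches the 'aaaa...a011' row of the table above.
 */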
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}
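/*
 * Illustrative TOR example (hypothetical register values): with
 * pmpaddr0 = 0x20000000 and pmpaddr1 = 0x20001000, a TOR rule in entry 1
 * covers [0x80000000, 0x80003fff]: both addresses are shifted left by 2
 * because pmpaddr registers hold bits [xx+2:2] of the physical address.
 */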
void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}
/*
 * Convert cfg/addr reg values here into simple 'sa' --> start address and
 * 'ea' --> end address values.
 * This function is called relatively infrequently whereas the check that
 * an address is within a pmp rule is called often, so optimise that one.
 */
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
    pmp_update_rule_addr(env, pmp_index);
    pmp_update_rule_nums(env);
}
static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
                           target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa)
        && (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    }

    return result;
}
/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
    target_ulong mode)
{
    bool ret;

    if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules.  We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}
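/*
 * Concrete consequence (hypothetical setup): if the only active rule
 * covers DRAM, an S-mode load from an unmatched MMIO address fails here,
 * while the same M-mode load succeeds with full RWX.
 */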
/*
 * Check if the address has required RWX privs to complete desired operation
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
    target_ulong mode)
{
    int i = 0;
    int ret = -1;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, addr, size, privs,
                                          allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_feature(env, RISCV_FEATURE_MMU)) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            ret = 0;
            break;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * If the PMP entry is not off and the address is in range, do the priv
         * check
         */
        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
            if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
            }

            ret = ((privs & *allowed_privs) == privs);
            break;
        }
    }

    /* No rule matched */
    if (ret == -1) {
        return pmp_hart_has_privs_default(env, addr, size, privs,
                                          allowed_privs, mode);
    }

    return ret == 1 ? true : false;
}
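/*
 * Note on match priority (hypothetical overlap): because the loop breaks
 * on the first entry that fully contains the access, a read-only rule in
 * entry 0 wins over an RWX rule in entry 1 covering the same region, as
 * the priv spec requires the lowest-numbered matching entry to take
 * effect.
 */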
/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
    target_ulong val)
{
    int i;
    uint8_t cfg_val;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - incorrect address\n");
        return;
    }

    for (i = 0; i < sizeof(target_ulong); i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If PMP permission of any addr has been changed, flush TLB pages. */
    tlb_flush(env_cpu(env));
}
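/*
 * Layout example: on RV64 each pmpcfg CSR packs eight 8-bit entries, so a
 * write to pmpcfg0 (reg_index 0) updates entries 0-7 and pmpcfg2
 * (reg_index 2) updates entries 8-15; odd-numbered pmpcfg CSRs do not
 * exist on RV64, which is what the (reg_index & 1) check above rejects.
 */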
/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;

    for (i = 0; i < sizeof(target_ulong); i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}
/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
    target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);

    if (addr_index < MAX_RISCV_PMPS) {
        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}
/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}
/*
 * Calculate the TLB size if the start address or the end address of
 * the PMP entry is present in this TLB page.
 */
static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
                                     target_ulong tlb_sa,
                                     target_ulong tlb_ea)
{
    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;

    /* PMP region entirely inside the page */
    if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
        return pmp_ea - pmp_sa + 1;
    }

    /* PMP region starts inside the page and extends past its end */
    if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
        return tlb_ea - pmp_sa + 1;
    }

    /* PMP region starts before the page and ends inside it */
    if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
        return pmp_ea - tlb_sa + 1;
    }

    /* No overlap between the PMP region and this page */
    return 0;
}
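/*
 * The three overlap cases handled above, sketched (hypothetical layout):
 *
 *   TLB page:     tlb_sa |------------------| tlb_ea
 *   case 1:            [pmp_sa....pmp_ea]              (fully inside)
 *   case 2:                    [pmp_sa..........pmp_ea] (overlaps the end)
 *   case 3:  [pmp_sa..........pmp_ea]                  (overlaps the start)
 */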
/*
 * Check if there is a PMP entry whose range covers this page. If so,
 * try to find the minimum granularity for the TLB size.
 */
bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
                         target_ulong *tlb_size)
{
    int i;
    target_ulong val;
    target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
        if (val) {
            if (*tlb_size == 0 || *tlb_size > val) {
                *tlb_size = val;
            }
        }
    }

    if (*tlb_size != 0) {
        return true;
    }

    return false;
}
/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}