/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);

        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}
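
/*
 * Recompute the effective pointer-masking state (cur_pmmask/cur_pmbase)
 * for the current privilege mode from the experimental RVJ CSRs. When
 * pointer masking is disabled this degenerates to mask = -1, base = 0,
 * i.e. addresses pass through unchanged.
 */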
void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif

    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}
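
/*
 * Note: consumers of cur_pmmask/cur_pmbase (via the PM_MASK_ENABLED and
 * PM_BASE_ENABLED TB flags set up in cpu_get_tb_cpu_state() above) are
 * expected to apply pointer masking as
 *   addr = (addr & cur_pmmask) | cur_pmbase
 * before memory accesses; see the address-adjustment helpers in the
 * translator.
 */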

#ifndef CONFIG_USER_ONLY

/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt)
 * 14 "
 * 15 "
 * 16 "
 * 18 Debug/trace interrupt
 * 20 (Reserved interrupt)
 * 22 "
 * 24 "
 * 26 "
 * 28 "
 * 30 (Reserved for standard reporting of bus or system errors)
 */

static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 18, 20, 22, 24, 26, 28, 30 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
        return -EINVAL;
    }

    if (out_irq) {
        *out_irq = hviprio_index2irq[index];
    }

    if (out_rdzero) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}

/*
 * Default priorities of local interrupts are defined in the
 * RISC-V Advanced Interrupt Architecture specification.
 *
 * ----------------------------------------------------------------
 * Default  |
 * Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest | 63 (3f), 62 (3e), 31 (1f), 30 (1e), 61 (3d), 60 (3c),
 *          | 59 (3b), 58 (3a), 29 (1d), 28 (1c), 57 (39), 56 (38),
 *          | 55 (37), 54 (36), 27 (1b), 26 (1a), 53 (35), 52 (34),
 *          | 51 (33), 50 (32), 25 (19), 24 (18), 49 (31), 48 (30)
 *          |
 *          | 11 (0b),  3 (03),  7 (07)
 *          |  9 (09),  1 (01),  5 (05)
 *          |     12 (0c)
 *          | 10 (0a),  2 (02),  6 (06)
 *          |
 *          | 47 (2f), 46 (2e), 23 (17), 22 (16), 45 (2d), 44 (2c),
 *          | 43 (2b), 42 (2a), 21 (15), 20 (14), 41 (29), 40 (28),
 *          | 39 (27), 38 (26), 19 (13), 18 (12), 37 (25), 36 (24),
 *   Lowest | 35 (23), 34 (22), 17 (11), 16 (10), 33 (21), 32 (20)
 * ----------------------------------------------------------------
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts with highest default priority */
    [63] = IPRIO_DEFAULT_UPPER,
    [62] = IPRIO_DEFAULT_UPPER + 1,
    [31] = IPRIO_DEFAULT_UPPER + 2,
    [30] = IPRIO_DEFAULT_UPPER + 3,
    [61] = IPRIO_DEFAULT_UPPER + 4,
    [60] = IPRIO_DEFAULT_UPPER + 5,

    [59] = IPRIO_DEFAULT_UPPER + 6,
    [58] = IPRIO_DEFAULT_UPPER + 7,
    [29] = IPRIO_DEFAULT_UPPER + 8,
    [28] = IPRIO_DEFAULT_UPPER + 9,
    [57] = IPRIO_DEFAULT_UPPER + 10,
    [56] = IPRIO_DEFAULT_UPPER + 11,

    [55] = IPRIO_DEFAULT_UPPER + 12,
    [54] = IPRIO_DEFAULT_UPPER + 13,
    [27] = IPRIO_DEFAULT_UPPER + 14,
    [26] = IPRIO_DEFAULT_UPPER + 15,
    [53] = IPRIO_DEFAULT_UPPER + 16,
    [52] = IPRIO_DEFAULT_UPPER + 17,

    [51] = IPRIO_DEFAULT_UPPER + 18,
    [50] = IPRIO_DEFAULT_UPPER + 19,
    [25] = IPRIO_DEFAULT_UPPER + 20,
    [24] = IPRIO_DEFAULT_UPPER + 21,
    [49] = IPRIO_DEFAULT_UPPER + 22,
    [48] = IPRIO_DEFAULT_UPPER + 23,

    /* Standard interrupts with M-mode default priority */
    [11] = IPRIO_DEFAULT_M,
    [3]  = IPRIO_DEFAULT_M + 1,
    [7]  = IPRIO_DEFAULT_M + 2,

    /* Standard interrupts with S-mode default priority */
    [9]  = IPRIO_DEFAULT_S,
    [1]  = IPRIO_DEFAULT_S + 1,
    [5]  = IPRIO_DEFAULT_S + 2,

    /* Supervisor guest external interrupt */
    [12] = IPRIO_DEFAULT_SGEXT,

    /* Standard interrupts with VS-mode default priority */
    [10] = IPRIO_DEFAULT_VS,
    [2]  = IPRIO_DEFAULT_VS + 1,
    [6]  = IPRIO_DEFAULT_VS + 2,

    /* Custom interrupts with lowest default priority */
    [47] = IPRIO_DEFAULT_LOWER,
    [46] = IPRIO_DEFAULT_LOWER + 1,
    [23] = IPRIO_DEFAULT_LOWER + 2,
    [22] = IPRIO_DEFAULT_LOWER + 3,
    [45] = IPRIO_DEFAULT_LOWER + 4,
    [44] = IPRIO_DEFAULT_LOWER + 5,

    [43] = IPRIO_DEFAULT_LOWER + 6,
    [42] = IPRIO_DEFAULT_LOWER + 7,
    [21] = IPRIO_DEFAULT_LOWER + 8,
    [20] = IPRIO_DEFAULT_LOWER + 9,
    [41] = IPRIO_DEFAULT_LOWER + 10,
    [40] = IPRIO_DEFAULT_LOWER + 11,

    [39] = IPRIO_DEFAULT_LOWER + 12,
    [38] = IPRIO_DEFAULT_LOWER + 13,
    [19] = IPRIO_DEFAULT_LOWER + 14,
    [18] = IPRIO_DEFAULT_LOWER + 15,
    [37] = IPRIO_DEFAULT_LOWER + 16,
    [36] = IPRIO_DEFAULT_LOWER + 17,

    [35] = IPRIO_DEFAULT_LOWER + 18,
    [34] = IPRIO_DEFAULT_LOWER + 19,
    [17] = IPRIO_DEFAULT_LOWER + 20,
    [16] = IPRIO_DEFAULT_LOWER + 21,
    [33] = IPRIO_DEFAULT_LOWER + 22,
    [32] = IPRIO_DEFAULT_LOWER + 23,
};

uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}
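
/*
 * Pick the highest-priority pending interrupt. Priority numbers follow the
 * AIA convention of "smaller value wins": an explicit priority from iprio[]
 * is used if set, the external interrupt falls back to extirq_def_prio, and
 * everything else falls back to its default priority relative to the
 * external interrupt.
 */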
static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        return irq;
    }

    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}

static uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;

    return (env->mip | vsgein) & env->mie;
}

int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    irqs, env->miprio);
}

int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs, env->siprio);
}
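
/*
 * VS-level interrupts are presented to the guest using the corresponding
 * S-mode interrupt numbers, i.e. shifted down by one (for example
 * MIP_VSSIP is seen as the supervisor software interrupt), hence the
 * "irqs >> 1" when consulting hviprio below.
 */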
int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs >> 1, env->hviprio);
}

static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    int virq;
    uint64_t irqs, pending, mie, hsie, vsie;

    /* Determine interrupt enable state of all privilege modes */
    if (riscv_cpu_virt_enabled(env)) {
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* Determine all pending interrupts */
    pending = riscv_cpu_all_pending(env);

    /* Check M-mode interrupts */
    irqs = pending & ~env->mideleg & -mie;
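    /*
     * Note: mie/hsie/vsie are 0 or 1, so "& -mie" is either "& 0"
     * (interrupts globally disabled at that level) or "& ~0" (all bits
     * kept); the negation turns the boolean into a full-width mask.
     */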
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Check HS-mode interrupts */
    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Check VS-mode interrupts */
    irqs = pending & env->mideleg & env->hideleg & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs >> 1, env->hviprio);
        return (virq <= 0) ? virq : virq + 1;
    }

    /* Indicate no pending interrupt */
    return RISCV_EXCP_NONE;
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);

    if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This means
         * any guest external interrupt which is triggered while the Guest/VM
         * is not running (i.e. V=0) will be missed by QEMU, resulting in a
         * guest with sluggish response to serial console input and other
         * I/O events.
         *
         * To solve this, we check and inject interrupts after setting V=1.
         */
        riscv_cpu_update_mip(env_archcpu(env), 0, 0);
    }
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint64_t gein, vsgein = 0, old = env->mip;
    bool locked = false;

    if (riscv_cpu_virt_enabled(env)) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    }

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip | vsgein) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and
 * its TLB page. Returns 0 if the permission check was successful.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if the TLB page for addr is not set.
 * @addr: The physical address whose permission is to be checked
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;
    hwaddr ppn;
    RISCVCPU *cpu = env_archcpu(env);
    int napot_bits = 0;
    target_ulong napot_mask;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }

    /* status.SUM will be ignored if executing on the background registers */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }
= (levels
- 1) * ptidxbits
;
873 #if !TCG_OVERSIZED_GUEST
876 for (i
= 0; i
< levels
; i
++, ptshift
-= ptidxbits
) {
879 idx
= (addr
>> (PGSHIFT
+ ptshift
)) &
880 ((1 << (ptidxbits
+ widened
)) - 1);
882 idx
= (addr
>> (PGSHIFT
+ ptshift
)) &
883 ((1 << ptidxbits
) - 1);

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }
= get_physical_address_pmp(env
, &pmp_prot
, NULL
, pte_addr
,
913 sizeof(target_ulong
),
914 MMU_DATA_LOAD
, PRV_S
);
915 if (pmp_ret
!= TRANSLATE_SUCCESS
) {
916 return TRANSLATE_PMP_FAIL
;
920 if (riscv_cpu_mxl(env
) == MXL_RV32
) {
921 pte
= address_space_ldl(cs
->as
, pte_addr
, attrs
, &res
);
923 pte
= address_space_ldq(cs
->as
, pte_addr
, attrs
, &res
);
926 if (res
!= MEMTX_OK
) {
927 return TRANSLATE_FAIL
;

        if (riscv_cpu_sxl(env) == MXL_RV32) {
            ppn = pte >> PTE_PPN_SHIFT;
        } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
        } else {
            ppn = pte >> PTE_PPN_SHIFT;
            if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
                return TRANSLATE_FAIL;
            }
        }
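        /*
         * With Svpbmt/Svnapot enabled, the high PTE bits (N, PBMT) are
         * attribute fields rather than part of the PPN, so the PPN must
         * be extracted through PTE_PPN_MASK; without those extensions,
         * any nonzero high bits make the PTE invalid.
         */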

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
                return TRANSLATE_FAIL;
            }
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
                                             false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;

            if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
                napot_bits = ctzl(ppn) + 1;
                if ((i != (levels - 1)) || (napot_bits != 4)) {
                    return TRANSLATE_FAIL;
                }
            }
= (1 << napot_bits
) - 1;
1033 *physical
= (((ppn
& ~napot_mask
) | (vpn
& napot_mask
) |
1034 (vpn
& (((target_ulong
)1 << ptshift
) - 1))
1035 ) << PGSHIFT
) | (addr
& ~TARGET_PAGE_MASK
);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t stap_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        stap_mode = SATP32_MODE;
    } else {
        stap_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, stap_mode);
    } else {
        vm = get_field(env->hgatp, stap_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    cpu_loop_exit_restore(cs, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    cpu_loop_exit_restore(cs, retaddr);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          TARGET_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}
1324 * Adapted from Spike's processor_t::take_trap.
1327 void riscv_cpu_do_interrupt(CPUState
*cs
)
1329 #if !defined(CONFIG_USER_ONLY)
1331 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1332 CPURISCVState
*env
= &cpu
->env
;
1333 bool write_gva
= false;

    /* cs->exception_index is 32 bits wide unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            break;
        case RISCV_EXCP_ILLEGAL_INST:
            tval = env->bins;
            break;
        default:
            break;
        }

        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }
->mhartid
, async
, cause
, env
->pc
, tval
,
1396 riscv_cpu_get_trap_name(cause
, async
));
1398 qemu_log_mask(CPU_LOG_INT
,
1399 "%s: hart:"TARGET_FMT_ld
", async:%d, cause:"TARGET_FMT_lx
", "
1400 "epc:0x"TARGET_FMT_lx
", tval:0x"TARGET_FMT_lx
", desc=%s\n",
1401 __func__
, env
->mhartid
, async
, cause
, env
->pc
, tval
,
1402 riscv_cpu_get_trap_name(cause
, async
));

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of the
                 * HS-mode interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
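        /*
         * Vectored trap entry: when the low bits of stvec select vectored
         * mode ((stvec & 3) == 1) and the trap is an interrupt, the PC is
         * offset by 4 * cause from the 4-byte-aligned base address;
         * synchronous exceptions always enter at the base.
         */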
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
->mcause
= cause
| ~(((target_ulong
)-1) >> async
);
1478 env
->mepc
= env
->pc
;
1480 env
->mtval2
= mtval2
;
1481 env
->pc
= (env
->mtvec
>> 2 << 2) +
1482 ((async
&& (env
->mtvec
& 3) == 1) ? cause
* 4 : 0);
1483 riscv_cpu_set_mode(env
, PRV_M
);

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}