/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
21 #include "qemu/error-report.h"
22 #include "sysemu/kvm.h"
23 #include "migration/cpu.h"
24 #include "sysemu/cpu-timers.h"
27 static bool pmp_needed(void *opaque
)
29 RISCVCPU
*cpu
= opaque
;
34 static int pmp_post_load(void *opaque
, int version_id
)
36 RISCVCPU
*cpu
= opaque
;
37 CPURISCVState
*env
= &cpu
->env
;
40 for (i
= 0; i
< MAX_RISCV_PMPS
; i
++) {
41 pmp_update_rule_addr(env
, i
);
43 pmp_update_rule_nums(env
);
48 static const VMStateDescription vmstate_pmp_entry
= {
49 .name
= "cpu/pmp/entry",
51 .minimum_version_id
= 1,
52 .fields
= (const VMStateField
[]) {
53 VMSTATE_UINTTL(addr_reg
, pmp_entry_t
),
54 VMSTATE_UINT8(cfg_reg
, pmp_entry_t
),
59 static const VMStateDescription vmstate_pmp
= {
62 .minimum_version_id
= 1,
64 .post_load
= pmp_post_load
,
65 .fields
= (const VMStateField
[]) {
66 VMSTATE_STRUCT_ARRAY(env
.pmp_state
.pmp
, RISCVCPU
, MAX_RISCV_PMPS
,
67 0, vmstate_pmp_entry
, pmp_entry_t
),
72 static bool hyper_needed(void *opaque
)
74 RISCVCPU
*cpu
= opaque
;
75 CPURISCVState
*env
= &cpu
->env
;
77 return riscv_has_ext(env
, RVH
);
80 static const VMStateDescription vmstate_hyper
= {
83 .minimum_version_id
= 3,
84 .needed
= hyper_needed
,
85 .fields
= (const VMStateField
[]) {
86 VMSTATE_UINTTL(env
.hstatus
, RISCVCPU
),
87 VMSTATE_UINTTL(env
.hedeleg
, RISCVCPU
),
88 VMSTATE_UINT64(env
.hideleg
, RISCVCPU
),
89 VMSTATE_UINTTL(env
.hcounteren
, RISCVCPU
),
90 VMSTATE_UINTTL(env
.htval
, RISCVCPU
),
91 VMSTATE_UINTTL(env
.htinst
, RISCVCPU
),
92 VMSTATE_UINTTL(env
.hgatp
, RISCVCPU
),
93 VMSTATE_UINTTL(env
.hgeie
, RISCVCPU
),
94 VMSTATE_UINTTL(env
.hgeip
, RISCVCPU
),
95 VMSTATE_UINT64(env
.hvien
, RISCVCPU
),
96 VMSTATE_UINT64(env
.hvip
, RISCVCPU
),
97 VMSTATE_UINT64(env
.htimedelta
, RISCVCPU
),
98 VMSTATE_UINT64(env
.vstimecmp
, RISCVCPU
),
100 VMSTATE_UINTTL(env
.hvictl
, RISCVCPU
),
101 VMSTATE_UINT8_ARRAY(env
.hviprio
, RISCVCPU
, 64),
103 VMSTATE_UINT64(env
.vsstatus
, RISCVCPU
),
104 VMSTATE_UINTTL(env
.vstvec
, RISCVCPU
),
105 VMSTATE_UINTTL(env
.vsscratch
, RISCVCPU
),
106 VMSTATE_UINTTL(env
.vsepc
, RISCVCPU
),
107 VMSTATE_UINTTL(env
.vscause
, RISCVCPU
),
108 VMSTATE_UINTTL(env
.vstval
, RISCVCPU
),
109 VMSTATE_UINTTL(env
.vsatp
, RISCVCPU
),
110 VMSTATE_UINTTL(env
.vsiselect
, RISCVCPU
),
111 VMSTATE_UINT64(env
.vsie
, RISCVCPU
),
113 VMSTATE_UINTTL(env
.mtval2
, RISCVCPU
),
114 VMSTATE_UINTTL(env
.mtinst
, RISCVCPU
),
116 VMSTATE_UINTTL(env
.stvec_hs
, RISCVCPU
),
117 VMSTATE_UINTTL(env
.sscratch_hs
, RISCVCPU
),
118 VMSTATE_UINTTL(env
.sepc_hs
, RISCVCPU
),
119 VMSTATE_UINTTL(env
.scause_hs
, RISCVCPU
),
120 VMSTATE_UINTTL(env
.stval_hs
, RISCVCPU
),
121 VMSTATE_UINTTL(env
.satp_hs
, RISCVCPU
),
122 VMSTATE_UINT64(env
.mstatus_hs
, RISCVCPU
),
124 VMSTATE_END_OF_LIST()
128 static bool vector_needed(void *opaque
)
130 RISCVCPU
*cpu
= opaque
;
131 CPURISCVState
*env
= &cpu
->env
;
133 return riscv_has_ext(env
, RVV
);
136 static const VMStateDescription vmstate_vector
= {
137 .name
= "cpu/vector",
139 .minimum_version_id
= 2,
140 .needed
= vector_needed
,
141 .fields
= (const VMStateField
[]) {
142 VMSTATE_UINT64_ARRAY(env
.vreg
, RISCVCPU
, 32 * RV_VLEN_MAX
/ 64),
143 VMSTATE_UINTTL(env
.vxrm
, RISCVCPU
),
144 VMSTATE_UINTTL(env
.vxsat
, RISCVCPU
),
145 VMSTATE_UINTTL(env
.vl
, RISCVCPU
),
146 VMSTATE_UINTTL(env
.vstart
, RISCVCPU
),
147 VMSTATE_UINTTL(env
.vtype
, RISCVCPU
),
148 VMSTATE_BOOL(env
.vill
, RISCVCPU
),
149 VMSTATE_END_OF_LIST()
153 static bool pointermasking_needed(void *opaque
)
155 RISCVCPU
*cpu
= opaque
;
156 CPURISCVState
*env
= &cpu
->env
;
158 return riscv_has_ext(env
, RVJ
);
161 static const VMStateDescription vmstate_pointermasking
= {
162 .name
= "cpu/pointer_masking",
164 .minimum_version_id
= 1,
165 .needed
= pointermasking_needed
,
166 .fields
= (const VMStateField
[]) {
167 VMSTATE_UINTTL(env
.mmte
, RISCVCPU
),
168 VMSTATE_UINTTL(env
.mpmmask
, RISCVCPU
),
169 VMSTATE_UINTTL(env
.mpmbase
, RISCVCPU
),
170 VMSTATE_UINTTL(env
.spmmask
, RISCVCPU
),
171 VMSTATE_UINTTL(env
.spmbase
, RISCVCPU
),
172 VMSTATE_UINTTL(env
.upmmask
, RISCVCPU
),
173 VMSTATE_UINTTL(env
.upmbase
, RISCVCPU
),
175 VMSTATE_END_OF_LIST()
179 static bool rv128_needed(void *opaque
)
181 RISCVCPU
*cpu
= opaque
;
182 CPURISCVState
*env
= &cpu
->env
;
184 return env
->misa_mxl_max
== MXL_RV128
;
187 static const VMStateDescription vmstate_rv128
= {
190 .minimum_version_id
= 1,
191 .needed
= rv128_needed
,
192 .fields
= (const VMStateField
[]) {
193 VMSTATE_UINTTL_ARRAY(env
.gprh
, RISCVCPU
, 32),
194 VMSTATE_UINT64(env
.mscratchh
, RISCVCPU
),
195 VMSTATE_UINT64(env
.sscratchh
, RISCVCPU
),
196 VMSTATE_END_OF_LIST()
201 static bool kvmtimer_needed(void *opaque
)
203 return kvm_enabled();
206 static int cpu_kvmtimer_post_load(void *opaque
, int version_id
)
208 RISCVCPU
*cpu
= opaque
;
209 CPURISCVState
*env
= &cpu
->env
;
211 env
->kvm_timer_dirty
= true;
215 static const VMStateDescription vmstate_kvmtimer
= {
216 .name
= "cpu/kvmtimer",
218 .minimum_version_id
= 1,
219 .needed
= kvmtimer_needed
,
220 .post_load
= cpu_kvmtimer_post_load
,
221 .fields
= (const VMStateField
[]) {
222 VMSTATE_UINT64(env
.kvm_timer_time
, RISCVCPU
),
223 VMSTATE_UINT64(env
.kvm_timer_compare
, RISCVCPU
),
224 VMSTATE_UINT64(env
.kvm_timer_state
, RISCVCPU
),
225 VMSTATE_END_OF_LIST()
230 static bool debug_needed(void *opaque
)
232 RISCVCPU
*cpu
= opaque
;
234 return cpu
->cfg
.debug
;
237 static int debug_post_load(void *opaque
, int version_id
)
239 RISCVCPU
*cpu
= opaque
;
240 CPURISCVState
*env
= &cpu
->env
;
242 if (icount_enabled()) {
243 env
->itrigger_enabled
= riscv_itrigger_enabled(env
);
249 static const VMStateDescription vmstate_debug
= {
252 .minimum_version_id
= 2,
253 .needed
= debug_needed
,
254 .post_load
= debug_post_load
,
255 .fields
= (const VMStateField
[]) {
256 VMSTATE_UINTTL(env
.trigger_cur
, RISCVCPU
),
257 VMSTATE_UINTTL_ARRAY(env
.tdata1
, RISCVCPU
, RV_MAX_TRIGGERS
),
258 VMSTATE_UINTTL_ARRAY(env
.tdata2
, RISCVCPU
, RV_MAX_TRIGGERS
),
259 VMSTATE_UINTTL_ARRAY(env
.tdata3
, RISCVCPU
, RV_MAX_TRIGGERS
),
260 VMSTATE_END_OF_LIST()
264 static int riscv_cpu_post_load(void *opaque
, int version_id
)
266 RISCVCPU
*cpu
= opaque
;
267 CPURISCVState
*env
= &cpu
->env
;
269 env
->xl
= cpu_recompute_xl(env
);
270 riscv_cpu_update_mask(env
);
274 static bool smstateen_needed(void *opaque
)
276 RISCVCPU
*cpu
= opaque
;
278 return cpu
->cfg
.ext_smstateen
;
281 static const VMStateDescription vmstate_smstateen
= {
282 .name
= "cpu/smtateen",
284 .minimum_version_id
= 1,
285 .needed
= smstateen_needed
,
286 .fields
= (const VMStateField
[]) {
287 VMSTATE_UINT64_ARRAY(env
.mstateen
, RISCVCPU
, 4),
288 VMSTATE_UINT64_ARRAY(env
.hstateen
, RISCVCPU
, 4),
289 VMSTATE_UINT64_ARRAY(env
.sstateen
, RISCVCPU
, 4),
290 VMSTATE_END_OF_LIST()
294 static bool envcfg_needed(void *opaque
)
296 RISCVCPU
*cpu
= opaque
;
297 CPURISCVState
*env
= &cpu
->env
;
299 return (env
->priv_ver
>= PRIV_VERSION_1_12_0
? 1 : 0);
302 static const VMStateDescription vmstate_envcfg
= {
303 .name
= "cpu/envcfg",
305 .minimum_version_id
= 1,
306 .needed
= envcfg_needed
,
307 .fields
= (const VMStateField
[]) {
308 VMSTATE_UINT64(env
.menvcfg
, RISCVCPU
),
309 VMSTATE_UINTTL(env
.senvcfg
, RISCVCPU
),
310 VMSTATE_UINT64(env
.henvcfg
, RISCVCPU
),
311 VMSTATE_END_OF_LIST()
315 static bool pmu_needed(void *opaque
)
317 RISCVCPU
*cpu
= opaque
;
319 return (cpu
->cfg
.pmu_mask
> 0);
322 static const VMStateDescription vmstate_pmu_ctr_state
= {
325 .minimum_version_id
= 1,
326 .needed
= pmu_needed
,
327 .fields
= (const VMStateField
[]) {
328 VMSTATE_UINTTL(mhpmcounter_val
, PMUCTRState
),
329 VMSTATE_UINTTL(mhpmcounterh_val
, PMUCTRState
),
330 VMSTATE_UINTTL(mhpmcounter_prev
, PMUCTRState
),
331 VMSTATE_UINTTL(mhpmcounterh_prev
, PMUCTRState
),
332 VMSTATE_BOOL(started
, PMUCTRState
),
333 VMSTATE_END_OF_LIST()
337 static bool jvt_needed(void *opaque
)
339 RISCVCPU
*cpu
= opaque
;
341 return cpu
->cfg
.ext_zcmt
;
344 static const VMStateDescription vmstate_jvt
= {
347 .minimum_version_id
= 1,
348 .needed
= jvt_needed
,
349 .fields
= (const VMStateField
[]) {
350 VMSTATE_UINTTL(env
.jvt
, RISCVCPU
),
351 VMSTATE_END_OF_LIST()
355 const VMStateDescription vmstate_riscv_cpu
= {
358 .minimum_version_id
= 9,
359 .post_load
= riscv_cpu_post_load
,
360 .fields
= (const VMStateField
[]) {
361 VMSTATE_UINTTL_ARRAY(env
.gpr
, RISCVCPU
, 32),
362 VMSTATE_UINT64_ARRAY(env
.fpr
, RISCVCPU
, 32),
363 VMSTATE_UINT8_ARRAY(env
.miprio
, RISCVCPU
, 64),
364 VMSTATE_UINT8_ARRAY(env
.siprio
, RISCVCPU
, 64),
365 VMSTATE_UINTTL(env
.pc
, RISCVCPU
),
366 VMSTATE_UINTTL(env
.load_res
, RISCVCPU
),
367 VMSTATE_UINTTL(env
.load_val
, RISCVCPU
),
368 VMSTATE_UINTTL(env
.frm
, RISCVCPU
),
369 VMSTATE_UINTTL(env
.badaddr
, RISCVCPU
),
370 VMSTATE_UINTTL(env
.guest_phys_fault_addr
, RISCVCPU
),
371 VMSTATE_UINTTL(env
.priv_ver
, RISCVCPU
),
372 VMSTATE_UINTTL(env
.vext_ver
, RISCVCPU
),
373 VMSTATE_UINT32(env
.misa_mxl
, RISCVCPU
),
374 VMSTATE_UINT32(env
.misa_ext
, RISCVCPU
),
375 VMSTATE_UINT32(env
.misa_mxl_max
, RISCVCPU
),
376 VMSTATE_UINT32(env
.misa_ext_mask
, RISCVCPU
),
377 VMSTATE_UINTTL(env
.priv
, RISCVCPU
),
378 VMSTATE_BOOL(env
.virt_enabled
, RISCVCPU
),
379 VMSTATE_UINT64(env
.resetvec
, RISCVCPU
),
380 VMSTATE_UINTTL(env
.mhartid
, RISCVCPU
),
381 VMSTATE_UINT64(env
.mstatus
, RISCVCPU
),
382 VMSTATE_UINT64(env
.mip
, RISCVCPU
),
383 VMSTATE_UINT64(env
.miclaim
, RISCVCPU
),
384 VMSTATE_UINT64(env
.mie
, RISCVCPU
),
385 VMSTATE_UINT64(env
.mvien
, RISCVCPU
),
386 VMSTATE_UINT64(env
.mvip
, RISCVCPU
),
387 VMSTATE_UINT64(env
.sie
, RISCVCPU
),
388 VMSTATE_UINT64(env
.mideleg
, RISCVCPU
),
389 VMSTATE_UINTTL(env
.satp
, RISCVCPU
),
390 VMSTATE_UINTTL(env
.stval
, RISCVCPU
),
391 VMSTATE_UINTTL(env
.medeleg
, RISCVCPU
),
392 VMSTATE_UINTTL(env
.stvec
, RISCVCPU
),
393 VMSTATE_UINTTL(env
.sepc
, RISCVCPU
),
394 VMSTATE_UINTTL(env
.scause
, RISCVCPU
),
395 VMSTATE_UINTTL(env
.mtvec
, RISCVCPU
),
396 VMSTATE_UINTTL(env
.mepc
, RISCVCPU
),
397 VMSTATE_UINTTL(env
.mcause
, RISCVCPU
),
398 VMSTATE_UINTTL(env
.mtval
, RISCVCPU
),
399 VMSTATE_UINTTL(env
.miselect
, RISCVCPU
),
400 VMSTATE_UINTTL(env
.siselect
, RISCVCPU
),
401 VMSTATE_UINTTL(env
.scounteren
, RISCVCPU
),
402 VMSTATE_UINTTL(env
.mcounteren
, RISCVCPU
),
403 VMSTATE_UINTTL(env
.mcountinhibit
, RISCVCPU
),
404 VMSTATE_STRUCT_ARRAY(env
.pmu_ctrs
, RISCVCPU
, RV_MAX_MHPMCOUNTERS
, 0,
405 vmstate_pmu_ctr_state
, PMUCTRState
),
406 VMSTATE_UINTTL_ARRAY(env
.mhpmevent_val
, RISCVCPU
, RV_MAX_MHPMEVENTS
),
407 VMSTATE_UINTTL_ARRAY(env
.mhpmeventh_val
, RISCVCPU
, RV_MAX_MHPMEVENTS
),
408 VMSTATE_UINTTL(env
.sscratch
, RISCVCPU
),
409 VMSTATE_UINTTL(env
.mscratch
, RISCVCPU
),
410 VMSTATE_UINT64(env
.stimecmp
, RISCVCPU
),
412 VMSTATE_END_OF_LIST()
414 .subsections
= (const VMStateDescription
* const []) {
418 &vmstate_pointermasking
,