/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "migration/cpu.h"
#include "sysemu/cpu-timers.h"
#include "debug.h"
27 static bool pmp_needed(void *opaque
)
29 RISCVCPU
*cpu
= opaque
;
34 static int pmp_post_load(void *opaque
, int version_id
)
36 RISCVCPU
*cpu
= opaque
;
37 CPURISCVState
*env
= &cpu
->env
;
40 for (i
= 0; i
< MAX_RISCV_PMPS
; i
++) {
41 pmp_update_rule_addr(env
, i
);
43 pmp_update_rule_nums(env
);
48 static const VMStateDescription vmstate_pmp_entry
= {
49 .name
= "cpu/pmp/entry",
51 .minimum_version_id
= 1,
52 .fields
= (const VMStateField
[]) {
53 VMSTATE_UINTTL(addr_reg
, pmp_entry_t
),
54 VMSTATE_UINT8(cfg_reg
, pmp_entry_t
),
59 static const VMStateDescription vmstate_pmp
= {
62 .minimum_version_id
= 1,
64 .post_load
= pmp_post_load
,
65 .fields
= (const VMStateField
[]) {
66 VMSTATE_STRUCT_ARRAY(env
.pmp_state
.pmp
, RISCVCPU
, MAX_RISCV_PMPS
,
67 0, vmstate_pmp_entry
, pmp_entry_t
),
72 static bool hyper_needed(void *opaque
)
74 RISCVCPU
*cpu
= opaque
;
75 CPURISCVState
*env
= &cpu
->env
;
77 return riscv_has_ext(env
, RVH
);
80 static const VMStateDescription vmstate_hyper
= {
83 .minimum_version_id
= 4,
84 .needed
= hyper_needed
,
85 .fields
= (const VMStateField
[]) {
86 VMSTATE_UINTTL(env
.hstatus
, RISCVCPU
),
87 VMSTATE_UINTTL(env
.hedeleg
, RISCVCPU
),
88 VMSTATE_UINT64(env
.hideleg
, RISCVCPU
),
89 VMSTATE_UINT32(env
.hcounteren
, RISCVCPU
),
90 VMSTATE_UINTTL(env
.htval
, RISCVCPU
),
91 VMSTATE_UINTTL(env
.htinst
, RISCVCPU
),
92 VMSTATE_UINTTL(env
.hgatp
, RISCVCPU
),
93 VMSTATE_UINTTL(env
.hgeie
, RISCVCPU
),
94 VMSTATE_UINTTL(env
.hgeip
, RISCVCPU
),
95 VMSTATE_UINT64(env
.hvien
, RISCVCPU
),
96 VMSTATE_UINT64(env
.hvip
, RISCVCPU
),
97 VMSTATE_UINT64(env
.htimedelta
, RISCVCPU
),
98 VMSTATE_UINT64(env
.vstimecmp
, RISCVCPU
),
100 VMSTATE_UINTTL(env
.hvictl
, RISCVCPU
),
101 VMSTATE_UINT8_ARRAY(env
.hviprio
, RISCVCPU
, 64),
103 VMSTATE_UINT64(env
.vsstatus
, RISCVCPU
),
104 VMSTATE_UINTTL(env
.vstvec
, RISCVCPU
),
105 VMSTATE_UINTTL(env
.vsscratch
, RISCVCPU
),
106 VMSTATE_UINTTL(env
.vsepc
, RISCVCPU
),
107 VMSTATE_UINTTL(env
.vscause
, RISCVCPU
),
108 VMSTATE_UINTTL(env
.vstval
, RISCVCPU
),
109 VMSTATE_UINTTL(env
.vsatp
, RISCVCPU
),
110 VMSTATE_UINTTL(env
.vsiselect
, RISCVCPU
),
111 VMSTATE_UINT64(env
.vsie
, RISCVCPU
),
113 VMSTATE_UINTTL(env
.mtval2
, RISCVCPU
),
114 VMSTATE_UINTTL(env
.mtinst
, RISCVCPU
),
116 VMSTATE_UINTTL(env
.stvec_hs
, RISCVCPU
),
117 VMSTATE_UINTTL(env
.sscratch_hs
, RISCVCPU
),
118 VMSTATE_UINTTL(env
.sepc_hs
, RISCVCPU
),
119 VMSTATE_UINTTL(env
.scause_hs
, RISCVCPU
),
120 VMSTATE_UINTTL(env
.stval_hs
, RISCVCPU
),
121 VMSTATE_UINTTL(env
.satp_hs
, RISCVCPU
),
122 VMSTATE_UINT64(env
.mstatus_hs
, RISCVCPU
),
124 VMSTATE_END_OF_LIST()
128 static bool vector_needed(void *opaque
)
130 RISCVCPU
*cpu
= opaque
;
131 CPURISCVState
*env
= &cpu
->env
;
133 return riscv_has_ext(env
, RVV
);
136 static const VMStateDescription vmstate_vector
= {
137 .name
= "cpu/vector",
139 .minimum_version_id
= 2,
140 .needed
= vector_needed
,
141 .fields
= (const VMStateField
[]) {
142 VMSTATE_UINT64_ARRAY(env
.vreg
, RISCVCPU
, 32 * RV_VLEN_MAX
/ 64),
143 VMSTATE_UINTTL(env
.vxrm
, RISCVCPU
),
144 VMSTATE_UINTTL(env
.vxsat
, RISCVCPU
),
145 VMSTATE_UINTTL(env
.vl
, RISCVCPU
),
146 VMSTATE_UINTTL(env
.vstart
, RISCVCPU
),
147 VMSTATE_UINTTL(env
.vtype
, RISCVCPU
),
148 VMSTATE_BOOL(env
.vill
, RISCVCPU
),
149 VMSTATE_END_OF_LIST()
153 static bool pointermasking_needed(void *opaque
)
155 RISCVCPU
*cpu
= opaque
;
156 CPURISCVState
*env
= &cpu
->env
;
158 return riscv_has_ext(env
, RVJ
);
161 static const VMStateDescription vmstate_pointermasking
= {
162 .name
= "cpu/pointer_masking",
164 .minimum_version_id
= 1,
165 .needed
= pointermasking_needed
,
166 .fields
= (const VMStateField
[]) {
167 VMSTATE_UINTTL(env
.mmte
, RISCVCPU
),
168 VMSTATE_UINTTL(env
.mpmmask
, RISCVCPU
),
169 VMSTATE_UINTTL(env
.mpmbase
, RISCVCPU
),
170 VMSTATE_UINTTL(env
.spmmask
, RISCVCPU
),
171 VMSTATE_UINTTL(env
.spmbase
, RISCVCPU
),
172 VMSTATE_UINTTL(env
.upmmask
, RISCVCPU
),
173 VMSTATE_UINTTL(env
.upmbase
, RISCVCPU
),
175 VMSTATE_END_OF_LIST()
179 static bool rv128_needed(void *opaque
)
181 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(opaque
);
183 return mcc
->misa_mxl_max
== MXL_RV128
;
186 static const VMStateDescription vmstate_rv128
= {
189 .minimum_version_id
= 1,
190 .needed
= rv128_needed
,
191 .fields
= (const VMStateField
[]) {
192 VMSTATE_UINTTL_ARRAY(env
.gprh
, RISCVCPU
, 32),
193 VMSTATE_UINT64(env
.mscratchh
, RISCVCPU
),
194 VMSTATE_UINT64(env
.sscratchh
, RISCVCPU
),
195 VMSTATE_END_OF_LIST()
200 static bool kvmtimer_needed(void *opaque
)
202 return kvm_enabled();
205 static int cpu_kvmtimer_post_load(void *opaque
, int version_id
)
207 RISCVCPU
*cpu
= opaque
;
208 CPURISCVState
*env
= &cpu
->env
;
210 env
->kvm_timer_dirty
= true;
214 static const VMStateDescription vmstate_kvmtimer
= {
215 .name
= "cpu/kvmtimer",
217 .minimum_version_id
= 1,
218 .needed
= kvmtimer_needed
,
219 .post_load
= cpu_kvmtimer_post_load
,
220 .fields
= (const VMStateField
[]) {
221 VMSTATE_UINT64(env
.kvm_timer_time
, RISCVCPU
),
222 VMSTATE_UINT64(env
.kvm_timer_compare
, RISCVCPU
),
223 VMSTATE_UINT64(env
.kvm_timer_state
, RISCVCPU
),
224 VMSTATE_END_OF_LIST()
229 static bool debug_needed(void *opaque
)
231 RISCVCPU
*cpu
= opaque
;
233 return cpu
->cfg
.debug
;
236 static int debug_post_load(void *opaque
, int version_id
)
238 RISCVCPU
*cpu
= opaque
;
239 CPURISCVState
*env
= &cpu
->env
;
241 if (icount_enabled()) {
242 env
->itrigger_enabled
= riscv_itrigger_enabled(env
);
248 static const VMStateDescription vmstate_debug
= {
251 .minimum_version_id
= 2,
252 .needed
= debug_needed
,
253 .post_load
= debug_post_load
,
254 .fields
= (const VMStateField
[]) {
255 VMSTATE_UINTTL(env
.trigger_cur
, RISCVCPU
),
256 VMSTATE_UINTTL_ARRAY(env
.tdata1
, RISCVCPU
, RV_MAX_TRIGGERS
),
257 VMSTATE_UINTTL_ARRAY(env
.tdata2
, RISCVCPU
, RV_MAX_TRIGGERS
),
258 VMSTATE_UINTTL_ARRAY(env
.tdata3
, RISCVCPU
, RV_MAX_TRIGGERS
),
259 VMSTATE_END_OF_LIST()
263 static int riscv_cpu_post_load(void *opaque
, int version_id
)
265 RISCVCPU
*cpu
= opaque
;
266 CPURISCVState
*env
= &cpu
->env
;
268 env
->xl
= cpu_recompute_xl(env
);
269 riscv_cpu_update_mask(env
);
273 static bool smstateen_needed(void *opaque
)
275 RISCVCPU
*cpu
= opaque
;
277 return cpu
->cfg
.ext_smstateen
;
280 static const VMStateDescription vmstate_smstateen
= {
281 .name
= "cpu/smtateen",
283 .minimum_version_id
= 1,
284 .needed
= smstateen_needed
,
285 .fields
= (const VMStateField
[]) {
286 VMSTATE_UINT64_ARRAY(env
.mstateen
, RISCVCPU
, 4),
287 VMSTATE_UINT64_ARRAY(env
.hstateen
, RISCVCPU
, 4),
288 VMSTATE_UINT64_ARRAY(env
.sstateen
, RISCVCPU
, 4),
289 VMSTATE_END_OF_LIST()
293 static bool envcfg_needed(void *opaque
)
295 RISCVCPU
*cpu
= opaque
;
296 CPURISCVState
*env
= &cpu
->env
;
298 return (env
->priv_ver
>= PRIV_VERSION_1_12_0
? 1 : 0);
301 static const VMStateDescription vmstate_envcfg
= {
302 .name
= "cpu/envcfg",
304 .minimum_version_id
= 1,
305 .needed
= envcfg_needed
,
306 .fields
= (const VMStateField
[]) {
307 VMSTATE_UINT64(env
.menvcfg
, RISCVCPU
),
308 VMSTATE_UINTTL(env
.senvcfg
, RISCVCPU
),
309 VMSTATE_UINT64(env
.henvcfg
, RISCVCPU
),
310 VMSTATE_END_OF_LIST()
314 static bool pmu_needed(void *opaque
)
316 RISCVCPU
*cpu
= opaque
;
318 return (cpu
->cfg
.pmu_mask
> 0);
321 static const VMStateDescription vmstate_pmu_ctr_state
= {
324 .minimum_version_id
= 1,
325 .needed
= pmu_needed
,
326 .fields
= (const VMStateField
[]) {
327 VMSTATE_UINTTL(mhpmcounter_val
, PMUCTRState
),
328 VMSTATE_UINTTL(mhpmcounterh_val
, PMUCTRState
),
329 VMSTATE_UINTTL(mhpmcounter_prev
, PMUCTRState
),
330 VMSTATE_UINTTL(mhpmcounterh_prev
, PMUCTRState
),
331 VMSTATE_BOOL(started
, PMUCTRState
),
332 VMSTATE_END_OF_LIST()
336 static bool jvt_needed(void *opaque
)
338 RISCVCPU
*cpu
= opaque
;
340 return cpu
->cfg
.ext_zcmt
;
343 static const VMStateDescription vmstate_jvt
= {
346 .minimum_version_id
= 1,
347 .needed
= jvt_needed
,
348 .fields
= (const VMStateField
[]) {
349 VMSTATE_UINTTL(env
.jvt
, RISCVCPU
),
350 VMSTATE_END_OF_LIST()
354 const VMStateDescription vmstate_riscv_cpu
= {
357 .minimum_version_id
= 10,
358 .post_load
= riscv_cpu_post_load
,
359 .fields
= (const VMStateField
[]) {
360 VMSTATE_UINTTL_ARRAY(env
.gpr
, RISCVCPU
, 32),
361 VMSTATE_UINT64_ARRAY(env
.fpr
, RISCVCPU
, 32),
362 VMSTATE_UINT8_ARRAY(env
.miprio
, RISCVCPU
, 64),
363 VMSTATE_UINT8_ARRAY(env
.siprio
, RISCVCPU
, 64),
364 VMSTATE_UINTTL(env
.pc
, RISCVCPU
),
365 VMSTATE_UINTTL(env
.load_res
, RISCVCPU
),
366 VMSTATE_UINTTL(env
.load_val
, RISCVCPU
),
367 VMSTATE_UINTTL(env
.frm
, RISCVCPU
),
368 VMSTATE_UINTTL(env
.badaddr
, RISCVCPU
),
369 VMSTATE_UINTTL(env
.guest_phys_fault_addr
, RISCVCPU
),
370 VMSTATE_UINTTL(env
.priv_ver
, RISCVCPU
),
371 VMSTATE_UINTTL(env
.vext_ver
, RISCVCPU
),
372 VMSTATE_UINT32(env
.misa_mxl
, RISCVCPU
),
373 VMSTATE_UINT32(env
.misa_ext
, RISCVCPU
),
375 VMSTATE_UINT32(env
.misa_ext_mask
, RISCVCPU
),
376 VMSTATE_UINTTL(env
.priv
, RISCVCPU
),
377 VMSTATE_BOOL(env
.virt_enabled
, RISCVCPU
),
378 VMSTATE_UINT64(env
.resetvec
, RISCVCPU
),
379 VMSTATE_UINTTL(env
.mhartid
, RISCVCPU
),
380 VMSTATE_UINT64(env
.mstatus
, RISCVCPU
),
381 VMSTATE_UINT64(env
.mip
, RISCVCPU
),
382 VMSTATE_UINT64(env
.miclaim
, RISCVCPU
),
383 VMSTATE_UINT64(env
.mie
, RISCVCPU
),
384 VMSTATE_UINT64(env
.mvien
, RISCVCPU
),
385 VMSTATE_UINT64(env
.mvip
, RISCVCPU
),
386 VMSTATE_UINT64(env
.sie
, RISCVCPU
),
387 VMSTATE_UINT64(env
.mideleg
, RISCVCPU
),
388 VMSTATE_UINTTL(env
.satp
, RISCVCPU
),
389 VMSTATE_UINTTL(env
.stval
, RISCVCPU
),
390 VMSTATE_UINTTL(env
.medeleg
, RISCVCPU
),
391 VMSTATE_UINTTL(env
.stvec
, RISCVCPU
),
392 VMSTATE_UINTTL(env
.sepc
, RISCVCPU
),
393 VMSTATE_UINTTL(env
.scause
, RISCVCPU
),
394 VMSTATE_UINTTL(env
.mtvec
, RISCVCPU
),
395 VMSTATE_UINTTL(env
.mepc
, RISCVCPU
),
396 VMSTATE_UINTTL(env
.mcause
, RISCVCPU
),
397 VMSTATE_UINTTL(env
.mtval
, RISCVCPU
),
398 VMSTATE_UINTTL(env
.miselect
, RISCVCPU
),
399 VMSTATE_UINTTL(env
.siselect
, RISCVCPU
),
400 VMSTATE_UINT32(env
.scounteren
, RISCVCPU
),
401 VMSTATE_UINT32(env
.mcounteren
, RISCVCPU
),
402 VMSTATE_UINT32(env
.mcountinhibit
, RISCVCPU
),
403 VMSTATE_STRUCT_ARRAY(env
.pmu_ctrs
, RISCVCPU
, RV_MAX_MHPMCOUNTERS
, 0,
404 vmstate_pmu_ctr_state
, PMUCTRState
),
405 VMSTATE_UINTTL_ARRAY(env
.mhpmevent_val
, RISCVCPU
, RV_MAX_MHPMEVENTS
),
406 VMSTATE_UINTTL_ARRAY(env
.mhpmeventh_val
, RISCVCPU
, RV_MAX_MHPMEVENTS
),
407 VMSTATE_UINTTL(env
.sscratch
, RISCVCPU
),
408 VMSTATE_UINTTL(env
.mscratch
, RISCVCPU
),
409 VMSTATE_UINT64(env
.stimecmp
, RISCVCPU
),
411 VMSTATE_END_OF_LIST()
413 .subsections
= (const VMStateDescription
* const []) {
417 &vmstate_pointermasking
,