/*
 * RISC-V VMState Description
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "migration/cpu.h"
#include "sysemu/cpu-timers.h"
#include "debug.h"
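
/* PMP state is carried in an optional subsection, gated on the PMP feature. */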
static bool pmp_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_feature(env, RISCV_FEATURE_PMP);
}
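
/*
 * The pmpcfg/pmpaddr CSR values are migrated as raw registers; rebuild the
 * cached PMP rule table from them once the state has been loaded.
 */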
static int pmp_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;
    int i;

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        pmp_update_rule_addr(env, i);
    }
    pmp_update_rule_nums(env);

    return 0;
}

static const VMStateDescription vmstate_pmp_entry = {
    .name = "cpu/pmp/entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(addr_reg, pmp_entry_t),
        VMSTATE_UINT8(cfg_reg, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pmp = {
    .name = "cpu/pmp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmp_needed,
    .post_load = pmp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS,
                             0, vmstate_pmp_entry, pmp_entry_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyper_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVH);
}
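
/*
 * CSR state added by the hypervisor (H) extension, plus the background
 * S-mode CSR copies (*_hs) that are swapped when the virtualisation mode
 * changes.
 */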
static const VMStateDescription vmstate_hyper = {
    .name = "cpu/hyper",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = hyper_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.hstatus, RISCVCPU),
        VMSTATE_UINTTL(env.hedeleg, RISCVCPU),
        VMSTATE_UINT64(env.hideleg, RISCVCPU),
        VMSTATE_UINTTL(env.hcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.htval, RISCVCPU),
        VMSTATE_UINTTL(env.htinst, RISCVCPU),
        VMSTATE_UINTTL(env.hgatp, RISCVCPU),
        VMSTATE_UINTTL(env.hgeie, RISCVCPU),
        VMSTATE_UINTTL(env.hgeip, RISCVCPU),
        VMSTATE_UINT64(env.htimedelta, RISCVCPU),
        VMSTATE_UINT64(env.vstimecmp, RISCVCPU),

        VMSTATE_UINTTL(env.hvictl, RISCVCPU),
        VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64),

        VMSTATE_UINT64(env.vsstatus, RISCVCPU),
        VMSTATE_UINTTL(env.vstvec, RISCVCPU),
        VMSTATE_UINTTL(env.vsscratch, RISCVCPU),
        VMSTATE_UINTTL(env.vsepc, RISCVCPU),
        VMSTATE_UINTTL(env.vscause, RISCVCPU),
        VMSTATE_UINTTL(env.vstval, RISCVCPU),
        VMSTATE_UINTTL(env.vsatp, RISCVCPU),
        VMSTATE_UINTTL(env.vsiselect, RISCVCPU),

        VMSTATE_UINTTL(env.mtval2, RISCVCPU),
        VMSTATE_UINTTL(env.mtinst, RISCVCPU),

        VMSTATE_UINTTL(env.stvec_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sscratch_hs, RISCVCPU),
        VMSTATE_UINTTL(env.sepc_hs, RISCVCPU),
        VMSTATE_UINTTL(env.scause_hs, RISCVCPU),
        VMSTATE_UINTTL(env.stval_hs, RISCVCPU),
        VMSTATE_UINTTL(env.satp_hs, RISCVCPU),
        VMSTATE_UINT64(env.mstatus_hs, RISCVCPU),

        VMSTATE_END_OF_LIST()
    }
};

static bool vector_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVV);
}

static const VMStateDescription vmstate_vector = {
    .name = "cpu/vector",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = vector_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64),
        VMSTATE_UINTTL(env.vxrm, RISCVCPU),
        VMSTATE_UINTTL(env.vxsat, RISCVCPU),
        VMSTATE_UINTTL(env.vl, RISCVCPU),
        VMSTATE_UINTTL(env.vstart, RISCVCPU),
        VMSTATE_UINTTL(env.vtype, RISCVCPU),
        VMSTATE_BOOL(env.vill, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pointermasking_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_has_ext(env, RVJ);
}

static const VMStateDescription vmstate_pointermasking = {
    .name = "cpu/pointer_masking",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pointermasking_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.mmte, RISCVCPU),
        VMSTATE_UINTTL(env.mpmmask, RISCVCPU),
        VMSTATE_UINTTL(env.mpmbase, RISCVCPU),
        VMSTATE_UINTTL(env.spmmask, RISCVCPU),
        VMSTATE_UINTTL(env.spmbase, RISCVCPU),
        VMSTATE_UINTTL(env.upmmask, RISCVCPU),
        VMSTATE_UINTTL(env.upmbase, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool rv128_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return env->misa_mxl_max == MXL_RV128;
}
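
/*
 * For RV128, gprh holds the upper 64 bits of each general-purpose register,
 * and mscratchh/sscratchh the upper halves of mscratch/sscratch.
 */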
static const VMStateDescription vmstate_rv128 = {
    .name = "cpu/rv128",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = rv128_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
        VMSTATE_UINT64(env.mscratchh, RISCVCPU),
        VMSTATE_UINT64(env.sscratchh, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool kvmtimer_needed(void *opaque)
{
    return kvm_enabled();
}
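
/*
 * Flag the KVM timer state as dirty so the loaded values are pushed back
 * to the in-kernel timer.
 */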
static int cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->kvm_timer_dirty = true;
    return 0;
}

static const VMStateDescription vmstate_kvmtimer = {
    .name = "cpu/kvmtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = kvmtimer_needed,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU),
        VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool debug_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return riscv_feature(env, RISCV_FEATURE_DEBUG);
}
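
/*
 * Re-derive the cached itrigger state from the restored trigger CSRs when
 * icount is in use.
 */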
static int debug_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    if (icount_enabled()) {
        env->itrigger_enabled = riscv_itrigger_enabled(env);
    }

    return 0;
}

static const VMStateDescription vmstate_debug = {
    .name = "cpu/debug",
    .version_id = 2,
    .minimum_version_id = 2,
    .needed = debug_needed,
    .post_load = debug_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(env.trigger_cur, RISCVCPU),
        VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_UINTTL_ARRAY(env.tdata3, RISCVCPU, RV_MAX_TRIGGERS),
        VMSTATE_END_OF_LIST()
    }
};
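
/*
 * Recompute state that is derived from the migrated CSRs: the effective
 * XLEN and the pointer-masking mask.
 */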
static int riscv_cpu_post_load(void *opaque, int version_id)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);
    return 0;
}

static bool smstateen_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.ext_smstateen;
}

static const VMStateDescription vmstate_smstateen = {
    .name = "cpu/smtateen",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = smstateen_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};
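
/* The *envcfg CSRs only exist from privileged spec version 1.12 onwards. */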
static bool envcfg_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;
    CPURISCVState *env = &cpu->env;

    return (env->priv_ver >= PRIV_VERSION_1_12_0 ? 1 : 0);
}

static const VMStateDescription vmstate_envcfg = {
    .name = "cpu/envcfg",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = envcfg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.menvcfg, RISCVCPU),
        VMSTATE_UINTTL(env.senvcfg, RISCVCPU),
        VMSTATE_UINT64(env.henvcfg, RISCVCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_needed(void *opaque)
{
    RISCVCPU *cpu = opaque;

    return cpu->cfg.pmu_num;
}

static const VMStateDescription vmstate_pmu_ctr_state = {
    .name = "cpu/pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
        VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
        VMSTATE_BOOL(started, PMUCTRState),
        VMSTATE_END_OF_LIST()
    }
};
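
/*
 * Top-level CPU migration state. Architectural state that is always present
 * lives here; optional state is carried in the subsections below, each
 * guarded by its .needed callback.
 */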
const VMStateDescription vmstate_riscv_cpu = {
    .name = "cpu",
    .version_id = 6,
    .minimum_version_id = 6,
    .post_load = riscv_cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
        VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32),
        VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64),
        VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64),
        VMSTATE_UINTTL(env.pc, RISCVCPU),
        VMSTATE_UINTTL(env.load_res, RISCVCPU),
        VMSTATE_UINTTL(env.load_val, RISCVCPU),
        VMSTATE_UINTTL(env.frm, RISCVCPU),
        VMSTATE_UINTTL(env.badaddr, RISCVCPU),
        VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU),
        VMSTATE_UINTTL(env.priv_ver, RISCVCPU),
        VMSTATE_UINTTL(env.vext_ver, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext, RISCVCPU),
        VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
        VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
        VMSTATE_UINT32(env.features, RISCVCPU),
        VMSTATE_UINTTL(env.priv, RISCVCPU),
        VMSTATE_UINTTL(env.virt, RISCVCPU),
        VMSTATE_UINT64(env.resetvec, RISCVCPU),
        VMSTATE_UINTTL(env.mhartid, RISCVCPU),
        VMSTATE_UINT64(env.mstatus, RISCVCPU),
        VMSTATE_UINT64(env.mip, RISCVCPU),
        VMSTATE_UINT64(env.miclaim, RISCVCPU),
        VMSTATE_UINT64(env.mie, RISCVCPU),
        VMSTATE_UINT64(env.mideleg, RISCVCPU),
        VMSTATE_UINTTL(env.satp, RISCVCPU),
        VMSTATE_UINTTL(env.stval, RISCVCPU),
        VMSTATE_UINTTL(env.medeleg, RISCVCPU),
        VMSTATE_UINTTL(env.stvec, RISCVCPU),
        VMSTATE_UINTTL(env.sepc, RISCVCPU),
        VMSTATE_UINTTL(env.scause, RISCVCPU),
        VMSTATE_UINTTL(env.mtvec, RISCVCPU),
        VMSTATE_UINTTL(env.mepc, RISCVCPU),
        VMSTATE_UINTTL(env.mcause, RISCVCPU),
        VMSTATE_UINTTL(env.mtval, RISCVCPU),
        VMSTATE_UINTTL(env.miselect, RISCVCPU),
        VMSTATE_UINTTL(env.siselect, RISCVCPU),
        VMSTATE_UINTTL(env.scounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcounteren, RISCVCPU),
        VMSTATE_UINTTL(env.mcountinhibit, RISCVCPU),
        VMSTATE_STRUCT_ARRAY(env.pmu_ctrs, RISCVCPU, RV_MAX_MHPMCOUNTERS, 0,
                             vmstate_pmu_ctr_state, PMUCTRState),
        VMSTATE_UINTTL_ARRAY(env.mhpmevent_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL_ARRAY(env.mhpmeventh_val, RISCVCPU, RV_MAX_MHPMEVENTS),
        VMSTATE_UINTTL(env.sscratch, RISCVCPU),
        VMSTATE_UINTTL(env.mscratch, RISCVCPU),
        VMSTATE_UINT64(env.stimecmp, RISCVCPU),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_pmp,
        &vmstate_hyper,
        &vmstate_vector,
        &vmstate_pointermasking,