2 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
4 * Copyright (c) 2004-2007 Fabrice Bellard
5 * Copyright (c) 2007 Jocelyn Mayer
6 * Copyright (c) 2010 David Gibson, IBM Corporation.
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
27 #include "qemu/osdep.h"
28 #include "qemu/datadir.h"
29 #include "qemu/memalign.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
32 #include "qapi/qapi-events-machine.h"
33 #include "qapi/qapi-events-qdev.h"
34 #include "qapi/visitor.h"
35 #include "sysemu/sysemu.h"
36 #include "sysemu/hostmem.h"
37 #include "sysemu/numa.h"
38 #include "sysemu/tcg.h"
39 #include "sysemu/qtest.h"
40 #include "sysemu/reset.h"
41 #include "sysemu/runstate.h"
43 #include "hw/fw-path-provider.h"
46 #include "sysemu/device_tree.h"
47 #include "sysemu/cpus.h"
48 #include "sysemu/hw_accel.h"
50 #include "migration/misc.h"
51 #include "migration/qemu-file-types.h"
52 #include "migration/global_state.h"
53 #include "migration/register.h"
54 #include "migration/blocker.h"
55 #include "mmu-hash64.h"
56 #include "mmu-book3s-v3.h"
57 #include "cpu-models.h"
58 #include "hw/core/cpu.h"
60 #include "hw/ppc/ppc.h"
61 #include "hw/loader.h"
63 #include "hw/ppc/fdt.h"
64 #include "hw/ppc/spapr.h"
65 #include "hw/ppc/spapr_nested.h"
66 #include "hw/ppc/spapr_vio.h"
67 #include "hw/ppc/vof.h"
68 #include "hw/qdev-properties.h"
69 #include "hw/pci-host/spapr.h"
70 #include "hw/pci/msi.h"
72 #include "hw/pci/pci.h"
73 #include "hw/scsi/scsi.h"
74 #include "hw/virtio/virtio-scsi.h"
75 #include "hw/virtio/vhost-scsi-common.h"
77 #include "exec/ram_addr.h"
78 #include "exec/confidential-guest-support.h"
80 #include "qemu/config-file.h"
81 #include "qemu/error-report.h"
84 #include "hw/intc/intc.h"
86 #include "hw/ppc/spapr_cpu_core.h"
87 #include "hw/mem/memory-device.h"
88 #include "hw/ppc/spapr_tpm_proxy.h"
89 #include "hw/ppc/spapr_nvdimm.h"
90 #include "hw/ppc/spapr_numa.h"
94 /* SLOF memory layout:
96 * SLOF raw image loaded at 0, copies its romfs right below the flat
97 * device-tree, then position SLOF itself 31M below that
99 * So we set FW_OVERHEAD to 40MB which should account for all of that
102 * We load our kernel at 4M, leaving space for SLOF initial image
104 #define FDT_MAX_ADDR 0x80000000 /* FDT must stay below that */
105 #define FW_MAX_SIZE 0x400000
106 #define FW_FILE_NAME "slof.bin"
107 #define FW_FILE_NAME_VOF "vof.bin"
108 #define FW_OVERHEAD 0x2800000
109 #define KERNEL_LOAD_ADDR FW_MAX_SIZE
111 #define MIN_RMA_SLOF (128 * MiB)
113 #define PHANDLE_INTC 0x00001111
115 /* These two functions implement the VCPU id numbering: one to compute them
116 * all and one to identify thread 0 of a VCORE. Any change to the first one
117 * is likely to have an impact on the second one, so let's keep them close.
119 static int spapr_vcpu_id(SpaprMachineState
*spapr
, int cpu_index
)
121 MachineState
*ms
= MACHINE(spapr
);
122 unsigned int smp_threads
= ms
->smp
.threads
;
126 (cpu_index
/ smp_threads
) * spapr
->vsmt
+ cpu_index
% smp_threads
;
128 static bool spapr_is_thread0_in_vcore(SpaprMachineState
*spapr
,
132 return spapr_get_vcpu_id(cpu
) % spapr
->vsmt
== 0;
135 static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque
)
137 /* Dummy entries correspond to unused ICPState objects in older QEMUs,
138 * and newer QEMUs don't even have them. In both cases, we don't want
139 * to send anything on the wire.
144 static const VMStateDescription pre_2_10_vmstate_dummy_icp
= {
146 * Hack ahead. We can't have two devices with the same name and
147 * instance id. So I rename this to pass make check.
148 * Real help from people who knows the hardware is needed.
150 .name
= "icp/server",
152 .minimum_version_id
= 1,
153 .needed
= pre_2_10_vmstate_dummy_icp_needed
,
154 .fields
= (const VMStateField
[]) {
155 VMSTATE_UNUSED(4), /* uint32_t xirr */
156 VMSTATE_UNUSED(1), /* uint8_t pending_priority */
157 VMSTATE_UNUSED(1), /* uint8_t mfrr */
158 VMSTATE_END_OF_LIST()
163 * See comment in hw/intc/xics.c:icp_realize()
165 * You have to remove vmstate_replace_hack_for_ppc() when you remove
166 * the machine types that need the following function.
168 static void pre_2_10_vmstate_register_dummy_icp(int i
)
170 vmstate_register(NULL
, i
, &pre_2_10_vmstate_dummy_icp
,
171 (void *)(uintptr_t) i
);
175 * See comment in hw/intc/xics.c:icp_realize()
177 * You have to remove vmstate_replace_hack_for_ppc() when you remove
178 * the machine types that need the following function.
180 static void pre_2_10_vmstate_unregister_dummy_icp(int i
)
185 * vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
186 * (void *)(uintptr_t) i);
190 int spapr_max_server_number(SpaprMachineState
*spapr
)
192 MachineState
*ms
= MACHINE(spapr
);
195 return DIV_ROUND_UP(ms
->smp
.max_cpus
* spapr
->vsmt
, ms
->smp
.threads
);
198 static int spapr_fixup_cpu_smt_dt(void *fdt
, int offset
, PowerPCCPU
*cpu
,
202 g_autofree
uint32_t *servers_prop
= g_new(uint32_t, smt_threads
);
203 g_autofree
uint32_t *gservers_prop
= g_new(uint32_t, smt_threads
* 2);
204 int index
= spapr_get_vcpu_id(cpu
);
206 if (cpu
->compat_pvr
) {
207 ret
= fdt_setprop_cell(fdt
, offset
, "cpu-version", cpu
->compat_pvr
);
213 /* Build interrupt servers and gservers properties */
214 for (i
= 0; i
< smt_threads
; i
++) {
215 servers_prop
[i
] = cpu_to_be32(index
+ i
);
216 /* Hack, direct the group queues back to cpu 0 */
217 gservers_prop
[i
*2] = cpu_to_be32(index
+ i
);
218 gservers_prop
[i
*2 + 1] = 0;
220 ret
= fdt_setprop(fdt
, offset
, "ibm,ppc-interrupt-server#s",
221 servers_prop
, sizeof(*servers_prop
) * smt_threads
);
225 ret
= fdt_setprop(fdt
, offset
, "ibm,ppc-interrupt-gserver#s",
226 gservers_prop
, sizeof(*gservers_prop
) * smt_threads
* 2);
231 static void spapr_dt_pa_features(SpaprMachineState
*spapr
,
233 void *fdt
, int offset
)
236 * SSO (SAO) ordering is supported on KVM and thread=single hosts,
237 * but not MTTCG, so disable it. To advertise it, a cap would have
238 * to be added, or support implemented for MTTCG.
240 * Copy/paste is not supported by TCG, so it is not advertised. KVM
241 * can execute them but it has no accelerator drivers which are usable,
242 * so there isn't much need for it anyway.
245 /* These should be kept in sync with pnv */
246 uint8_t pa_features_206
[] = { 6, 0,
247 0xf6, 0x1f, 0xc7, 0x00, 0x00, 0xc0 };
248 uint8_t pa_features_207
[] = { 24, 0,
249 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0,
250 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
252 0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
253 uint8_t pa_features_300
[] = { 66, 0,
254 /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
255 /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
256 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
258 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
260 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
261 /* 18: Vec. Scalar, 20: Vec. XOR */
262 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
263 /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
264 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
265 /* 32: LE atomic, 34: EBB + ext EBB */
266 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
268 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
269 /* 42: PM, 44: PC RA, 46: SC vec'd */
270 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
271 /* 48: SIMD, 50: QP BFP, 52: String */
272 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
273 /* 54: DecFP, 56: DecI, 58: SHA */
274 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
275 /* 60: NM atomic, 62: RNG */
276 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
278 /* 3.1 removes SAO, HTM support */
279 uint8_t pa_features_31
[] = { 74, 0,
280 /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
281 /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, 5: LE|CFAR|EB|LSQ */
282 0xf6, 0x1f, 0xc7, 0xc0, 0x00, 0xf0, /* 0 - 5 */
284 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
286 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
287 /* 18: Vec. Scalar, 20: Vec. XOR */
288 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
289 /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
290 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
291 /* 32: LE atomic, 34: EBB + ext EBB */
292 0x00, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
294 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 36 - 41 */
295 /* 42: PM, 44: PC RA, 46: SC vec'd */
296 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
297 /* 48: SIMD, 50: QP BFP, 52: String */
298 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
299 /* 54: DecFP, 56: DecI, 58: SHA */
300 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
301 /* 60: NM atomic, 62: RNG */
302 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
303 /* 68: DEXCR[SBHE|IBRTPDUS|SRAPD|NPHIE|PHIE] */
304 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 66 - 71 */
305 /* 72: [P]HASHST/[P]HASHCHK */
306 0x80, 0x00, /* 72 - 73 */
308 uint8_t *pa_features
= NULL
;
311 if (ppc_check_compat(cpu
, CPU_POWERPC_LOGICAL_2_06
, 0, cpu
->compat_pvr
)) {
312 pa_features
= pa_features_206
;
313 pa_size
= sizeof(pa_features_206
);
315 if (ppc_check_compat(cpu
, CPU_POWERPC_LOGICAL_2_07
, 0, cpu
->compat_pvr
)) {
316 pa_features
= pa_features_207
;
317 pa_size
= sizeof(pa_features_207
);
319 if (ppc_check_compat(cpu
, CPU_POWERPC_LOGICAL_3_00
, 0, cpu
->compat_pvr
)) {
320 pa_features
= pa_features_300
;
321 pa_size
= sizeof(pa_features_300
);
323 if (ppc_check_compat(cpu
, CPU_POWERPC_LOGICAL_3_10
, 0, cpu
->compat_pvr
)) {
324 pa_features
= pa_features_31
;
325 pa_size
= sizeof(pa_features_31
);
331 if (ppc_hash64_has(cpu
, PPC_HASH64_CI_LARGEPAGE
)) {
333 * Note: we keep CI large pages off by default because a 64K capable
334 * guest provisioned with large pages might otherwise try to map a qemu
335 * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
336 * even if that qemu runs on a 4k host.
337 * We dd this bit back here if we are confident this is not an issue
339 pa_features
[3] |= 0x20;
341 if ((spapr_get_cap(spapr
, SPAPR_CAP_HTM
) != 0) && pa_size
> 24) {
342 pa_features
[24] |= 0x80; /* Transactional memory support */
344 if (spapr
->cas_pre_isa3_guest
&& pa_size
> 40) {
345 /* Workaround for broken kernels that attempt (guest) radix
346 * mode when they can't handle it, if they see the radix bit set
347 * in pa-features. So hide it from them. */
348 pa_features
[40 + 2] &= ~0x80; /* Radix MMU */
351 _FDT((fdt_setprop(fdt
, offset
, "ibm,pa-features", pa_features
, pa_size
)));
354 static void spapr_dt_pi_features(SpaprMachineState
*spapr
,
356 void *fdt
, int offset
)
358 uint8_t pi_features
[] = { 1, 0,
361 if (kvm_enabled() && ppc_check_compat(cpu
, CPU_POWERPC_LOGICAL_3_00
,
362 0, cpu
->compat_pvr
)) {
364 * POWER9 and later CPUs with KVM run in LPAR-per-thread mode where
365 * all threads are essentially independent CPUs, and msgsndp does not
366 * work (because it is physically-addressed) and therefore is
367 * emulated by KVM, so disable it here to ensure XIVE will be used.
368 * This is both KVM and CPU implementation-specific behaviour so a KVM
369 * cap would be cleanest, but for now this works. If KVM ever permits
370 * native msgsndp execution by guests, a cap could be added at that
373 pi_features
[2] |= 0x08; /* 4: No msgsndp */
376 _FDT((fdt_setprop(fdt
, offset
, "ibm,pi-features", pi_features
,
377 sizeof(pi_features
))));
380 static hwaddr
spapr_node0_size(MachineState
*machine
)
382 if (machine
->numa_state
->num_nodes
) {
384 for (i
= 0; i
< machine
->numa_state
->num_nodes
; ++i
) {
385 if (machine
->numa_state
->nodes
[i
].node_mem
) {
386 return MIN(pow2floor(machine
->numa_state
->nodes
[i
].node_mem
),
391 return machine
->ram_size
;
394 static void add_str(GString
*s
, const gchar
*s1
)
396 g_string_append_len(s
, s1
, strlen(s1
) + 1);
399 static int spapr_dt_memory_node(SpaprMachineState
*spapr
, void *fdt
, int nodeid
,
400 hwaddr start
, hwaddr size
)
403 uint64_t mem_reg_property
[2];
406 mem_reg_property
[0] = cpu_to_be64(start
);
407 mem_reg_property
[1] = cpu_to_be64(size
);
409 sprintf(mem_name
, "memory@%" HWADDR_PRIx
, start
);
410 off
= fdt_add_subnode(fdt
, 0, mem_name
);
412 _FDT((fdt_setprop_string(fdt
, off
, "device_type", "memory")));
413 _FDT((fdt_setprop(fdt
, off
, "reg", mem_reg_property
,
414 sizeof(mem_reg_property
))));
415 spapr_numa_write_associativity_dt(spapr
, fdt
, off
, nodeid
);
419 static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList
*list
, ram_addr_t addr
)
421 MemoryDeviceInfoList
*info
;
423 for (info
= list
; info
; info
= info
->next
) {
424 MemoryDeviceInfo
*value
= info
->value
;
426 if (value
&& value
->type
== MEMORY_DEVICE_INFO_KIND_DIMM
) {
427 PCDIMMDeviceInfo
*pcdimm_info
= value
->u
.dimm
.data
;
429 if (addr
>= pcdimm_info
->addr
&&
430 addr
< (pcdimm_info
->addr
+ pcdimm_info
->size
)) {
431 return pcdimm_info
->node
;
439 struct sPAPRDrconfCellV2
{
447 typedef struct DrconfCellQueue
{
448 struct sPAPRDrconfCellV2 cell
;
449 QSIMPLEQ_ENTRY(DrconfCellQueue
) entry
;
452 static DrconfCellQueue
*
453 spapr_get_drconf_cell(uint32_t seq_lmbs
, uint64_t base_addr
,
454 uint32_t drc_index
, uint32_t aa_index
,
457 DrconfCellQueue
*elem
;
459 elem
= g_malloc0(sizeof(*elem
));
460 elem
->cell
.seq_lmbs
= cpu_to_be32(seq_lmbs
);
461 elem
->cell
.base_addr
= cpu_to_be64(base_addr
);
462 elem
->cell
.drc_index
= cpu_to_be32(drc_index
);
463 elem
->cell
.aa_index
= cpu_to_be32(aa_index
);
464 elem
->cell
.flags
= cpu_to_be32(flags
);
469 static int spapr_dt_dynamic_memory_v2(SpaprMachineState
*spapr
, void *fdt
,
470 int offset
, MemoryDeviceInfoList
*dimms
)
472 MachineState
*machine
= MACHINE(spapr
);
473 uint8_t *int_buf
, *cur_index
;
475 uint64_t lmb_size
= SPAPR_MEMORY_BLOCK_SIZE
;
476 uint64_t addr
, cur_addr
, size
;
477 uint32_t nr_boot_lmbs
= (machine
->device_memory
->base
/ lmb_size
);
478 uint64_t mem_end
= machine
->device_memory
->base
+
479 memory_region_size(&machine
->device_memory
->mr
);
480 uint32_t node
, buf_len
, nr_entries
= 0;
482 DrconfCellQueue
*elem
, *next
;
483 MemoryDeviceInfoList
*info
;
484 QSIMPLEQ_HEAD(, DrconfCellQueue
) drconf_queue
485 = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue
);
487 /* Entry to cover RAM and the gap area */
488 elem
= spapr_get_drconf_cell(nr_boot_lmbs
, 0, 0, -1,
489 SPAPR_LMB_FLAGS_RESERVED
|
490 SPAPR_LMB_FLAGS_DRC_INVALID
);
491 QSIMPLEQ_INSERT_TAIL(&drconf_queue
, elem
, entry
);
494 cur_addr
= machine
->device_memory
->base
;
495 for (info
= dimms
; info
; info
= info
->next
) {
496 PCDIMMDeviceInfo
*di
= info
->value
->u
.dimm
.data
;
503 * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The
504 * area is marked hotpluggable in the next iteration for the bigger
505 * chunk including the NVDIMM occupied area.
507 if (info
->value
->type
== MEMORY_DEVICE_INFO_KIND_NVDIMM
)
510 /* Entry for hot-pluggable area */
511 if (cur_addr
< addr
) {
512 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
, cur_addr
/ lmb_size
);
514 elem
= spapr_get_drconf_cell((addr
- cur_addr
) / lmb_size
,
515 cur_addr
, spapr_drc_index(drc
), -1, 0);
516 QSIMPLEQ_INSERT_TAIL(&drconf_queue
, elem
, entry
);
521 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
, addr
/ lmb_size
);
523 elem
= spapr_get_drconf_cell(size
/ lmb_size
, addr
,
524 spapr_drc_index(drc
), node
,
525 (SPAPR_LMB_FLAGS_ASSIGNED
|
526 SPAPR_LMB_FLAGS_HOTREMOVABLE
));
527 QSIMPLEQ_INSERT_TAIL(&drconf_queue
, elem
, entry
);
529 cur_addr
= addr
+ size
;
532 /* Entry for remaining hotpluggable area */
533 if (cur_addr
< mem_end
) {
534 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
, cur_addr
/ lmb_size
);
536 elem
= spapr_get_drconf_cell((mem_end
- cur_addr
) / lmb_size
,
537 cur_addr
, spapr_drc_index(drc
), -1, 0);
538 QSIMPLEQ_INSERT_TAIL(&drconf_queue
, elem
, entry
);
542 buf_len
= nr_entries
* sizeof(struct sPAPRDrconfCellV2
) + sizeof(uint32_t);
543 int_buf
= cur_index
= g_malloc0(buf_len
);
544 *(uint32_t *)int_buf
= cpu_to_be32(nr_entries
);
545 cur_index
+= sizeof(nr_entries
);
547 QSIMPLEQ_FOREACH_SAFE(elem
, &drconf_queue
, entry
, next
) {
548 memcpy(cur_index
, &elem
->cell
, sizeof(elem
->cell
));
549 cur_index
+= sizeof(elem
->cell
);
550 QSIMPLEQ_REMOVE(&drconf_queue
, elem
, DrconfCellQueue
, entry
);
554 ret
= fdt_setprop(fdt
, offset
, "ibm,dynamic-memory-v2", int_buf
, buf_len
);
562 static int spapr_dt_dynamic_memory(SpaprMachineState
*spapr
, void *fdt
,
563 int offset
, MemoryDeviceInfoList
*dimms
)
565 MachineState
*machine
= MACHINE(spapr
);
567 uint64_t lmb_size
= SPAPR_MEMORY_BLOCK_SIZE
;
568 uint32_t device_lmb_start
= machine
->device_memory
->base
/ lmb_size
;
569 uint32_t nr_lmbs
= (machine
->device_memory
->base
+
570 memory_region_size(&machine
->device_memory
->mr
)) /
572 uint32_t *int_buf
, *cur_index
, buf_len
;
575 * Allocate enough buffer size to fit in ibm,dynamic-memory
577 buf_len
= (nr_lmbs
* SPAPR_DR_LMB_LIST_ENTRY_SIZE
+ 1) * sizeof(uint32_t);
578 cur_index
= int_buf
= g_malloc0(buf_len
);
579 int_buf
[0] = cpu_to_be32(nr_lmbs
);
581 for (i
= 0; i
< nr_lmbs
; i
++) {
582 uint64_t addr
= i
* lmb_size
;
583 uint32_t *dynamic_memory
= cur_index
;
585 if (i
>= device_lmb_start
) {
588 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
, i
);
591 dynamic_memory
[0] = cpu_to_be32(addr
>> 32);
592 dynamic_memory
[1] = cpu_to_be32(addr
& 0xffffffff);
593 dynamic_memory
[2] = cpu_to_be32(spapr_drc_index(drc
));
594 dynamic_memory
[3] = cpu_to_be32(0); /* reserved */
595 dynamic_memory
[4] = cpu_to_be32(spapr_pc_dimm_node(dimms
, addr
));
596 if (memory_region_present(get_system_memory(), addr
)) {
597 dynamic_memory
[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED
);
599 dynamic_memory
[5] = cpu_to_be32(0);
603 * LMB information for RMA, boot time RAM and gap b/n RAM and
604 * device memory region -- all these are marked as reserved
605 * and as having no valid DRC.
607 dynamic_memory
[0] = cpu_to_be32(addr
>> 32);
608 dynamic_memory
[1] = cpu_to_be32(addr
& 0xffffffff);
609 dynamic_memory
[2] = cpu_to_be32(0);
610 dynamic_memory
[3] = cpu_to_be32(0); /* reserved */
611 dynamic_memory
[4] = cpu_to_be32(-1);
612 dynamic_memory
[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED
|
613 SPAPR_LMB_FLAGS_DRC_INVALID
);
616 cur_index
+= SPAPR_DR_LMB_LIST_ENTRY_SIZE
;
618 ret
= fdt_setprop(fdt
, offset
, "ibm,dynamic-memory", int_buf
, buf_len
);
627 * Adds ibm,dynamic-reconfiguration-memory node.
628 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
629 * of this device tree node.
631 static int spapr_dt_dynamic_reconfiguration_memory(SpaprMachineState
*spapr
,
634 MachineState
*machine
= MACHINE(spapr
);
636 uint64_t lmb_size
= SPAPR_MEMORY_BLOCK_SIZE
;
637 uint32_t prop_lmb_size
[] = {cpu_to_be32(lmb_size
>> 32),
638 cpu_to_be32(lmb_size
& 0xffffffff)};
639 MemoryDeviceInfoList
*dimms
= NULL
;
641 /* Don't create the node if there is no device memory. */
642 if (!machine
->device_memory
) {
646 offset
= fdt_add_subnode(fdt
, 0, "ibm,dynamic-reconfiguration-memory");
648 ret
= fdt_setprop(fdt
, offset
, "ibm,lmb-size", prop_lmb_size
,
649 sizeof(prop_lmb_size
));
654 ret
= fdt_setprop_cell(fdt
, offset
, "ibm,memory-flags-mask", 0xff);
659 ret
= fdt_setprop_cell(fdt
, offset
, "ibm,memory-preservation-time", 0x0);
664 /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
665 dimms
= qmp_memory_device_list();
666 if (spapr_ovec_test(spapr
->ov5_cas
, OV5_DRMEM_V2
)) {
667 ret
= spapr_dt_dynamic_memory_v2(spapr
, fdt
, offset
, dimms
);
669 ret
= spapr_dt_dynamic_memory(spapr
, fdt
, offset
, dimms
);
671 qapi_free_MemoryDeviceInfoList(dimms
);
677 ret
= spapr_numa_write_assoc_lookup_arrays(spapr
, fdt
, offset
);
682 static int spapr_dt_memory(SpaprMachineState
*spapr
, void *fdt
)
684 MachineState
*machine
= MACHINE(spapr
);
685 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(spapr
);
686 hwaddr mem_start
, node_size
;
687 int i
, nb_nodes
= machine
->numa_state
->num_nodes
;
688 NodeInfo
*nodes
= machine
->numa_state
->nodes
;
690 for (i
= 0, mem_start
= 0; i
< nb_nodes
; ++i
) {
691 if (!nodes
[i
].node_mem
) {
694 if (mem_start
>= machine
->ram_size
) {
697 node_size
= nodes
[i
].node_mem
;
698 if (node_size
> machine
->ram_size
- mem_start
) {
699 node_size
= machine
->ram_size
- mem_start
;
703 /* spapr_machine_init() checks for rma_size <= node0_size
705 spapr_dt_memory_node(spapr
, fdt
, i
, 0, spapr
->rma_size
);
706 mem_start
+= spapr
->rma_size
;
707 node_size
-= spapr
->rma_size
;
709 for ( ; node_size
; ) {
710 hwaddr sizetmp
= pow2floor(node_size
);
712 /* mem_start != 0 here */
713 if (ctzl(mem_start
) < ctzl(sizetmp
)) {
714 sizetmp
= 1ULL << ctzl(mem_start
);
717 spapr_dt_memory_node(spapr
, fdt
, i
, mem_start
, sizetmp
);
718 node_size
-= sizetmp
;
719 mem_start
+= sizetmp
;
723 /* Generate ibm,dynamic-reconfiguration-memory node if required */
724 if (spapr_ovec_test(spapr
->ov5_cas
, OV5_DRCONF_MEMORY
)) {
727 g_assert(smc
->dr_lmb_enabled
);
728 ret
= spapr_dt_dynamic_reconfiguration_memory(spapr
, fdt
);
737 static void spapr_dt_cpu(CPUState
*cs
, void *fdt
, int offset
,
738 SpaprMachineState
*spapr
)
740 MachineState
*ms
= MACHINE(spapr
);
741 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
742 CPUPPCState
*env
= &cpu
->env
;
743 PowerPCCPUClass
*pcc
= POWERPC_CPU_GET_CLASS(cs
);
744 int index
= spapr_get_vcpu_id(cpu
);
745 uint32_t segs
[] = {cpu_to_be32(28), cpu_to_be32(40),
746 0xffffffff, 0xffffffff};
747 uint32_t tbfreq
= kvm_enabled() ? kvmppc_get_tbfreq()
748 : SPAPR_TIMEBASE_FREQ
;
749 uint32_t cpufreq
= kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
750 uint32_t page_sizes_prop
[64];
751 size_t page_sizes_prop_size
;
752 unsigned int smp_threads
= ms
->smp
.threads
;
753 uint32_t vcpus_per_socket
= smp_threads
* ms
->smp
.cores
;
754 uint32_t pft_size_prop
[] = {0, cpu_to_be32(spapr
->htab_shift
)};
755 int compat_smt
= MIN(smp_threads
, ppc_compat_max_vthreads(cpu
));
758 uint32_t radix_AP_encodings
[PPC_PAGE_SIZES_MAX_SZ
];
761 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_CPU
, index
);
763 drc_index
= spapr_drc_index(drc
);
764 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,my-drc-index", drc_index
)));
767 _FDT((fdt_setprop_cell(fdt
, offset
, "reg", index
)));
768 _FDT((fdt_setprop_string(fdt
, offset
, "device_type", "cpu")));
770 _FDT((fdt_setprop_cell(fdt
, offset
, "cpu-version", env
->spr
[SPR_PVR
])));
771 _FDT((fdt_setprop_cell(fdt
, offset
, "d-cache-block-size",
772 env
->dcache_line_size
)));
773 _FDT((fdt_setprop_cell(fdt
, offset
, "d-cache-line-size",
774 env
->dcache_line_size
)));
775 _FDT((fdt_setprop_cell(fdt
, offset
, "i-cache-block-size",
776 env
->icache_line_size
)));
777 _FDT((fdt_setprop_cell(fdt
, offset
, "i-cache-line-size",
778 env
->icache_line_size
)));
780 if (pcc
->l1_dcache_size
) {
781 _FDT((fdt_setprop_cell(fdt
, offset
, "d-cache-size",
782 pcc
->l1_dcache_size
)));
784 warn_report("Unknown L1 dcache size for cpu");
786 if (pcc
->l1_icache_size
) {
787 _FDT((fdt_setprop_cell(fdt
, offset
, "i-cache-size",
788 pcc
->l1_icache_size
)));
790 warn_report("Unknown L1 icache size for cpu");
793 _FDT((fdt_setprop_cell(fdt
, offset
, "timebase-frequency", tbfreq
)));
794 _FDT((fdt_setprop_cell(fdt
, offset
, "clock-frequency", cpufreq
)));
795 _FDT((fdt_setprop_cell(fdt
, offset
, "slb-size", cpu
->hash64_opts
->slb_size
)));
796 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,slb-size", cpu
->hash64_opts
->slb_size
)));
797 _FDT((fdt_setprop_string(fdt
, offset
, "status", "okay")));
798 _FDT((fdt_setprop(fdt
, offset
, "64-bit", NULL
, 0)));
800 if (ppc_has_spr(cpu
, SPR_PURR
)) {
801 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,purr", 1)));
803 if (ppc_has_spr(cpu
, SPR_PURR
)) {
804 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,spurr", 1)));
807 if (ppc_hash64_has(cpu
, PPC_HASH64_1TSEG
)) {
808 _FDT((fdt_setprop(fdt
, offset
, "ibm,processor-segment-sizes",
809 segs
, sizeof(segs
))));
812 /* Advertise VSX (vector extensions) if available
813 * 1 == VMX / Altivec available
816 * Only CPUs for which we create core types in spapr_cpu_core.c
817 * are possible, and all of those have VMX */
818 if (env
->insns_flags
& PPC_ALTIVEC
) {
819 if (spapr_get_cap(spapr
, SPAPR_CAP_VSX
) != 0) {
820 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,vmx", 2)));
822 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,vmx", 1)));
826 /* Advertise DFP (Decimal Floating Point) if available
827 * 0 / no property == no DFP
828 * 1 == DFP available */
829 if (spapr_get_cap(spapr
, SPAPR_CAP_DFP
) != 0) {
830 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,dfp", 1)));
833 page_sizes_prop_size
= ppc_create_page_sizes_prop(cpu
, page_sizes_prop
,
834 sizeof(page_sizes_prop
));
835 if (page_sizes_prop_size
) {
836 _FDT((fdt_setprop(fdt
, offset
, "ibm,segment-page-sizes",
837 page_sizes_prop
, page_sizes_prop_size
)));
840 spapr_dt_pa_features(spapr
, cpu
, fdt
, offset
);
842 spapr_dt_pi_features(spapr
, cpu
, fdt
, offset
);
844 _FDT((fdt_setprop_cell(fdt
, offset
, "ibm,chip-id",
845 cs
->cpu_index
/ vcpus_per_socket
)));
847 _FDT((fdt_setprop(fdt
, offset
, "ibm,pft-size",
848 pft_size_prop
, sizeof(pft_size_prop
))));
850 if (ms
->numa_state
->num_nodes
> 1) {
851 _FDT(spapr_numa_fixup_cpu_dt(spapr
, fdt
, offset
, cpu
));
854 _FDT(spapr_fixup_cpu_smt_dt(fdt
, offset
, cpu
, compat_smt
));
856 if (pcc
->radix_page_info
) {
857 for (i
= 0; i
< pcc
->radix_page_info
->count
; i
++) {
858 radix_AP_encodings
[i
] =
859 cpu_to_be32(pcc
->radix_page_info
->entries
[i
]);
861 _FDT((fdt_setprop(fdt
, offset
, "ibm,processor-radix-AP-encodings",
863 pcc
->radix_page_info
->count
*
864 sizeof(radix_AP_encodings
[0]))));
868 * We set this property to let the guest know that it can use the large
869 * decrementer and its width in bits.
871 if (spapr_get_cap(spapr
, SPAPR_CAP_LARGE_DECREMENTER
) != SPAPR_CAP_OFF
)
872 _FDT((fdt_setprop_u32(fdt
, offset
, "ibm,dec-bits",
873 pcc
->lrg_decr_bits
)));
876 static void spapr_dt_one_cpu(void *fdt
, SpaprMachineState
*spapr
, CPUState
*cs
,
879 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
880 int index
= spapr_get_vcpu_id(cpu
);
881 DeviceClass
*dc
= DEVICE_GET_CLASS(cs
);
882 g_autofree
char *nodename
= NULL
;
885 if (!spapr_is_thread0_in_vcore(spapr
, cpu
)) {
889 nodename
= g_strdup_printf("%s@%x", dc
->fw_name
, index
);
890 offset
= fdt_add_subnode(fdt
, cpus_offset
, nodename
);
892 spapr_dt_cpu(cs
, fdt
, offset
, spapr
);
896 static void spapr_dt_cpus(void *fdt
, SpaprMachineState
*spapr
)
904 cpus_offset
= fdt_add_subnode(fdt
, 0, "cpus");
906 _FDT((fdt_setprop_cell(fdt
, cpus_offset
, "#address-cells", 0x1)));
907 _FDT((fdt_setprop_cell(fdt
, cpus_offset
, "#size-cells", 0x0)));
910 * We walk the CPUs in reverse order to ensure that CPU DT nodes
911 * created by fdt_add_subnode() end up in the right order in FDT
912 * for the guest kernel the enumerate the CPUs correctly.
914 * The CPU list cannot be traversed in reverse order, so we need
920 rev
= g_renew(CPUState
*, rev
, n_cpus
+ 1);
924 for (i
= n_cpus
- 1; i
>= 0; i
--) {
925 spapr_dt_one_cpu(fdt
, spapr
, rev
[i
], cpus_offset
);
931 static int spapr_dt_rng(void *fdt
)
936 node
= qemu_fdt_add_subnode(fdt
, "/ibm,platform-facilities");
940 ret
= fdt_setprop_string(fdt
, node
, "device_type",
941 "ibm,platform-facilities");
942 ret
|= fdt_setprop_cell(fdt
, node
, "#address-cells", 0x1);
943 ret
|= fdt_setprop_cell(fdt
, node
, "#size-cells", 0x0);
945 node
= fdt_add_subnode(fdt
, node
, "ibm,random-v1");
949 ret
|= fdt_setprop_string(fdt
, node
, "compatible", "ibm,random");
954 static void spapr_dt_rtas(SpaprMachineState
*spapr
, void *fdt
)
956 MachineState
*ms
= MACHINE(spapr
);
958 GString
*hypertas
= g_string_sized_new(256);
959 GString
*qemu_hypertas
= g_string_sized_new(256);
960 uint32_t lrdr_capacity
[] = {
963 cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE
>> 32),
964 cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE
& 0xffffffff),
965 cpu_to_be32(ms
->smp
.max_cpus
/ ms
->smp
.threads
),
968 /* Do we have device memory? */
969 if (MACHINE(spapr
)->device_memory
) {
970 uint64_t max_device_addr
= MACHINE(spapr
)->device_memory
->base
+
971 memory_region_size(&MACHINE(spapr
)->device_memory
->mr
);
973 lrdr_capacity
[0] = cpu_to_be32(max_device_addr
>> 32);
974 lrdr_capacity
[1] = cpu_to_be32(max_device_addr
& 0xffffffff);
977 _FDT(rtas
= fdt_add_subnode(fdt
, 0, "rtas"));
980 add_str(hypertas
, "hcall-pft");
981 add_str(hypertas
, "hcall-term");
982 add_str(hypertas
, "hcall-dabr");
983 add_str(hypertas
, "hcall-interrupt");
984 add_str(hypertas
, "hcall-tce");
985 add_str(hypertas
, "hcall-vio");
986 add_str(hypertas
, "hcall-splpar");
987 add_str(hypertas
, "hcall-join");
988 add_str(hypertas
, "hcall-bulk");
989 add_str(hypertas
, "hcall-set-mode");
990 add_str(hypertas
, "hcall-sprg0");
991 add_str(hypertas
, "hcall-copy");
992 add_str(hypertas
, "hcall-debug");
993 add_str(hypertas
, "hcall-vphn");
994 if (spapr_get_cap(spapr
, SPAPR_CAP_RPT_INVALIDATE
) == SPAPR_CAP_ON
) {
995 add_str(hypertas
, "hcall-rpt-invalidate");
998 add_str(qemu_hypertas
, "hcall-memop1");
1000 if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
1001 add_str(hypertas
, "hcall-multi-tce");
1004 if (spapr
->resize_hpt
!= SPAPR_RESIZE_HPT_DISABLED
) {
1005 add_str(hypertas
, "hcall-hpt-resize");
1008 add_str(hypertas
, "hcall-watchdog");
1010 _FDT(fdt_setprop(fdt
, rtas
, "ibm,hypertas-functions",
1011 hypertas
->str
, hypertas
->len
));
1012 g_string_free(hypertas
, TRUE
);
1013 _FDT(fdt_setprop(fdt
, rtas
, "qemu,hypertas-functions",
1014 qemu_hypertas
->str
, qemu_hypertas
->len
));
1015 g_string_free(qemu_hypertas
, TRUE
);
1017 spapr_numa_write_rtas_dt(spapr
, fdt
, rtas
);
1020 * FWNMI reserves RTAS_ERROR_LOG_MAX for the machine check error log,
1021 * and 16 bytes per CPU for system reset error log plus an extra 8 bytes.
1023 * The system reset requirements are driven by existing Linux and PowerVM
1024 * implementation which (contrary to PAPR) saves r3 in the error log
1025 * structure like machine check, so Linux expects to find the saved r3
1026 * value at the address in r3 upon FWNMI-enabled sreset interrupt (and
1027 * does not look at the error value).
1029 * System reset interrupts are not subject to interlock like machine
1030 * check, so this memory area could be corrupted if the sreset is
1031 * interrupted by a machine check (or vice versa) if it was shared. To
1032 * prevent this, system reset uses per-CPU areas for the sreset save
1033 * area. A system reset that interrupts a system reset handler could
1034 * still overwrite this area, but Linux doesn't try to recover in that
1037 * The extra 8 bytes is required because Linux's FWNMI error log check
1040 * RTAS_MIN_SIZE is required for the RTAS blob itself.
1042 _FDT(fdt_setprop_cell(fdt
, rtas
, "rtas-size", RTAS_MIN_SIZE
+
1043 RTAS_ERROR_LOG_MAX
+
1044 ms
->smp
.max_cpus
* sizeof(uint64_t) * 2 +
1046 _FDT(fdt_setprop_cell(fdt
, rtas
, "rtas-error-log-max",
1047 RTAS_ERROR_LOG_MAX
));
1048 _FDT(fdt_setprop_cell(fdt
, rtas
, "rtas-event-scan-rate",
1049 RTAS_EVENT_SCAN_RATE
));
1051 g_assert(msi_nonbroken
);
1052 _FDT(fdt_setprop(fdt
, rtas
, "ibm,change-msix-capable", NULL
, 0));
1055 * According to PAPR, rtas ibm,os-term does not guarantee a return
1056 * back to the guest cpu.
1058 * While an additional ibm,extended-os-term property indicates
1059 * that rtas call return will always occur. Set this property.
1061 _FDT(fdt_setprop(fdt
, rtas
, "ibm,extended-os-term", NULL
, 0));
1063 _FDT(fdt_setprop(fdt
, rtas
, "ibm,lrdr-capacity",
1064 lrdr_capacity
, sizeof(lrdr_capacity
)));
1066 spapr_dt_rtas_tokens(fdt
, rtas
);
1070 * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
1071 * and the XIVE features that the guest may request and thus the valid
1072 * values for bytes 23..26 of option vector 5:
1074 static void spapr_dt_ov5_platform_support(SpaprMachineState
*spapr
, void *fdt
,
1077 PowerPCCPU
*first_ppc_cpu
= POWERPC_CPU(first_cpu
);
1080 23, 0x00, /* XICS / XIVE mode */
1081 24, 0x00, /* Hash/Radix, filled in below. */
1082 25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
1083 26, 0x40, /* Radix options: GTSE == yes. */
1086 if (spapr
->irq
->xics
&& spapr
->irq
->xive
) {
1087 val
[1] = SPAPR_OV5_XIVE_BOTH
;
1088 } else if (spapr
->irq
->xive
) {
1089 val
[1] = SPAPR_OV5_XIVE_EXPLOIT
;
1091 assert(spapr
->irq
->xics
);
1092 val
[1] = SPAPR_OV5_XIVE_LEGACY
;
1095 if (!ppc_check_compat(first_ppc_cpu
, CPU_POWERPC_LOGICAL_3_00
, 0,
1096 first_ppc_cpu
->compat_pvr
)) {
1098 * If we're in a pre POWER9 compat mode then the guest should
1099 * do hash and use the legacy interrupt mode
1101 val
[1] = SPAPR_OV5_XIVE_LEGACY
; /* XICS */
1102 val
[3] = 0x00; /* Hash */
1103 spapr_check_mmu_mode(false);
1104 } else if (kvm_enabled()) {
1105 if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
1106 val
[3] = 0x80; /* OV5_MMU_BOTH */
1107 } else if (kvmppc_has_cap_mmu_radix()) {
1108 val
[3] = 0x40; /* OV5_MMU_RADIX_300 */
1110 val
[3] = 0x00; /* Hash */
1113 /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
1116 _FDT(fdt_setprop(fdt
, chosen
, "ibm,arch-vec-5-platform-support",
1120 static void spapr_dt_chosen(SpaprMachineState
*spapr
, void *fdt
, bool reset
)
1122 MachineState
*machine
= MACHINE(spapr
);
1123 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(machine
);
1126 _FDT(chosen
= fdt_add_subnode(fdt
, 0, "chosen"));
1129 const char *boot_device
= spapr
->boot_device
;
1130 g_autofree
char *stdout_path
= spapr_vio_stdout_path(spapr
->vio_bus
);
1132 g_autofree
char *bootlist
= get_boot_devices_list(&cb
);
1134 if (machine
->kernel_cmdline
&& machine
->kernel_cmdline
[0]) {
1135 _FDT(fdt_setprop_string(fdt
, chosen
, "bootargs",
1136 machine
->kernel_cmdline
));
1139 if (spapr
->initrd_size
) {
1140 _FDT(fdt_setprop_cell(fdt
, chosen
, "linux,initrd-start",
1141 spapr
->initrd_base
));
1142 _FDT(fdt_setprop_cell(fdt
, chosen
, "linux,initrd-end",
1143 spapr
->initrd_base
+ spapr
->initrd_size
));
1146 if (spapr
->kernel_size
) {
1147 uint64_t kprop
[2] = { cpu_to_be64(spapr
->kernel_addr
),
1148 cpu_to_be64(spapr
->kernel_size
) };
1150 _FDT(fdt_setprop(fdt
, chosen
, "qemu,boot-kernel",
1151 &kprop
, sizeof(kprop
)));
1152 if (spapr
->kernel_le
) {
1153 _FDT(fdt_setprop(fdt
, chosen
, "qemu,boot-kernel-le", NULL
, 0));
1156 if (machine
->boot_config
.has_menu
&& machine
->boot_config
.menu
) {
1157 _FDT((fdt_setprop_cell(fdt
, chosen
, "qemu,boot-menu", true)));
1159 _FDT(fdt_setprop_cell(fdt
, chosen
, "qemu,graphic-width", graphic_width
));
1160 _FDT(fdt_setprop_cell(fdt
, chosen
, "qemu,graphic-height", graphic_height
));
1161 _FDT(fdt_setprop_cell(fdt
, chosen
, "qemu,graphic-depth", graphic_depth
));
1163 if (cb
&& bootlist
) {
1166 for (i
= 0; i
< cb
; i
++) {
1167 if (bootlist
[i
] == '\n') {
1171 _FDT(fdt_setprop_string(fdt
, chosen
, "qemu,boot-list", bootlist
));
1174 if (boot_device
&& strlen(boot_device
)) {
1175 _FDT(fdt_setprop_string(fdt
, chosen
, "qemu,boot-device", boot_device
));
1178 if (spapr
->want_stdout_path
&& stdout_path
) {
1180 * "linux,stdout-path" and "stdout" properties are
1181 * deprecated by linux kernel. New platforms should only
1182 * use the "stdout-path" property. Set the new property
1183 * and continue using older property to remain compatible
1184 * with the existing firmware.
1186 _FDT(fdt_setprop_string(fdt
, chosen
, "linux,stdout-path", stdout_path
));
1187 _FDT(fdt_setprop_string(fdt
, chosen
, "stdout-path", stdout_path
));
1191 * We can deal with BAR reallocation just fine, advertise it
1194 if (smc
->linux_pci_probe
) {
1195 _FDT(fdt_setprop_cell(fdt
, chosen
, "linux,pci-probe-only", 0));
1198 spapr_dt_ov5_platform_support(spapr
, fdt
, chosen
);
1201 _FDT(fdt_setprop(fdt
, chosen
, "rng-seed", spapr
->fdt_rng_seed
, 32));
1203 _FDT(spapr_dt_ovec(fdt
, chosen
, spapr
->ov5_cas
, "ibm,architecture-vec-5"));
1206 static void spapr_dt_hypervisor(SpaprMachineState
*spapr
, void *fdt
)
1208 /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
1209 * KVM to work under pHyp with some guest co-operation */
1211 uint8_t hypercall
[16];
1213 _FDT(hypervisor
= fdt_add_subnode(fdt
, 0, "hypervisor"));
1214 /* indicate KVM hypercall interface */
1215 _FDT(fdt_setprop_string(fdt
, hypervisor
, "compatible", "linux,kvm"));
1216 if (kvmppc_has_cap_fixup_hcalls()) {
1218 * Older KVM versions with older guest kernels were broken
1219 * with the magic page, don't allow the guest to map it.
1221 if (!kvmppc_get_hypercall(cpu_env(first_cpu
), hypercall
,
1222 sizeof(hypercall
))) {
1223 _FDT(fdt_setprop(fdt
, hypervisor
, "hcall-instructions",
1224 hypercall
, sizeof(hypercall
)));
1229 void *spapr_build_fdt(SpaprMachineState
*spapr
, bool reset
, size_t space
)
1231 MachineState
*machine
= MACHINE(spapr
);
1232 MachineClass
*mc
= MACHINE_GET_CLASS(machine
);
1233 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(machine
);
1234 uint32_t root_drc_type_mask
= 0;
1240 fdt
= g_malloc0(space
);
1241 _FDT((fdt_create_empty_tree(fdt
, space
)));
1244 _FDT(fdt_setprop_string(fdt
, 0, "device_type", "chrp"));
1245 _FDT(fdt_setprop_string(fdt
, 0, "model", "IBM pSeries (emulated by qemu)"));
1246 _FDT(fdt_setprop_string(fdt
, 0, "compatible", "qemu,pseries"));
1248 /* Guest UUID & Name*/
1249 buf
= qemu_uuid_unparse_strdup(&qemu_uuid
);
1250 _FDT(fdt_setprop_string(fdt
, 0, "vm,uuid", buf
));
1251 if (qemu_uuid_set
) {
1252 _FDT(fdt_setprop_string(fdt
, 0, "system-id", buf
));
1256 if (qemu_get_vm_name()) {
1257 _FDT(fdt_setprop_string(fdt
, 0, "ibm,partition-name",
1258 qemu_get_vm_name()));
1261 /* Host Model & Serial Number */
1262 if (spapr
->host_model
) {
1263 _FDT(fdt_setprop_string(fdt
, 0, "host-model", spapr
->host_model
));
1264 } else if (smc
->broken_host_serial_model
&& kvmppc_get_host_model(&buf
)) {
1265 _FDT(fdt_setprop_string(fdt
, 0, "host-model", buf
));
1269 if (spapr
->host_serial
) {
1270 _FDT(fdt_setprop_string(fdt
, 0, "host-serial", spapr
->host_serial
));
1271 } else if (smc
->broken_host_serial_model
&& kvmppc_get_host_serial(&buf
)) {
1272 _FDT(fdt_setprop_string(fdt
, 0, "host-serial", buf
));
1276 _FDT(fdt_setprop_cell(fdt
, 0, "#address-cells", 2));
1277 _FDT(fdt_setprop_cell(fdt
, 0, "#size-cells", 2));
1279 /* /interrupt controller */
1280 spapr_irq_dt(spapr
, spapr_max_server_number(spapr
), fdt
, PHANDLE_INTC
);
1282 ret
= spapr_dt_memory(spapr
, fdt
);
1284 error_report("couldn't setup memory nodes in fdt");
1289 spapr_dt_vdevice(spapr
->vio_bus
, fdt
);
1291 if (object_resolve_path_type("", TYPE_SPAPR_RNG
, NULL
)) {
1292 ret
= spapr_dt_rng(fdt
);
1294 error_report("could not set up rng device in the fdt");
1299 QLIST_FOREACH(phb
, &spapr
->phbs
, list
) {
1300 ret
= spapr_dt_phb(spapr
, phb
, PHANDLE_INTC
, fdt
, NULL
);
1302 error_report("couldn't setup PCI devices in fdt");
1307 spapr_dt_cpus(fdt
, spapr
);
1309 /* ibm,drc-indexes and friends */
1310 if (smc
->dr_lmb_enabled
) {
1311 root_drc_type_mask
|= SPAPR_DR_CONNECTOR_TYPE_LMB
;
1313 if (smc
->dr_phb_enabled
) {
1314 root_drc_type_mask
|= SPAPR_DR_CONNECTOR_TYPE_PHB
;
1316 if (mc
->nvdimm_supported
) {
1317 root_drc_type_mask
|= SPAPR_DR_CONNECTOR_TYPE_PMEM
;
1319 if (root_drc_type_mask
) {
1320 _FDT(spapr_dt_drc(fdt
, 0, NULL
, root_drc_type_mask
));
1323 if (mc
->has_hotpluggable_cpus
) {
1324 int offset
= fdt_path_offset(fdt
, "/cpus");
1325 ret
= spapr_dt_drc(fdt
, offset
, NULL
, SPAPR_DR_CONNECTOR_TYPE_CPU
);
1327 error_report("Couldn't set up CPU DR device tree properties");
1332 /* /event-sources */
1333 spapr_dt_events(spapr
, fdt
);
1336 spapr_dt_rtas(spapr
, fdt
);
1339 spapr_dt_chosen(spapr
, fdt
, reset
);
1342 if (kvm_enabled()) {
1343 spapr_dt_hypervisor(spapr
, fdt
);
1346 /* Build memory reserve map */
1348 if (spapr
->kernel_size
) {
1349 _FDT((fdt_add_mem_rsv(fdt
, spapr
->kernel_addr
,
1350 spapr
->kernel_size
)));
1352 if (spapr
->initrd_size
) {
1353 _FDT((fdt_add_mem_rsv(fdt
, spapr
->initrd_base
,
1354 spapr
->initrd_size
)));
1358 /* NVDIMM devices */
1359 if (mc
->nvdimm_supported
) {
1360 spapr_dt_persistent_memory(spapr
, fdt
);
1366 static uint64_t translate_kernel_address(void *opaque
, uint64_t addr
)
1368 SpaprMachineState
*spapr
= opaque
;
1370 return (addr
& 0x0fffffff) + spapr
->kernel_addr
;
1373 static void emulate_spapr_hypercall(PPCVirtualHypervisor
*vhyp
,
1376 CPUPPCState
*env
= &cpu
->env
;
1378 /* The TCG path should also be holding the BQL at this point */
1379 g_assert(bql_locked());
1381 g_assert(!vhyp_cpu_in_nested(cpu
));
1383 if (FIELD_EX64(env
->msr
, MSR
, PR
)) {
1384 hcall_dprintf("Hypercall made with MSR[PR]=1\n");
1385 env
->gpr
[3] = H_PRIVILEGE
;
1387 env
->gpr
[3] = spapr_hypercall(cpu
, env
->gpr
[3], &env
->gpr
[4]);
1391 struct LPCRSyncState
{
1396 static void do_lpcr_sync(CPUState
*cs
, run_on_cpu_data arg
)
1398 struct LPCRSyncState
*s
= arg
.host_ptr
;
1399 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1400 CPUPPCState
*env
= &cpu
->env
;
1403 cpu_synchronize_state(cs
);
1404 lpcr
= env
->spr
[SPR_LPCR
];
1407 ppc_store_lpcr(cpu
, lpcr
);
1410 void spapr_set_all_lpcrs(target_ulong value
, target_ulong mask
)
1413 struct LPCRSyncState s
= {
1418 run_on_cpu(cs
, do_lpcr_sync
, RUN_ON_CPU_HOST_PTR(&s
));
1422 /* May be used when the machine is not running */
1423 void spapr_init_all_lpcrs(target_ulong value
, target_ulong mask
)
1427 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1428 CPUPPCState
*env
= &cpu
->env
;
1431 lpcr
= env
->spr
[SPR_LPCR
];
1432 lpcr
&= ~(LPCR_HR
| LPCR_UPRT
);
1433 ppc_store_lpcr(cpu
, lpcr
);
1437 static bool spapr_get_pate(PPCVirtualHypervisor
*vhyp
, PowerPCCPU
*cpu
,
1438 target_ulong lpid
, ppc_v3_pate_t
*entry
)
1440 SpaprMachineState
*spapr
= SPAPR_MACHINE(vhyp
);
1441 SpaprCpuState
*spapr_cpu
= spapr_cpu_state(cpu
);
1443 if (!spapr_cpu
->in_nested
) {
1446 /* Copy PATE1:GR into PATE0:HR */
1447 entry
->dw0
= spapr
->patb_entry
& PATE0_HR
;
1448 entry
->dw1
= spapr
->patb_entry
;
1451 if (spapr_nested_api(spapr
) == NESTED_API_KVM_HV
) {
1452 return spapr_get_pate_nested_hv(spapr
, cpu
, lpid
, entry
);
1453 } else if (spapr_nested_api(spapr
) == NESTED_API_PAPR
) {
1454 return spapr_get_pate_nested_papr(spapr
, cpu
, lpid
, entry
);
1456 g_assert_not_reached();
1461 #define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
1462 #define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
1463 #define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
1464 #define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
1465 #define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
1468 * Get the fd to access the kernel htab, re-opening it if necessary
1470 static int get_htab_fd(SpaprMachineState
*spapr
)
1472 Error
*local_err
= NULL
;
1474 if (spapr
->htab_fd
>= 0) {
1475 return spapr
->htab_fd
;
1478 spapr
->htab_fd
= kvmppc_get_htab_fd(false, 0, &local_err
);
1479 if (spapr
->htab_fd
< 0) {
1480 error_report_err(local_err
);
1483 return spapr
->htab_fd
;
1486 void close_htab_fd(SpaprMachineState
*spapr
)
1488 if (spapr
->htab_fd
>= 0) {
1489 close(spapr
->htab_fd
);
1491 spapr
->htab_fd
= -1;
1494 static hwaddr
spapr_hpt_mask(PPCVirtualHypervisor
*vhyp
)
1496 SpaprMachineState
*spapr
= SPAPR_MACHINE(vhyp
);
1498 return HTAB_SIZE(spapr
) / HASH_PTEG_SIZE_64
- 1;
1501 static target_ulong
spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor
*vhyp
)
1503 SpaprMachineState
*spapr
= SPAPR_MACHINE(vhyp
);
1505 assert(kvm_enabled());
1511 return (target_ulong
)(uintptr_t)spapr
->htab
| (spapr
->htab_shift
- 18);
1514 static const ppc_hash_pte64_t
*spapr_map_hptes(PPCVirtualHypervisor
*vhyp
,
1517 SpaprMachineState
*spapr
= SPAPR_MACHINE(vhyp
);
1518 hwaddr pte_offset
= ptex
* HASH_PTE_SIZE_64
;
1522 * HTAB is controlled by KVM. Fetch into temporary buffer
1524 ppc_hash_pte64_t
*hptes
= g_malloc(n
* HASH_PTE_SIZE_64
);
1525 kvmppc_read_hptes(hptes
, ptex
, n
);
1530 * HTAB is controlled by QEMU. Just point to the internally
1533 return (const ppc_hash_pte64_t
*)(spapr
->htab
+ pte_offset
);
1536 static void spapr_unmap_hptes(PPCVirtualHypervisor
*vhyp
,
1537 const ppc_hash_pte64_t
*hptes
,
1540 SpaprMachineState
*spapr
= SPAPR_MACHINE(vhyp
);
1543 g_free((void *)hptes
);
1546 /* Nothing to do for qemu managed HPT */
1549 void spapr_store_hpte(PowerPCCPU
*cpu
, hwaddr ptex
,
1550 uint64_t pte0
, uint64_t pte1
)
1552 SpaprMachineState
*spapr
= SPAPR_MACHINE(cpu
->vhyp
);
1553 hwaddr offset
= ptex
* HASH_PTE_SIZE_64
;
1556 kvmppc_write_hpte(ptex
, pte0
, pte1
);
1558 if (pte0
& HPTE64_V_VALID
) {
1559 stq_p(spapr
->htab
+ offset
+ HPTE64_DW1
, pte1
);
1561 * When setting valid, we write PTE1 first. This ensures
1562 * proper synchronization with the reading code in
1563 * ppc_hash64_pteg_search()
1566 stq_p(spapr
->htab
+ offset
, pte0
);
1568 stq_p(spapr
->htab
+ offset
, pte0
);
1570 * When clearing it we set PTE0 first. This ensures proper
1571 * synchronization with the reading code in
1572 * ppc_hash64_pteg_search()
1575 stq_p(spapr
->htab
+ offset
+ HPTE64_DW1
, pte1
);
1580 static void spapr_hpte_set_c(PPCVirtualHypervisor
*vhyp
, hwaddr ptex
,
1583 hwaddr offset
= ptex
* HASH_PTE_SIZE_64
+ HPTE64_DW1_C
;
1584 SpaprMachineState
*spapr
= SPAPR_MACHINE(vhyp
);
1587 /* There should always be a hash table when this is called */
1588 error_report("spapr_hpte_set_c called with no hash table !");
1592 /* The HW performs a non-atomic byte update */
1593 stb_p(spapr
->htab
+ offset
, (pte1
& 0xff) | 0x80);
1596 static void spapr_hpte_set_r(PPCVirtualHypervisor
*vhyp
, hwaddr ptex
,
1599 hwaddr offset
= ptex
* HASH_PTE_SIZE_64
+ HPTE64_DW1_R
;
1600 SpaprMachineState
*spapr
= SPAPR_MACHINE(vhyp
);
1603 /* There should always be a hash table when this is called */
1604 error_report("spapr_hpte_set_r called with no hash table !");
1608 /* The HW performs a non-atomic byte update */
1609 stb_p(spapr
->htab
+ offset
, ((pte1
>> 8) & 0xff) | 0x01);
1612 int spapr_hpt_shift_for_ramsize(uint64_t ramsize
)
1616 /* We aim for a hash table of size 1/128 the size of RAM (rounded
1617 * up). The PAPR recommendation is actually 1/64 of RAM size, but
1618 * that's much more than is needed for Linux guests */
1619 shift
= ctz64(pow2ceil(ramsize
)) - 7;
1620 shift
= MAX(shift
, 18); /* Minimum architected size */
1621 shift
= MIN(shift
, 46); /* Maximum architected size */
1625 void spapr_free_hpt(SpaprMachineState
*spapr
)
1627 qemu_vfree(spapr
->htab
);
1629 spapr
->htab_shift
= 0;
1630 close_htab_fd(spapr
);
1633 int spapr_reallocate_hpt(SpaprMachineState
*spapr
, int shift
, Error
**errp
)
1638 /* Clean up any HPT info from a previous boot */
1639 spapr_free_hpt(spapr
);
1641 rc
= kvmppc_reset_htab(shift
);
1643 if (rc
== -EOPNOTSUPP
) {
1644 error_setg(errp
, "HPT not supported in nested guests");
1649 /* kernel-side HPT needed, but couldn't allocate one */
1650 error_setg_errno(errp
, errno
, "Failed to allocate KVM HPT of order %d",
1652 error_append_hint(errp
, "Try smaller maxmem?\n");
1654 } else if (rc
> 0) {
1655 /* kernel-side HPT allocated */
1658 "Requested order %d HPT, but kernel allocated order %ld",
1660 error_append_hint(errp
, "Try smaller maxmem?\n");
1664 spapr
->htab_shift
= shift
;
1667 /* kernel-side HPT not needed, allocate in userspace instead */
1668 size_t size
= 1ULL << shift
;
1671 spapr
->htab
= qemu_memalign(size
, size
);
1672 memset(spapr
->htab
, 0, size
);
1673 spapr
->htab_shift
= shift
;
1675 for (i
= 0; i
< size
/ HASH_PTE_SIZE_64
; i
++) {
1676 DIRTY_HPTE(HPTE(spapr
->htab
, i
));
1679 /* We're setting up a hash table, so that means we're not radix */
1680 spapr
->patb_entry
= 0;
1681 spapr_init_all_lpcrs(0, LPCR_HR
| LPCR_UPRT
);
1685 void spapr_setup_hpt(SpaprMachineState
*spapr
)
1689 if (spapr
->resize_hpt
== SPAPR_RESIZE_HPT_DISABLED
) {
1690 hpt_shift
= spapr_hpt_shift_for_ramsize(MACHINE(spapr
)->maxram_size
);
1692 uint64_t current_ram_size
;
1694 current_ram_size
= MACHINE(spapr
)->ram_size
+ get_plugged_memory_size();
1695 hpt_shift
= spapr_hpt_shift_for_ramsize(current_ram_size
);
1697 spapr_reallocate_hpt(spapr
, hpt_shift
, &error_fatal
);
1699 if (kvm_enabled()) {
1700 hwaddr vrma_limit
= kvmppc_vrma_limit(spapr
->htab_shift
);
1702 /* Check our RMA fits in the possible VRMA */
1703 if (vrma_limit
< spapr
->rma_size
) {
1704 error_report("Unable to create %" HWADDR_PRIu
1705 "MiB RMA (VRMA only allows %" HWADDR_PRIu
"MiB",
1706 spapr
->rma_size
/ MiB
, vrma_limit
/ MiB
);
1712 void spapr_check_mmu_mode(bool guest_radix
)
1715 if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) {
1716 error_report("Guest requested unavailable MMU mode (radix).");
1720 if (kvm_enabled() && kvmppc_has_cap_mmu_radix()
1721 && !kvmppc_has_cap_mmu_hash_v3()) {
1722 error_report("Guest requested unavailable MMU mode (hash).");
1728 static void spapr_machine_reset(MachineState
*machine
, ShutdownCause reason
)
1730 SpaprMachineState
*spapr
= SPAPR_MACHINE(machine
);
1731 PowerPCCPU
*first_ppc_cpu
;
1736 if (reason
!= SHUTDOWN_CAUSE_SNAPSHOT_LOAD
) {
1738 * Record-replay snapshot load must not consume random, this was
1739 * already replayed from initial machine reset.
1741 qemu_guest_getrandom_nofail(spapr
->fdt_rng_seed
, 32);
1745 confidential_guest_kvm_reset(machine
->cgs
, &error_fatal
);
1747 spapr_caps_apply(spapr
);
1748 spapr_nested_reset(spapr
);
1750 first_ppc_cpu
= POWERPC_CPU(first_cpu
);
1751 if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
1752 ppc_type_check_compat(machine
->cpu_type
, CPU_POWERPC_LOGICAL_3_00
, 0,
1753 spapr
->max_compat_pvr
)) {
1755 * If using KVM with radix mode available, VCPUs can be started
1756 * without a HPT because KVM will start them in radix mode.
1757 * Set the GR bit in PATE so that we know there is no HPT.
1759 spapr
->patb_entry
= PATE1_GR
;
1760 spapr_set_all_lpcrs(LPCR_HR
| LPCR_UPRT
, LPCR_HR
| LPCR_UPRT
);
1762 spapr_setup_hpt(spapr
);
1765 qemu_devices_reset(reason
);
1767 spapr_ovec_cleanup(spapr
->ov5_cas
);
1768 spapr
->ov5_cas
= spapr_ovec_new();
1770 ppc_init_compat_all(spapr
->max_compat_pvr
, &error_fatal
);
1773 * This is fixing some of the default configuration of the XIVE
1774 * devices. To be called after the reset of the machine devices.
1776 spapr_irq_reset(spapr
, &error_fatal
);
1779 * There is no CAS under qtest. Simulate one to please the code that
1780 * depends on spapr->ov5_cas. This is especially needed to test device
1781 * unplug, so we do that before resetting the DRCs.
1783 if (qtest_enabled()) {
1784 spapr_ovec_cleanup(spapr
->ov5_cas
);
1785 spapr
->ov5_cas
= spapr_ovec_clone(spapr
->ov5
);
1788 spapr_nvdimm_finish_flushes();
1790 /* DRC reset may cause a device to be unplugged. This will cause troubles
1791 * if this device is used by another device (eg, a running vhost backend
1792 * will crash QEMU if the DIMM holding the vring goes away). To avoid such
1793 * situations, we reset DRCs after all devices have been reset.
1795 spapr_drc_reset_all(spapr
);
1797 spapr_clear_pending_events(spapr
);
1800 * We place the device tree just below either the top of the RMA,
1801 * or just below 2GB, whichever is lower, so that it can be
1802 * processed with 32-bit real mode code if necessary
1804 fdt_addr
= MIN(spapr
->rma_size
, FDT_MAX_ADDR
) - FDT_MAX_SIZE
;
1806 fdt
= spapr_build_fdt(spapr
, true, FDT_MAX_SIZE
);
1808 spapr_vof_reset(spapr
, fdt
, &error_fatal
);
1810 * Do not pack the FDT as the client may change properties.
1811 * VOF client does not expect the FDT so we do not load it to the VM.
1815 /* Should only fail if we've built a corrupted tree */
1818 spapr_cpu_set_entry_state(first_ppc_cpu
, SPAPR_ENTRY_POINT
,
1820 cpu_physical_memory_write(fdt_addr
, fdt
, fdt_totalsize(fdt
));
1822 qemu_fdt_dumpdtb(fdt
, fdt_totalsize(fdt
));
1824 g_free(spapr
->fdt_blob
);
1825 spapr
->fdt_size
= fdt_totalsize(fdt
);
1826 spapr
->fdt_initial_size
= spapr
->fdt_size
;
1827 spapr
->fdt_blob
= fdt
;
1829 /* Set machine->fdt for 'dumpdtb' QMP/HMP command */
1832 /* Set up the entry state */
1833 first_ppc_cpu
->env
.gpr
[5] = 0;
1835 spapr
->fwnmi_system_reset_addr
= -1;
1836 spapr
->fwnmi_machine_check_addr
= -1;
1837 spapr
->fwnmi_machine_check_interlock
= -1;
1839 /* Signal all vCPUs waiting on this condition */
1840 qemu_cond_broadcast(&spapr
->fwnmi_machine_check_interlock_cond
);
1842 migrate_del_blocker(&spapr
->fwnmi_migration_blocker
);
1845 static void spapr_create_nvram(SpaprMachineState
*spapr
)
1847 DeviceState
*dev
= qdev_new("spapr-nvram");
1848 DriveInfo
*dinfo
= drive_get(IF_PFLASH
, 0, 0);
1851 qdev_prop_set_drive_err(dev
, "drive", blk_by_legacy_dinfo(dinfo
),
1855 qdev_realize_and_unref(dev
, &spapr
->vio_bus
->bus
, &error_fatal
);
1857 spapr
->nvram
= (struct SpaprNvram
*)dev
;
1860 static void spapr_rtc_create(SpaprMachineState
*spapr
)
1862 object_initialize_child_with_props(OBJECT(spapr
), "rtc", &spapr
->rtc
,
1863 sizeof(spapr
->rtc
), TYPE_SPAPR_RTC
,
1864 &error_fatal
, NULL
);
1865 qdev_realize(DEVICE(&spapr
->rtc
), NULL
, &error_fatal
);
1866 object_property_add_alias(OBJECT(spapr
), "rtc-time", OBJECT(&spapr
->rtc
),
1870 /* Returns whether we want to use VGA or not */
1871 static bool spapr_vga_init(PCIBus
*pci_bus
, Error
**errp
)
1873 vga_interface_created
= true;
1874 switch (vga_interface_type
) {
1882 return pci_vga_init(pci_bus
) != NULL
;
1885 "Unsupported VGA mode, only -vga std or -vga virtio is supported");
1890 static int spapr_pre_load(void *opaque
)
1894 rc
= spapr_caps_pre_load(opaque
);
1902 static int spapr_post_load(void *opaque
, int version_id
)
1904 SpaprMachineState
*spapr
= (SpaprMachineState
*)opaque
;
1907 err
= spapr_caps_post_migration(spapr
);
1913 * In earlier versions, there was no separate qdev for the PAPR
1914 * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1915 * So when migrating from those versions, poke the incoming offset
1916 * value into the RTC device
1918 if (version_id
< 3) {
1919 err
= spapr_rtc_import_offset(&spapr
->rtc
, spapr
->rtc_offset
);
1925 if (kvm_enabled() && spapr
->patb_entry
) {
1926 PowerPCCPU
*cpu
= POWERPC_CPU(first_cpu
);
1927 bool radix
= !!(spapr
->patb_entry
& PATE1_GR
);
1928 bool gtse
= !!(cpu
->env
.spr
[SPR_LPCR
] & LPCR_GTSE
);
1931 * Update LPCR:HR and UPRT as they may not be set properly in
1934 spapr_set_all_lpcrs(radix
? (LPCR_HR
| LPCR_UPRT
) : 0,
1935 LPCR_HR
| LPCR_UPRT
);
1937 err
= kvmppc_configure_v3_mmu(cpu
, radix
, gtse
, spapr
->patb_entry
);
1939 error_report("Process table config unsupported by the host");
1944 err
= spapr_irq_post_load(spapr
, version_id
);
1952 static int spapr_pre_save(void *opaque
)
1956 rc
= spapr_caps_pre_save(opaque
);
1964 static bool version_before_3(void *opaque
, int version_id
)
1966 return version_id
< 3;
1969 static bool spapr_pending_events_needed(void *opaque
)
1971 SpaprMachineState
*spapr
= (SpaprMachineState
*)opaque
;
1972 return !QTAILQ_EMPTY(&spapr
->pending_events
);
1975 static const VMStateDescription vmstate_spapr_event_entry
= {
1976 .name
= "spapr_event_log_entry",
1978 .minimum_version_id
= 1,
1979 .fields
= (const VMStateField
[]) {
1980 VMSTATE_UINT32(summary
, SpaprEventLogEntry
),
1981 VMSTATE_UINT32(extended_length
, SpaprEventLogEntry
),
1982 VMSTATE_VBUFFER_ALLOC_UINT32(extended_log
, SpaprEventLogEntry
, 0,
1983 NULL
, extended_length
),
1984 VMSTATE_END_OF_LIST()
1988 static const VMStateDescription vmstate_spapr_pending_events
= {
1989 .name
= "spapr_pending_events",
1991 .minimum_version_id
= 1,
1992 .needed
= spapr_pending_events_needed
,
1993 .fields
= (const VMStateField
[]) {
1994 VMSTATE_QTAILQ_V(pending_events
, SpaprMachineState
, 1,
1995 vmstate_spapr_event_entry
, SpaprEventLogEntry
, next
),
1996 VMSTATE_END_OF_LIST()
static bool spapr_ov5_cas_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;
    SpaprOptionVector *ov5_mask = spapr_ovec_new();
    bool cas_needed;

    /* Prior to the introduction of SpaprOptionVector, we had two option
     * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiatable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiatable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * need to include the CAS-negotiated options in the migration stream,
     * unless they only affect boot-time behaviour.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
    spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);

    /* We need extra information if we have any bits outside the mask
     * defined above */
    cas_needed = !spapr_ovec_subset(spapr->ov5, ov5_mask);

    spapr_ovec_cleanup(ov5_mask);

    return cas_needed;
}

static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
                                 vmstate_spapr_ovec, SpaprOptionVector),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_patb_entry_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    return !!spapr->patb_entry;
}

static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_patb_entry_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(patb_entry, SpaprMachineState),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_irq_map_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    return spapr->irq_map && !bitmap_empty(spapr->irq_map, spapr->irq_map_nr);
}

static const VMStateDescription vmstate_spapr_irq_map = {
    .name = "spapr_irq_map",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_irq_map_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_dtb_needed(void *opaque)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(opaque);

    return smc->update_dt_enabled;
}

static int spapr_dtb_pre_load(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    g_free(spapr->fdt_blob);
    spapr->fdt_blob = NULL;
    spapr->fdt_size = 0;

    return 0;
}

static const VMStateDescription vmstate_spapr_dtb = {
    .name = "spapr_dtb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_dtb_needed,
    .pre_load = spapr_dtb_pre_load,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(fdt_initial_size, SpaprMachineState),
        VMSTATE_UINT32(fdt_size, SpaprMachineState),
        VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL,
                                     fdt_size),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_fwnmi_needed(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    return spapr->fwnmi_machine_check_addr != -1;
}

static int spapr_fwnmi_pre_save(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;

    /*
     * Check if machine check handling is in progress and print a
     * warning before migrating.
     */
    if (spapr->fwnmi_machine_check_interlock != -1) {
        warn_report("A machine check is being handled during migration. The "
                    "handler may run and log hardware errors on the destination");
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_fwnmi = {
    .name = "spapr_fwnmi",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_fwnmi_needed,
    .pre_save = spapr_fwnmi_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState),
        VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState),
        VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .pre_load = spapr_pre_load,
    .post_load = spapr_post_load,
    .pre_save = spapr_pre_save,
    .fields = (const VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset */
        VMSTATE_UINT64_TEST(rtc_offset, SpaprMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        &vmstate_spapr_cap_htm,
        &vmstate_spapr_cap_vsx,
        &vmstate_spapr_cap_dfp,
        &vmstate_spapr_cap_cfpc,
        &vmstate_spapr_cap_sbbc,
        &vmstate_spapr_cap_ibs,
        &vmstate_spapr_cap_hpt_maxpagesize,
        &vmstate_spapr_irq_map,
        &vmstate_spapr_cap_nested_kvm_hv,
        &vmstate_spapr_dtb,
        &vmstate_spapr_cap_large_decr,
        &vmstate_spapr_cap_ccf_assist,
        &vmstate_spapr_cap_fwnmi,
        &vmstate_spapr_fwnmi,
        &vmstate_spapr_cap_rpt_invalidate,
        &vmstate_spapr_cap_ail_mode_3,
        &vmstate_spapr_cap_nested_papr,
        NULL
    }
};

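/*
 * Editor's note: each subsection above is only put on the wire when its
 * .needed callback returns true (e.g. vmstate_spapr_patb_entry only when
 * spapr->patb_entry is non-zero), which keeps the stream compatible with
 * destinations that do not know about newer subsections.
 */
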
static int htab_save_setup(QEMUFile *f, void *opaque, Error **errp)
{
    SpaprMachineState *spapr = opaque;

    /* "Iteration" header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
    } else {
        qemu_put_be32(f, spapr->htab_shift);
    }

    if (spapr->htab) {
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        if (spapr->htab_shift) {
            assert(kvm_enabled());
        }
    }

    return 0;
}

static void htab_save_chunk(QEMUFile *f, SpaprMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}

static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}

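/*
 * Editor's note, illustrative layout of one chunk as emitted by
 * htab_save_chunk():
 *   be32  chunkstart - index of the first HPTE in the chunk
 *   be16  n_valid    - number of valid HPTEs whose contents follow
 *   be16  n_invalid  - number of invalid HPTEs (not transmitted)
 *   n_valid * HASH_PTE_SIZE_64 (16) bytes of raw HPTE data
 * htab_save_end_marker() closes the stream with an all-zero header,
 * which is what htab_load() treats as "End of Stream".
 */
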
static void htab_save_first_pass(QEMUFile *f, SpaprMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !migration_rate_exceeded(f));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}

static int htab_save_later_pass(QEMUFile *f, SpaprMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
            sent += index - chunkstart;

            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!migration_rate_exceeded(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}

#define MAX_ITERATION_NS    5000000 /* 5 ms */
#define MAX_KVM_BUF_SIZE    2048

static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    htab_save_end_marker(f);

    return rc;
}

static int htab_save_complete(QEMUFile *f, void *opaque)
{
    SpaprMachineState *spapr = opaque;
    int fd;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 0;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        int rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    } else {
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    }

    /* End marker */
    htab_save_end_marker(f);

    return 0;
}

static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    SpaprMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;
    Error *local_err = NULL;

    if (version_id < 1 || version_id > 1) {
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr == -1) {
        spapr_free_hpt(spapr);
        return 0;
    }

    if (section_hdr) {
        int ret;

        /* First section gives the htab size */
        ret = spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
        return 0;
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true, 0, &local_err);
        if (fd < 0) {
            error_report_err(local_err);
            return fd;
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            return -EINVAL;
        }

        if (spapr->htab) {
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid,
                                        &local_err);
            if (rc < 0) {
                error_report_err(local_err);
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}

static void htab_save_cleanup(void *opaque)
{
    SpaprMachineState *spapr = opaque;

    close_htab_fd(spapr);
}

static SaveVMHandlers savevm_htab_handlers = {
    .save_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .save_cleanup = htab_save_cleanup,
    .load_state = htab_load,
};

static void spapr_boot_set(void *opaque, const char *boot_device,
                           Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(opaque);

    g_free(spapr->boot_device);
    spapr->boot_device = g_strdup(boot_device);
}

static void spapr_create_lmb_dr_connectors(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size) / lmb_size;
    int i;

    g_assert(!nr_lmbs || machine->device_memory);
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr;

        addr = i * lmb_size + machine->device_memory->base;
        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
                               addr / lmb_size);
    }
}

/*
 * If RAM size, maxmem size and individual node mem sizes aren't aligned
 * to SPAPR_MEMORY_BLOCK_SIZE (256MB), then refuse to start the guest
 * since we can't support such unaligned sizes with DRCONF_MEMORY.
 */
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
{
    int i;

    if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %" PRIu64 " MiB",
                   machine->ram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %" PRIu64 " MiB",
                   machine->maxram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    for (i = 0; i < machine->numa_state->num_nodes; i++) {
        if (machine->numa_state->nodes[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
            error_setg(errp,
                       "Node %d memory size 0x%" PRIx64
                       " is not aligned to %" PRIu64 " MiB",
                       i, machine->numa_state->nodes[i].node_mem,
                       SPAPR_MEMORY_BLOCK_SIZE / MiB);
            return;
        }
    }
}

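/*
 * Editor's note, for example: "-m 4G,maxmem=8G" passes these checks, while a
 * NUMA node sized 1000M would be rejected because it is not a multiple of
 * the 256 MiB SPAPR_MEMORY_BLOCK_SIZE.
 */
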
/* find cpu slot in machine->possible_cpus by core_id */
static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    int index = id / ms->smp.threads;

    if (index >= ms->possible_cpus->len) {
        return NULL;
    }
    if (idx) {
        *idx = index;
    }
    return &ms->possible_cpus->cpus[index];
}

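/*
 * Editor's note: e.g. with ms->smp.threads == 8, core-id 24 resolves to
 * index 3, i.e. machine->possible_cpus->cpus[3]; core ids that are not a
 * multiple of the thread count are rejected earlier, in
 * spapr_core_pre_plug().
 */
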
static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp)
{
    MachineState *ms = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    Error *local_err = NULL;
    bool vsmt_user = !!spapr->vsmt;
    int kvm_smt = kvmppc_smt_threads();
    int ret;
    unsigned int smp_threads = ms->smp.threads;

    if (tcg_enabled()) {
        if (smp_threads > 1 &&
            !ppc_type_check_compat(ms->cpu_type, CPU_POWERPC_LOGICAL_2_07, 0,
                                   spapr->max_compat_pvr)) {
            error_setg(errp, "TCG only supports SMT on POWER8 or newer CPUs");
            return;
        }

        if (smp_threads > 8) {
            error_setg(errp, "TCG cannot support more than 8 threads/core "
                       "on a pseries machine");
            return;
        }
    }
    if (!is_power_of_2(smp_threads)) {
        error_setg(errp, "Cannot support %d threads/core on a pseries "
                   "machine because it must be a power of 2", smp_threads);
        return;
    }

    /* Determine the VSMT mode to use: */
    if (vsmt_user) {
        if (spapr->vsmt < smp_threads) {
            error_setg(errp, "Cannot support VSMT mode %d"
                       " because it must be >= threads/core (%d)",
                       spapr->vsmt, smp_threads);
            return;
        }
        /* In this case, spapr->vsmt has been set by the command line */
    } else if (!smc->smp_threads_vsmt) {
        /*
         * Default VSMT value is tricky, because we need it to be as
         * consistent as possible (for migration), but this requires
         * changing it for at least some existing cases. We pick 8 as
         * the value that we'd get with KVM on POWER8, the
         * overwhelmingly common case in production systems.
         */
        spapr->vsmt = MAX(8, smp_threads);
    } else {
        spapr->vsmt = smp_threads;
    }

    /* KVM: If necessary, set the SMT mode: */
    if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
        ret = kvmppc_set_smt_threads(spapr->vsmt);
        if (ret) {
            /* Looks like KVM isn't able to change VSMT mode */
            error_setg(&local_err,
                       "Failed to set KVM's VSMT mode to %d (errno %d)",
                       spapr->vsmt, ret);
            /* We can live with that if the default one is big enough
             * for the number of threads, and a submultiple of the one
             * we want. In this case we'll waste some vcpu ids, but
             * behaviour will be correct */
            if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
                warn_report_err(local_err);
            } else {
                if (!vsmt_user) {
                    error_append_hint(&local_err,
                                      "On PPC, a VM with %d threads/core"
                                      " on a host with %d threads/core"
                                      " requires the use of VSMT mode %d.\n",
                                      smp_threads, kvm_smt, spapr->vsmt);
                }
                kvmppc_error_append_smt_possible_hint(&local_err);
                error_propagate(errp, local_err);
            }
        }
    }
    /* else TCG: nothing to do currently */
}

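/*
 * Editor's note, example of the selection above: with threads/core = 4 and
 * no explicit vsmt= option on a machine type that leaves smp_threads_vsmt
 * unset, the default becomes MAX(8, 4) = 8; an explicit vsmt=2 would be
 * rejected because VSMT must be >= threads/core.
 */
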
static void spapr_init_cpus(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *type = spapr_get_cpu_core_type(machine->cpu_type);
    const CPUArchIdList *possible_cpus;
    unsigned int smp_cpus = machine->smp.cpus;
    unsigned int smp_threads = machine->smp.threads;
    unsigned int max_cpus = machine->smp.max_cpus;
    int boot_cores_nr = smp_cpus / smp_threads;
    int i;

    possible_cpus = mc->possible_cpu_arch_ids(machine);
    if (mc->has_hotpluggable_cpus) {
        if (smp_cpus % smp_threads) {
            error_report("smp_cpus (%u) must be multiple of threads (%u)",
                         smp_cpus, smp_threads);
            exit(1);
        }
        if (max_cpus % smp_threads) {
            error_report("max_cpus (%u) must be multiple of threads (%u)",
                         max_cpus, smp_threads);
            exit(1);
        }
    } else {
        if (max_cpus != smp_cpus) {
            error_report("This machine version does not support CPU hotplug");
            exit(1);
        }
        boot_cores_nr = possible_cpus->len;
    }

    if (smc->pre_2_10_has_unused_icps) {
        for (i = 0; i < spapr_max_server_number(spapr); i++) {
            /* Dummy entries get deregistered when real ICPState objects
             * are registered during CPU core hotplug.
             */
            pre_2_10_vmstate_register_dummy_icp(i);
        }
    }

    for (i = 0; i < possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        if (mc->has_hotpluggable_cpus) {
            spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
                                   spapr_vcpu_id(spapr, core_id));
        }

        if (i < boot_cores_nr) {
            Object *core = object_new(type);
            int nr_threads = smp_threads;

            /* Handle the partially filled core for older machine types */
            if ((i + 1) * smp_threads >= smp_cpus) {
                nr_threads = smp_cpus - i * smp_threads;
            }

            object_property_set_int(core, "nr-threads", nr_threads,
                                    &error_fatal);
            object_property_set_int(core, CPU_CORE_PROP_CORE_ID, core_id,
                                    &error_fatal);
            qdev_realize(DEVICE(core), NULL, &error_fatal);

            object_unref(core);
        }
    }
}

static PCIHostState *spapr_create_default_phb(void)
{
    DeviceState *dev;

    dev = qdev_new(TYPE_SPAPR_PCI_HOST_BRIDGE);
    qdev_prop_set_uint32(dev, "index", 0);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    return PCI_HOST_BRIDGE(dev);
}

static hwaddr spapr_rma_size(SpaprMachineState *spapr, Error **errp)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    hwaddr rma_size = machine->ram_size;
    hwaddr node0_size = spapr_node0_size(machine);

    /* RMA has to fit in the first NUMA node */
    rma_size = MIN(rma_size, node0_size);

    /*
     * VRMA access is via a special 1TiB SLB mapping, so the RMA can
     * never exceed that.
     */
    rma_size = MIN(rma_size, 1 * TiB);

    /*
     * Clamp the RMA size based on machine type. This is for
     * migration compatibility with older qemu versions, which limited
     * the RMA size for complicated and mostly bad reasons.
     */
    if (smc->rma_limit) {
        rma_size = MIN(rma_size, smc->rma_limit);
    }

    if (rma_size < MIN_RMA_SLOF) {
        error_setg(errp,
                   "pSeries SLOF firmware requires >= %" HWADDR_PRIx
                   " MiB guest RMA (Real Mode Area memory)",
                   MIN_RMA_SLOF / MiB);
        return 0;
    }

    return rma_size;
}

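/*
 * Editor's note, example of the clamping above: a 16 GiB guest with all
 * memory in node 0 and no machine-type rma_limit keeps rma_size = 16 GiB
 * (well below the 1 TiB VRMA cap); anything under MIN_RMA_SLOF (128 MiB)
 * is refused.
 */
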
static void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    int i;

    for (i = 0; i < machine->ram_slots; i++) {
        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i);
    }
}

/* pSeries LPAR / sPAPR hardware init */
static void spapr_machine_init(MachineState *machine)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(machine);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
    const char *bios_name = machine->firmware ?: bios_default;
    g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    const char *kernel_filename = machine->kernel_filename;
    const char *initrd_filename = machine->initrd_filename;
    PCIHostState *phb;
    bool has_vga;
    int i;
    MemoryRegion *sysmem = get_system_memory();
    long load_limit, fw_size;
    Error *resize_hpt_err = NULL;
    NICInfo *nd;

    if (!filename) {
        error_report("Could not find LPAR firmware '%s'", bios_name);
        exit(1);
    }
    fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
    if (fw_size <= 0) {
        error_report("Could not load LPAR firmware '%s'", filename);
        exit(1);
    }

    /*
     * If Secure VM (PEF) support is configured, then initialize it.
     */
    if (machine->cgs) {
        confidential_guest_kvm_init(machine->cgs, &error_fatal);
    }

    msi_nonbroken = true;

    QLIST_INIT(&spapr->phbs);
    QTAILQ_INIT(&spapr->pending_dimm_unplugs);

    /* Determine capabilities to run with */
    spapr_caps_init(spapr);

    kvmppc_check_papr_resize_hpt(&resize_hpt_err);
    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
        /*
         * If the user explicitly requested a mode we should either
         * supply it, or fail completely (which we do below). But if
         * it's not set explicitly, we reset our mode to something
         * that works.
         */
        if (resize_hpt_err) {
            spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
            error_free(resize_hpt_err);
            resize_hpt_err = NULL;
        } else {
            spapr->resize_hpt = smc->resize_hpt_default;
        }
    }

    assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);

    if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
        /*
         * User requested HPT resize, but this host can't supply it. Bail out.
         */
        error_report_err(resize_hpt_err);
        exit(1);
    }
    error_free(resize_hpt_err);

    spapr->rma_size = spapr_rma_size(spapr, &error_fatal);

    /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
    load_limit = MIN(spapr->rma_size, FDT_MAX_ADDR) - FW_OVERHEAD;

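    /*
     * Editor's note: e.g. with an RMA of 2 GiB or more this is
     * FDT_MAX_ADDR (0x80000000) minus FW_OVERHEAD (0x2800000),
     * i.e. a load limit of 0x7d800000.
     */
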
    /*
     * VSMT must be set in order to be able to compute VCPU ids, ie to
     * call spapr_max_server_number() or spapr_vcpu_id().
     */
    spapr_set_vsmt_mode(spapr, &error_fatal);

    /* Set up Interrupt Controller before we create the VCPUs */
    spapr_irq_init(spapr, &error_fatal);

    /* Set up containers for ibm,client-architecture-support negotiated options
     */
    spapr->ov5 = spapr_ovec_new();
    spapr->ov5_cas = spapr_ovec_new();

    if (smc->dr_lmb_enabled) {
        spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
        spapr_validate_node_memory(machine, &error_fatal);
    }

    spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);

    /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */
    if (!smc->pre_6_2_numa_affinity) {
        spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY);
    }

    /* advertise support for dedicated HP event source to guests */
    if (spapr->use_hotplug_event_source) {
        spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
    }

    /* advertise support for HPT resizing */
    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
    }

    /* advertise support for ibm,dynamic-memory-v2 */
    spapr_ovec_set(spapr->ov5, OV5_DRMEM_V2);

    /* advertise XIVE on POWER9 machines */
    if (spapr->irq->xive) {
        spapr_ovec_set(spapr->ov5, OV5_XIVE_EXPLOIT);
    }

    spapr_init_cpus(spapr);

    /* Init numa_assoc_array */
    spapr_numa_associativity_init(spapr, machine);

    if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
        ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
                              spapr->max_compat_pvr)) {
        spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300);
        /* KVM and TCG always allow GTSE with radix... */
        spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
    }
    /* ... but not with hash (currently). */

    if (kvm_enabled()) {
        /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
        kvmppc_enable_logical_ci_hcalls();
        kvmppc_enable_set_mode_hcall();

        /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
        kvmppc_enable_clear_ref_mod_hcalls();

        /* Enable H_PAGE_INIT */
        kvmppc_enable_h_page_init();
    }

    memory_region_add_subregion(sysmem, 0, machine->ram);

    /* initialize hotplug memory address space */
    if (machine->ram_size < machine->maxram_size) {
        ram_addr_t device_mem_size = machine->maxram_size - machine->ram_size;
        hwaddr device_mem_base;

        /*
         * Limit the number of hotpluggable memory slots to half the number
         * of slots that KVM supports, leaving the other half for PCI and
         * other devices. However ensure that number of slots doesn't drop
         * below 32.
         */
        int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
                           SPAPR_MAX_RAM_SLOTS;

        if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
            max_memslots = SPAPR_MAX_RAM_SLOTS;
        }
        if (machine->ram_slots > max_memslots) {
            error_report("Specified number of memory slots %"
                         PRIu64 " exceeds max supported %d",
                         machine->ram_slots, max_memslots);
            exit(1);
        }

        device_mem_base = ROUND_UP(machine->ram_size, SPAPR_DEVICE_MEM_ALIGN);
        machine_memory_devices_init(machine, device_mem_base, device_mem_size);
    }

    if (smc->dr_lmb_enabled) {
        spapr_create_lmb_dr_connectors(spapr);
    }

    if (mc->nvdimm_supported) {
        spapr_create_nvdimm_dr_connectors(spapr);
    }

    /* Set up RTAS event infrastructure */
    spapr_events_init(spapr);

    /* Set up the RTC RTAS interfaces */
    spapr_rtc_create(spapr);

    /* Set up VIO bus */
    spapr->vio_bus = spapr_vio_bus_init();

    for (i = 0; serial_hd(i); i++) {
        spapr_vty_create(spapr->vio_bus, serial_hd(i));
    }

    /* We always have at least the nvram device on VIO */
    spapr_create_nvram(spapr);

    /*
     * Setup hotplug / dynamic-reconfiguration connectors. Top-level
     * connectors (described in the root DT node's "ibm,drc-types" property)
     * are pre-initialized here. Additional child connectors (such as
     * connectors for a PHB's PCI slots) are added as needed during their
     * parent's realization.
     */
    if (smc->dr_phb_enabled) {
        for (i = 0; i < SPAPR_MAX_PHBS; i++) {
            spapr_dr_connector_new(OBJECT(machine), TYPE_SPAPR_DRC_PHB, i);
        }
    }

    /* Set up PCI */
    spapr_pci_rtas_init();

    phb = spapr_create_default_phb();

    while ((nd = qemu_find_nic_info("spapr-vlan", true, "ibmveth"))) {
        spapr_vlan_create(spapr->vio_bus, nd);
    }

    pci_init_nic_devices(phb->bus, NULL);

    for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
        spapr_vscsi_create(spapr->vio_bus);
    }

    /* Graphics */
    has_vga = spapr_vga_init(phb->bus, &error_fatal);
    if (has_vga) {
        spapr->want_stdout_path = !machine->enable_graphics;
        machine->usb |= defaults_enabled() && !machine->usb_disabled;
    } else {
        spapr->want_stdout_path = true;
    }

    if (machine->usb) {
        if (smc->use_ohci_by_default) {
            pci_create_simple(phb->bus, -1, "pci-ohci");
        } else {
            pci_create_simple(phb->bus, -1, "nec-usb-xhci");
        }

        if (has_vga) {
            USBBus *usb_bus;

            usb_bus = USB_BUS(object_resolve_type_unambiguous(TYPE_USB_BUS,
                                                              &error_abort));
            usb_create_simple(usb_bus, "usb-kbd");
            usb_create_simple(usb_bus, "usb-mouse");
        }
    }

    if (kernel_filename) {
        uint64_t loaded_addr = 0;

        spapr->kernel_size = load_elf(kernel_filename, NULL,
                                      translate_kernel_address, spapr,
                                      NULL, &loaded_addr, NULL, NULL, 1,
                                      PPC_ELF_MACHINE, 0, 0);
        if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
            spapr->kernel_size = load_elf(kernel_filename, NULL,
                                          translate_kernel_address, spapr,
                                          NULL, &loaded_addr, NULL, NULL, 0,
                                          PPC_ELF_MACHINE, 0, 0);
            spapr->kernel_le = spapr->kernel_size > 0;
        }
        if (spapr->kernel_size < 0) {
            error_report("error loading %s: %s", kernel_filename,
                         load_elf_strerror(spapr->kernel_size));
            exit(1);
        }

        if (spapr->kernel_addr != loaded_addr) {
            warn_report("spapr: kernel_addr changed from 0x%"PRIx64
                        " to 0x%"PRIx64,
                        spapr->kernel_addr, loaded_addr);
            spapr->kernel_addr = loaded_addr;
        }

        /* load initrd */
        if (initrd_filename) {
            /* Try to locate the initrd in the gap between the kernel
             * and the firmware. Add a bit of space just in case.
             */
            spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size
                                  + 0x1ffff) & ~0xffff;
            spapr->initrd_size = load_image_targphys(initrd_filename,
                                                     spapr->initrd_base,
                                                     load_limit
                                                     - spapr->initrd_base);
            if (spapr->initrd_size < 0) {
                error_report("could not load initial ram disk '%s'",
                             initrd_filename);
                exit(1);
            }
        }
    }

    /* FIXME: Should register things through the MachineState's qdev
     * interface, this is a legacy from the sPAPREnvironment structure
     * which predated MachineState but had a similar function */
    vmstate_register(NULL, 0, &vmstate_spapr, spapr);
    register_savevm_live("spapr/htab", VMSTATE_INSTANCE_ID_ANY, 1,
                         &savevm_htab_handlers, spapr);

    qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine));

    qemu_register_boot_set(spapr_boot_set, spapr);

    /*
     * Nothing needs to be done to resume a suspended guest because
     * suspending does not change the machine state, so no need for
     * a ->wakeup method.
     */
    qemu_register_wakeup_support();

    if (kvm_enabled()) {
        /* to stop and start vmclock */
        qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
                                         &spapr->tb);

        kvmppc_spapr_enable_inkernel_multitce();
    }

    qemu_cond_init(&spapr->fwnmi_machine_check_interlock_cond);

    if (spapr->vof) {
        spapr->vof->fw_size = fw_size; /* for claim() on itself */
        spapr_register_hypercall(KVMPPC_H_VOF_CLIENT, spapr_h_vof_client);
    }

    spapr_watchdog_init(spapr);
}

#define DEFAULT_KVM_TYPE "auto"
static int spapr_kvm_type(MachineState *machine, const char *vm_type)
{
    /*
     * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to
     * accommodate the 'HV' and 'PR' formats that exist in the
     * wild. The 'auto' mode is being introduced already as
     * lower-case, thus we don't need to bother checking for
     * it here.
     */
    if (!vm_type || !strcmp(vm_type, DEFAULT_KVM_TYPE)) {
        return 0;
    }

    if (!g_ascii_strcasecmp(vm_type, "hv")) {
        return 1;
    }

    if (!g_ascii_strcasecmp(vm_type, "pr")) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}

/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    SpaprPhbState *phb = CAST(SpaprPhbState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);
    PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);

    if (d && bus) {
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
             * 0x8000 | (target << 8) | (bus << 5) | lun
             * (see the "Logical unit addressing format" table in SAM5)
             */
            unsigned id = 0x8000 | (d->id << 8) | (d->channel << 5) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            if (d->lun >= 256) {
                /* Use the LUN "flat space addressing method" */
                id |= 0x4000;
            }
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes the USB devices, and if it recognizes that the device is a
     * storage device, it changes its name to "storage" instead of "usb-host",
     * and additionally adds a child node for the SCSI LUN, so the correct
     * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
     */
    if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
        USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
        if (usb_device_is_scsi_storage(usbdev)) {
            return g_strdup_printf("storage@%s/disk", usbdev->port->path);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    if (vsc) {
        /* Same logic as virtio above */
        unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
        return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
    }

    if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
        /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
        PCIDevice *pdev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
        return g_strdup_printf("pci@%x", PCI_SLOT(pdev->devfn));
    }

    if (pcidev) {
        return spapr_pci_fw_dev_name(pcidev);
    }

    return NULL;
}

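/*
 * Editor's note, illustrative values for the SRP LUN encodings used above:
 * a spapr-vscsi disk with id=1, channel=0, lun=0 encodes as
 * 0x8000 | (1 << 8) = 0x8100, giving a path like "disk@8100000000000000";
 * a virtio-scsi disk with id=2, lun=3 encodes as
 * 0x1000000 | (2 << 16) | 3 = 0x1020003, giving "disk@102000300000000".
 */
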
static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->kvm_type);
}

static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
    spapr->kvm_type = g_strdup(value);
}

static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return spapr->use_hotplug_event_source;
}

static void spapr_set_modern_hotplug_events(Object *obj, bool value,
                                            Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->use_hotplug_event_source = value;
}

static bool spapr_get_msix_emulation(Object *obj, Error **errp)
{
    return true;
}

static char *spapr_get_resize_hpt(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    switch (spapr->resize_hpt) {
    case SPAPR_RESIZE_HPT_DEFAULT:
        return g_strdup("default");
    case SPAPR_RESIZE_HPT_DISABLED:
        return g_strdup("disabled");
    case SPAPR_RESIZE_HPT_ENABLED:
        return g_strdup("enabled");
    case SPAPR_RESIZE_HPT_REQUIRED:
        return g_strdup("required");
    }
    g_assert_not_reached();
}

static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (strcmp(value, "default") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
    } else if (strcmp(value, "disabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
    } else if (strcmp(value, "enabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
    } else if (strcmp(value, "required") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
    } else {
        error_setg(errp, "Bad value for \"resize-hpt\" property");
    }
}

static bool spapr_get_vof(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return spapr->vof != NULL;
}

static void spapr_set_vof(Object *obj, bool value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (spapr->vof) {
        vof_cleanup(spapr->vof);
        g_free(spapr->vof);
        spapr->vof = NULL;
    }
    if (!value) {
        return;
    }
    spapr->vof = g_malloc0(sizeof(*spapr->vof));
}

static char *spapr_get_ic_mode(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (spapr->irq == &spapr_irq_xics_legacy) {
        return g_strdup("legacy");
    } else if (spapr->irq == &spapr_irq_xics) {
        return g_strdup("xics");
    } else if (spapr->irq == &spapr_irq_xive) {
        return g_strdup("xive");
    } else if (spapr->irq == &spapr_irq_dual) {
        return g_strdup("dual");
    }
    g_assert_not_reached();
}

static void spapr_set_ic_mode(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    if (SPAPR_MACHINE_GET_CLASS(spapr)->legacy_irq_allocation) {
        error_setg(errp, "This machine only uses the legacy XICS backend, don't pass ic-mode");
        return;
    }

    /* The legacy IRQ backend can not be set */
    if (strcmp(value, "xics") == 0) {
        spapr->irq = &spapr_irq_xics;
    } else if (strcmp(value, "xive") == 0) {
        spapr->irq = &spapr_irq_xive;
    } else if (strcmp(value, "dual") == 0) {
        spapr->irq = &spapr_irq_dual;
    } else {
        error_setg(errp, "Bad value for \"ic-mode\" property");
    }
}

static char *spapr_get_host_model(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->host_model);
}

static void spapr_set_host_model(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->host_model);
    spapr->host_model = g_strdup(value);
}

static char *spapr_get_host_serial(Object *obj, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->host_serial);
}

static void spapr_set_host_serial(Object *obj, const char *value, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->host_serial);
    spapr->host_serial = g_strdup(value);
}

static void spapr_instance_init(Object *obj)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    MachineState *ms = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    /*
     * NVDIMM support went live in 5.1 without considering that, in
     * other archs, the user needs to enable NVDIMM support with the
     * 'nvdimm' machine option and the default behavior is NVDIMM
     * support disabled. It is too late to roll back to the standard
     * behavior without breaking 5.1 guests.
     */
    if (mc->nvdimm_supported) {
        ms->nvdimms_state->is_enabled = true;
    }

    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;
    spapr->kvm_type = g_strdup(DEFAULT_KVM_TYPE);
    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (auto,"
                                    " hv, pr). Defaults to 'auto'. This mode will use"
                                    " any available KVM module loaded in the host,"
                                    " where kvm_hv takes precedence if both kvm_hv and"
                                    " kvm_pr are loaded.");
    object_property_add_bool(obj, "modern-hotplug-events",
                             spapr_get_modern_hotplug_events,
                             spapr_set_modern_hotplug_events);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)");
    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode");

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)");
    object_property_add_uint32_ptr(obj, "vsmt",
                                   &spapr->vsmt, OBJ_PROP_FLAG_READWRITE);
    object_property_set_description(obj, "vsmt",
                                    "Virtual SMT: KVM behaves as if this were"
                                    " the host's SMT mode");

    object_property_add_bool(obj, "vfio-no-msix-emulation",
                             spapr_get_msix_emulation, NULL);

    object_property_add_uint64_ptr(obj, "kernel-addr",
                                   &spapr->kernel_addr, OBJ_PROP_FLAG_READWRITE);
    object_property_set_description(obj, "kernel-addr",
                                    stringify(KERNEL_LOAD_ADDR)
                                    " for -kernel is the default");
    spapr->kernel_addr = KERNEL_LOAD_ADDR;

    object_property_add_bool(obj, "x-vof", spapr_get_vof, spapr_set_vof);
    object_property_set_description(obj, "x-vof",
                                    "Enable Virtual Open Firmware (experimental)");

    /* The machine class defines the default interrupt controller mode */
    spapr->irq = smc->irq;
    object_property_add_str(obj, "ic-mode", spapr_get_ic_mode,
                            spapr_set_ic_mode);
    object_property_set_description(obj, "ic-mode",
                                    "Specifies the interrupt controller mode (xics, xive, dual)");

    object_property_add_str(obj, "host-model",
                            spapr_get_host_model, spapr_set_host_model);
    object_property_set_description(obj, "host-model",
                                    "Host model to advertise in guest device tree");
    object_property_add_str(obj, "host-serial",
                            spapr_get_host_serial, spapr_set_host_serial);
    object_property_set_description(obj, "host-serial",
                                    "Host serial number to advertise in guest device tree");
}

static void spapr_machine_finalizefn(Object *obj)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
}

void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    CPUPPCState *env = cpu_env(cs);

    cpu_synchronize_state(cs);
    /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */
    if (spapr->fwnmi_system_reset_addr != -1) {
        uint64_t rtas_addr, addr;

        /* get rtas addr from fdt */
        rtas_addr = spapr_get_rtas_addr();
        if (!rtas_addr) {
            qemu_system_guest_panicked(NULL);
            return;
        }

        addr = rtas_addr + RTAS_ERROR_LOG_MAX + cs->cpu_index * sizeof(uint64_t) * 2;
        stq_be_phys(&address_space_memory, addr, env->gpr[3]);
        stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0);
        env->gpr[3] = addr;
    }
    ppc_cpu_do_system_reset(cs);
    if (spapr->fwnmi_system_reset_addr != -1) {
        env->nip = spapr->fwnmi_system_reset_addr;
    }
}

static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        if (cs->cpu_index == cpu_index) {
            async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
            break;
        }
    }
}

int spapr_lmb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                          void *fdt, int *fdt_start_offset, Error **errp)
{
    uint64_t addr;
    uint32_t node;

    addr = spapr_drc_index(drc) * SPAPR_MEMORY_BLOCK_SIZE;
    node = object_property_get_uint(OBJECT(drc->dev), PC_DIMM_NODE_PROP,
                                    &error_abort);
    *fdt_start_offset = spapr_dt_memory_node(spapr, fdt, node, addr,
                                             SPAPR_MEMORY_BLOCK_SIZE);
    return 0;
}

static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
                           bool dedicated_hp_event_source)
{
    SpaprDrc *drc;
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    int i;
    uint64_t addr = addr_start;
    bool hotplugged = spapr_drc_hotplugged(dev);

    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        /*
         * memory_device_get_free_addr() provided a range of free addresses
         * that doesn't overlap with any existing mapping at pre-plug. The
         * corresponding LMB DRCs are thus assumed to be all attachable.
         */
        spapr_drc_attach(drc, dev);
        if (!hotplugged) {
            spapr_drc_reset(drc);
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }
    /* send hotplug notification to the
     * guest only in case of hotplugged memory
     */
    if (hotplugged) {
        if (dedicated_hp_event_source) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                  addr_start / SPAPR_MEMORY_BLOCK_SIZE);
            g_assert(drc);
            spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                                   nr_lmbs,
                                                   spapr_drc_index(drc));
        } else {
            spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                           nr_lmbs);
        }
    }
}

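/*
 * Editor's note: e.g. plugging a 1 GiB DIMM walks nr_lmbs = 4 blocks of
 * SPAPR_MEMORY_BLOCK_SIZE (256 MiB), attaching one LMB DRC per block,
 * indexed by addr / SPAPR_MEMORY_BLOCK_SIZE.
 */
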
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    uint64_t size, addr;
    int64_t slot;
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);

    size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);

    pc_dimm_plug(dimm, MACHINE(ms));

    if (!is_nvdimm) {
        addr = object_property_get_uint(OBJECT(dimm),
                                        PC_DIMM_ADDR_PROP, &error_abort);
        spapr_add_lmbs(dev, addr, size,
                       spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT));
    } else {
        slot = object_property_get_int(OBJECT(dimm),
                                       PC_DIMM_SLOT_PROP, &error_abort);
        /* We should have valid slot number at this point */
        g_assert(slot >= 0);
        spapr_add_nvdimm(dev, slot);
    }
}

static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev);
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    Error *local_err = NULL;
    uint64_t size;
    Object *memdev;
    hwaddr pagesize;

    if (!smc->dr_lmb_enabled) {
        error_setg(errp, "Memory hotplug not supported for this machine");
        return;
    }

    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (is_nvdimm) {
        if (!spapr_nvdimm_validate(hotplug_dev, NVDIMM(dev), size, errp)) {
            return;
        }
    } else if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Hotplugged memory size must be a multiple of "
                   "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB);
        return;
    }

    memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP,
                                      &error_abort);
    pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(memdev));
    if (!spapr_check_pagesize(spapr, pagesize, errp)) {
        return;
    }

    pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), errp);
}

{
3708 QTAILQ_ENTRY(SpaprDimmState
) next
;
3711 static SpaprDimmState
*spapr_pending_dimm_unplugs_find(SpaprMachineState
*s
,
3714 SpaprDimmState
*dimm_state
= NULL
;
3716 QTAILQ_FOREACH(dimm_state
, &s
->pending_dimm_unplugs
, next
) {
3717 if (dimm_state
->dimm
== dimm
) {
3724 static SpaprDimmState
*spapr_pending_dimm_unplugs_add(SpaprMachineState
*spapr
,
3728 SpaprDimmState
*ds
= NULL
;
3731 * If this request is for a DIMM whose removal had failed earlier
3732 * (due to guest's refusal to remove the LMBs), we would have this
3733 * dimm already in the pending_dimm_unplugs list. In that
3734 * case don't add again.
3736 ds
= spapr_pending_dimm_unplugs_find(spapr
, dimm
);
3738 ds
= g_new0(SpaprDimmState
, 1);
3739 ds
->nr_lmbs
= nr_lmbs
;
3741 QTAILQ_INSERT_HEAD(&spapr
->pending_dimm_unplugs
, ds
, next
);
3746 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState
*spapr
,
3747 SpaprDimmState
*dimm_state
)
3749 QTAILQ_REMOVE(&spapr
->pending_dimm_unplugs
, dimm_state
, next
);
3753 static SpaprDimmState
*spapr_recover_pending_dimm_state(SpaprMachineState
*ms
,
3757 uint64_t size
= memory_device_get_region_size(MEMORY_DEVICE(dimm
),
3759 uint32_t nr_lmbs
= size
/ SPAPR_MEMORY_BLOCK_SIZE
;
3760 uint32_t avail_lmbs
= 0;
3761 uint64_t addr_start
, addr
;
3764 addr_start
= object_property_get_uint(OBJECT(dimm
), PC_DIMM_ADDR_PROP
,
3768 for (i
= 0; i
< nr_lmbs
; i
++) {
3769 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
,
3770 addr
/ SPAPR_MEMORY_BLOCK_SIZE
);
3775 addr
+= SPAPR_MEMORY_BLOCK_SIZE
;
3778 return spapr_pending_dimm_unplugs_add(ms
, avail_lmbs
, dimm
);
void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev)
{
    SpaprDimmState *ds;
    PCDIMMDevice *dimm;
    SpaprDrc *drc;
    uint32_t nr_lmbs;
    uint64_t size, addr_start, addr;
    int i;

    if (!dev) {
        return;
    }

    dimm = PC_DIMM(dev);
    ds = spapr_pending_dimm_unplugs_find(spapr, dimm);

    /*
     * 'ds == NULL' would mean that the DIMM doesn't have a pending
     * unplug state, but one of its DRC is marked as unplug_requested.
     * This is bad and weird enough to g_assert() out.
     */
    g_assert(ds);

    spapr_pending_dimm_unplugs_remove(spapr, ds);

    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
    nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &error_abort);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        drc->unplug_requested = false;
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    /*
     * Tell QAPI that something happened and the memory
     * hotunplug wasn't successful.
     */
    qapi_event_send_device_unplug_guest_error(dev->id,
                                              dev->canonical_path);
}

/* Callback to be called during DRC release. */
void spapr_lmb_release(DeviceState *dev)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_ctrl);
    SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /* This information will get lost if a migration occurs
     * during the unplug process. In this case recover it. */
    if (ds == NULL) {
        ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
        /* The DRC being examined by the caller at least must be counted */
        g_assert(ds->nr_lmbs);
    }

    if (--ds->nr_lmbs) {
        return;
    }

    /*
     * Now that all the LMBs have been removed by the guest, call the
     * unplug handler chain. This can never fail.
     */
    hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
    object_unparent(OBJECT(dev));
}

static void spapr_memory_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    SpaprDimmState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /* We really shouldn't get this far without anything to unplug */
    g_assert(ds);

    pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
    qdev_unrealize(dev);
    spapr_pending_dimm_unplugs_remove(spapr, ds);
}

static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    SpaprDrc *drc;
    uint32_t nr_lmbs;
    uint64_t size, addr_start, addr;
    int i;

    if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
        error_setg(errp, "nvdimm device hot unplug is not supported yet.");
        return;
    }

    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort);
    nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &error_abort);

    /*
     * An existing pending dimm state for this DIMM means that there is an
     * unplug operation in progress, waiting for the spapr_lmb_release
     * callback to complete the job (BQL can't cover that far). In this case,
     * bail out to avoid detaching DRCs that were already released.
     */
    if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
        error_setg(errp, "Memory unplug already in progress for device %s",
                   dev->id);
        return;
    }

    spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        spapr_drc_unplug_request(drc);
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                          addr_start / SPAPR_MEMORY_BLOCK_SIZE);
    spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                              nr_lmbs, spapr_drc_index(drc));
}

/* Callback to be called during DRC release. */
void spapr_core_release(DeviceState *dev)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);

    /* Call the unplug handler chain. This can never fail. */
    hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
    object_unparent(OBJECT(dev));
}

static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    MachineState *ms = MACHINE(hotplug_dev);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
    CPUCore *cc = CPU_CORE(dev);
    CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);

    if (smc->pre_2_10_has_unused_icps) {
        SpaprCpuCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            CPUState *cs = CPU(sc->threads[i]);

            pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
        }
    }

    g_assert(core_slot);
    core_slot->cpu = NULL;
    qdev_unrealize(dev);
}

void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    int index;
    SpaprDrc *drc;
    CPUCore *cc = CPU_CORE(dev);

    if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }

    if (index == 0) {
        error_setg(errp, "Boot CPU core may not be unplugged");
        return;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));
    g_assert(drc);

    if (!spapr_drc_unplug_requested(drc)) {
        spapr_drc_unplug_request(drc);
    }

    /*
     * spapr_hotplug_req_remove_by_index is left unguarded, out of the
     * "!spapr_drc_unplug_requested" check, to allow for multiple IRQ
     * pulses removing the same CPU. Otherwise, in a failed hotunplug
     * attempt (e.g. the kernel will refuse to remove the last online
     * CPU), we will never attempt it again because unplug_requested
     * will still be 'true' in that case.
     */
    spapr_hotplug_req_remove_by_index(drc);
}

int spapr_core_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                           void *fdt, int *fdt_start_offset, Error **errp)
{
    SpaprCpuCore *core = SPAPR_CPU_CORE(drc->dev);
    CPUState *cs = CPU(core->threads[0]);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    DeviceClass *dc = DEVICE_GET_CLASS(cs);
    int id = spapr_get_vcpu_id(cpu);
    g_autofree char *nodename = NULL;
    int offset;

    nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
    offset = fdt_add_subnode(fdt, 0, nodename);

    spapr_dt_cpu(cs, fdt, offset, spapr);

    /*
     * spapr_dt_cpu() does not fill the 'name' property in the
     * CPU node. The function is called during boot process, before
     * and after CAS, and overwriting the 'name' property written
     * by SLOF is not allowed.
     *
     * Write it manually after spapr_dt_cpu(). This makes the hotplug
     * CPUs more compatible with the coldplugged ones, which have
     * the 'name' property. Linux Kernel also relies on this
     * property to identify CPU nodes.
     */
    _FDT((fdt_setprop_string(fdt, offset, "name", nodename)));

    *fdt_start_offset = offset;
    return 0;
}

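/*
 * Editor's note: the node name built above is "<fw_name>@<vcpu-id>", e.g.
 * something like "PowerPC,POWER9@8" for the second core of a POWER9 guest
 * running with VSMT 8.
 */
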
static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    SpaprCpuCore *core = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(dev);
    SpaprDrc *drc;
    CPUArchId *core_slot;
    int index;
    bool hotplugged = spapr_drc_hotplugged(dev);
    int i;

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    g_assert(core_slot); /* Already checked in spapr_core_pre_plug() */

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));

    g_assert(drc || !mc->has_hotpluggable_cpus);

    if (drc) {
        /*
         * spapr_core_pre_plug() already buys us this is a brand new
         * core being plugged into a free slot. Nothing should already
         * be attached to the corresponding DRC.
         */
        spapr_drc_attach(drc, dev);

        if (hotplugged) {
            /*
             * Send hotplug notification interrupt to the guest only
             * in case of hotplugged CPUs.
             */
            spapr_hotplug_req_add_by_index(drc);
        } else {
            spapr_drc_reset(drc);
        }
    }

    core_slot->cpu = CPU(dev);

    /*
     * Set compatibility mode to match the boot CPU, which was either set
     * by the machine reset code or by CAS. This really shouldn't fail at
     * this point.
     */
    for (i = 0; i < cc->nr_threads; i++) {
        ppc_set_compat(core->threads[i], POWERPC_CPU(first_cpu)->compat_pvr,
                       &error_abort);
    }

    if (smc->pre_2_10_has_unused_icps) {
        for (i = 0; i < cc->nr_threads; i++) {
            CPUState *cs = CPU(core->threads[i]);

            pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
        }
    }
}

static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
    CPUCore *cc = CPU_CORE(dev);
    const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
    const char *type = object_get_typename(OBJECT(dev));
    CPUArchId *core_slot;
    int index;
    unsigned int smp_threads = machine->smp.threads;

    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
        error_setg(errp, "CPU hotplug not supported for this machine");
        return;
    }

    if (strcmp(base_core_type, type)) {
        error_setg(errp, "CPU core type should be %s", base_core_type);
        return;
    }

    if (cc->core_id % smp_threads) {
        error_setg(errp, "invalid core id %d", cc->core_id);
        return;
    }

    /*
     * In general we should have homogeneous threads-per-core, but old
     * (pre hotplug support) machine types allow the last core to have
     * reduced threads as a compatibility hack for when we allowed
     * total vcpus not a multiple of threads-per-core.
     */
    if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
        error_setg(errp, "invalid nr-threads %d, must be %d", cc->nr_threads,
                   smp_threads);
        return;
    }

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(errp, "core id %d out of range", cc->core_id);
        return;
    }

    if (core_slot->cpu) {
        error_setg(errp, "core %d already populated", cc->core_id);
        return;
    }

    numa_cpu_pre_plug(core_slot, dev, errp);
}
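/*
 * These checks guard the usual CPU hotplug flow, e.g. (assuming the default
 * POWER10 cpu type and smp.threads == 8, so core-id must be a multiple of 8):
 *
 *   (qemu) device_add power10_v2.0-spapr-cpu-core,core-id=8,id=core1
 *
 * The expected core type string is derived from the machine's cpu_type by
 * spapr_get_cpu_core_type(), hence the strcmp() against base_core_type.
 */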
int spapr_phb_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr,
                          void *fdt, int *fdt_start_offset, Error **errp)
{
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(drc->dev);
    int intc_phandle;

    intc_phandle = spapr_irq_get_phandle(spapr, spapr->fdt_blob, errp);
    if (intc_phandle <= 0) {
        return -1;
    }

    if (spapr_dt_phb(spapr, sphb, intc_phandle, fdt, fdt_start_offset)) {
        error_setg(errp, "unable to create FDT node for PHB %d", sphb->index);
        return -1;
    }

    /* generally SLOF creates these, for hotplug it's up to QEMU */
    _FDT(fdt_setprop_string(fdt, *fdt_start_offset, "name", "pci"));

    return 0;
}
static bool spapr_phb_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    const unsigned windows_supported = spapr_phb_windows_supported(sphb);
    SpaprDrc *drc;

    if (dev->hotplugged && !smc->dr_phb_enabled) {
        error_setg(errp, "PHB hotplug not supported for this machine");
        return false;
    }

    if (sphb->index == (uint32_t)-1) {
        error_setg(errp, "\"index\" for PAPR PHB is mandatory");
        return false;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
    if (drc && drc->dev) {
        error_setg(errp, "PHB %d already attached", sphb->index);
        return false;
    }

    /*
     * This will check that sphb->index doesn't exceed the maximum number of
     * PHBs for the current machine type.
     */
    return smc->phb_placement(spapr, sphb->index,
                              &sphb->buid, &sphb->io_win_addr,
                              &sphb->mem_win_addr, &sphb->mem64_win_addr,
                              windows_supported, sphb->dma_liobn,
                              errp);
}
static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
    SpaprDrc *drc;
    bool hotplugged = spapr_drc_hotplugged(dev);

    if (!smc->dr_phb_enabled) {
        return;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
    /* hotplug hooks should check it's enabled before getting this far */
    g_assert(drc);

    /* spapr_phb_pre_plug() already checked the DRC is attachable */
    spapr_drc_attach(drc, dev);

    if (hotplugged) {
        spapr_hotplug_req_add_by_index(drc);
    } else {
        spapr_drc_reset(drc);
    }
}
void spapr_phb_release(DeviceState *dev)
{
    HotplugHandler *hotplug_ctrl = qdev_get_hotplug_handler(dev);

    hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
    object_unparent(OBJECT(dev));
}

static void spapr_phb_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    qdev_unrealize(dev);
}

static void spapr_phb_unplug_request(HotplugHandler *hotplug_dev,
                                     DeviceState *dev, Error **errp)
{
    SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(dev);
    SpaprDrc *drc;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PHB, sphb->index);
    g_assert(drc);

    if (!spapr_drc_unplug_requested(drc)) {
        spapr_drc_unplug_request(drc);
        spapr_hotplug_req_remove_by_index(drc);
    } else {
        error_setg(errp,
                   "PCI Host Bridge unplug already in progress for device %s",
                   dev->id);
    }
}
bool spapr_tpm_proxy_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));

    if (spapr->tpm_proxy != NULL) {
        error_setg(errp, "Only one TPM proxy can be specified for this machine");
        return false;
    }

    return true;
}

static void spapr_tpm_proxy_plug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    SpaprTpmProxy *tpm_proxy = SPAPR_TPM_PROXY(dev);

    /* Already checked in spapr_tpm_proxy_pre_plug() */
    g_assert(spapr->tpm_proxy == NULL);

    spapr->tpm_proxy = tpm_proxy;
}

static void spapr_tpm_proxy_unplug(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));

    qdev_unrealize(dev);
    object_unparent(OBJECT(dev));
    spapr->tpm_proxy = NULL;
}
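/*
 * Only a single "spapr-tpm-proxy" device can exist per machine, as enforced
 * in spapr_tpm_proxy_pre_plug() above. It is typically created on the
 * command line, e.g. "-device spapr-tpm-proxy,host-path=/dev/tpmrm0"
 * (the host-path property name is assumed here; see spapr_tpm_proxy.c).
 */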
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
                                      DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_plug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_plug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        spapr_phb_plug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_plug(hotplug_dev, dev);
    }
}
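/*
 * The plug/pre-plug/unplug dispatchers in this block are wired up as the
 * machine's HotplugHandlerClass callbacks in spapr_machine_class_init()
 * below; only the four device types checked here are hotpluggable at the
 * machine level.
 */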
static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_unplug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_unplug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        spapr_phb_unplug(hotplug_dev, dev);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_unplug(hotplug_dev, dev);
    }
}

bool spapr_memory_hot_unplug_supported(SpaprMachineState *spapr)
{
    return spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT) ||
        /*
         * CAS will process all pending unplug requests.
         *
         * HACK: a guest could theoretically have cleared all bits in OV5,
         * but none of the guests we care for do.
         */
        spapr_ovec_empty(spapr->ov5_cas);
}

static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
                                                DeviceState *dev, Error **errp)
{
    SpaprMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(sms);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        if (spapr_memory_hot_unplug_supported(sms)) {
            spapr_memory_unplug_request(hotplug_dev, dev, errp);
        } else {
            error_setg(errp, "Memory hot unplug not supported for this guest");
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        if (!mc->has_hotpluggable_cpus) {
            error_setg(errp, "CPU hot unplug not supported on this machine");
            return;
        }
        spapr_core_unplug_request(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        if (!smc->dr_phb_enabled) {
            error_setg(errp, "PHB hot unplug not supported on this machine");
            return;
        }
        spapr_phb_unplug_request(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_unplug(hotplug_dev, dev);
    }
}

static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
                                          DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        spapr_phb_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        spapr_tpm_proxy_pre_plug(hotplug_dev, dev, errp);
    }
}
static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
                                                 DeviceState *dev)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        return HOTPLUG_HANDLER(machine);
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pcidev = PCI_DEVICE(dev);
        PCIBus *root = pci_device_root_bus(pcidev);
        SpaprPhbState *phb =
            (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
                                                 TYPE_SPAPR_PCI_HOST_BRIDGE);

        if (phb) {
            return HOTPLUG_HANDLER(phb);
        }
    }
    return NULL;
}

static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* make sure possible_cpu are initialized */
    mc->possible_cpu_arch_ids(machine);
    /* get CPU core slot containing thread that matches cpu_index */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}
static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    return idx / ms->smp.cores % ms->numa_state->num_nodes;
}
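/*
 * Worked example: with -smp cores=2,threads=8 and two NUMA nodes, core
 * indexes 0 and 1 land on node 0 while indexes 2 and 3 land on node 1,
 * i.e. idx / cores % num_nodes = 2 / 2 % 2 = 1 for the third core.
 */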
static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    unsigned int smp_threads = machine->smp.threads;
    unsigned int smp_cpus = machine->smp.cpus;
    const char *core_type;
    int spapr_max_cores = machine->smp.max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    core_type = spapr_get_cpu_core_type(machine->cpu_type);
    if (!core_type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                       sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].type = core_type;
        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;
    }
    return machine->possible_cpus;
}
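/*
 * Note that core_id (and hence arch_id) advances in steps of smp_threads:
 * with threads=8 the possible cores are 0, 8, 16, ..., one entry per core
 * with vcpus_count threads each.
 */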
static bool spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /*
     * New-style PHB window placement.
     *
     * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window
     * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO
     * windows.
     *
     * Some guest kernels can't work with MMIO windows above 1<<46
     * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
     *
     * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
     * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
     * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the
     * 1TiB 64-bit MMIO windows for each PHB.
     */
    const uint64_t base_buid = 0x800000020000000ULL;
    int i;

    /* Sanity check natural alignments */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
    /* Sanity check bounds */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return false;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;

    return true;
}
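/*
 * Worked example for index 0, assuming SPAPR_PCI_BASE is the 32 TiB mark
 * described in the comment above with 64 kiB / 2 GiB / 1 TiB window sizes:
 * the PIO window starts at 32 TiB, the 32-bit MMIO window at 32 TiB + 2 GiB
 * and the 64-bit MMIO window at 33 TiB; each further index moves the PIO,
 * 32-bit and 64-bit windows up by 64 kiB, 2 GiB and 1 TiB respectively.
 */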
static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
}

static void spapr_ics_resend(XICSFabric *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    ics_resend(spapr->ics);
}

static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
{
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    return cpu ? spapr_cpu_state(cpu)->icp : NULL;
}

static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    spapr_irq_print_info(spapr, buf);
    g_string_append_printf(buf, "irqchip: %s\n",
                           kvm_irqchip_in_kernel() ? "in-kernel" : "emulated");
}

/*
 * This is a XIVE only operation
 */
static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
                           uint8_t nvt_blk, uint32_t nvt_idx,
                           bool cam_ignore, uint8_t priority,
                           uint32_t logic_serv, XiveTCTXMatch *match)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
    XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
    XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
    int count;

    count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
                           priority, logic_serv, match);
    if (count < 0) {
        return count;
    }

    /*
     * When we implement the save and restore of the thread interrupt
     * contexts in the enter/exit CPU handlers of the machine and the
     * escalations in QEMU, we should be able to handle non dispatched
     * vCPUs.
     *
     * Until this is done, the sPAPR machine should find at least one
     * matching context always.
     */
    if (count == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
                      nvt_blk, nvt_idx);
    }

    return count;
}

int spapr_get_vcpu_id(PowerPCCPU *cpu)
{
    return cpu->vcpu_id;
}
bool spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    MachineState *ms = MACHINE(spapr);
    int vcpu_id;

    vcpu_id = spapr_vcpu_id(spapr, cpu_index);

    if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
        error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
        error_append_hint(errp, "Adjust the number of cpus to %d "
                          "or try to raise the number of threads per core\n",
                          vcpu_id * ms->smp.threads / spapr->vsmt);
        return false;
    }

    cpu->vcpu_id = vcpu_id;
    return true;
}

PowerPCCPU *spapr_find_cpu(int vcpu_id)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        if (spapr_get_vcpu_id(cpu) == vcpu_id) {
            return cpu;
        }
    }

    return NULL;
}

static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    return spapr_cpu->in_nested;
}
static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    /* These are only called by TCG, KVM maintains dispatch state */

    spapr_cpu->prod = false;
    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "dispatched partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}

static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "preempted partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
    XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
    VofMachineIfClass *vmc = VOF_MACHINE_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";
    mc->ignore_boot_device_suffixes = true;

    /*
     * We set up the default / latest behaviour here. The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->init = spapr_machine_init;
    mc->reset = spapr_machine_reset;
    mc->block_default_type = IF_SCSI;

    /*
     * While KVM determines max cpus in kvm_init() using kvm_max_vcpus(),
     * in TCG the limit is restricted by the range of CPU IPIs available.
     */
    mc->max_cpus = SPAPR_IRQ_NR_IPIS;

    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * MiB;
    mc->default_ram_id = "ppc_spapr.ram";
    mc->default_display = "std";
    mc->kvm_type = spapr_kvm_type;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
    mc->pci_allow_0_address = true;
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;
    hc->unplug = spapr_machine_device_unplug;

    smc->dr_lmb_enabled = true;
    smc->update_dt_enabled = true;
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power10_v2.0");
    mc->has_hotpluggable_cpus = true;
    mc->nvdimm_supported = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->cpu_in_nested = spapr_cpu_in_nested;
    vhc->deliver_hv_excp = spapr_exit_nested;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->hpte_set_c = spapr_hpte_set_c;
    vhc->hpte_set_r = spapr_hpte_set_r;
    vhc->get_pate = spapr_get_pate;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    vhc->cpu_exec_enter = spapr_cpu_exec_enter;
    vhc->cpu_exec_exit = spapr_cpu_exec_exit;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;
    /* Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;
    mc->auto_enable_numa = true;

    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
    smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_NESTED_PAPR] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_RPT_INVALIDATE] = SPAPR_CAP_OFF;

    /*
     * This cap specifies whether the AIL 3 mode for
     * H_SET_RESOURCE is supported. The default is modified
     * by default_caps_with_cpu().
     */
    smc->default_caps.caps[SPAPR_CAP_AIL_MODE_3] = SPAPR_CAP_ON;
    spapr_caps_add_properties(smc);
    smc->irq = &spapr_irq_dual;
    smc->dr_phb_enabled = true;
    smc->linux_pci_probe = true;
    smc->smp_threads_vsmt = true;
    smc->nr_xirqs = SPAPR_NR_XIRQS;
    xfc->match_nvt = spapr_match_nvt;
    vmc->client_architecture_support = spapr_vof_client_architecture_support;
    vmc->quiesce = spapr_vof_quiesce;
    vmc->setprop = spapr_vof_setprop;
}
static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(SpaprMachineState),
    .instance_init = spapr_instance_init,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size    = sizeof(SpaprMachineClass),
    .class_init    = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { TYPE_XIVE_FABRIC },
        { TYPE_VOF_MACHINE_IF },
        { }
    },
};

static void spapr_machine_latest_class_options(MachineClass *mc)
{
    mc->alias = "pseries";
    mc->is_default = true;
}
#define DEFINE_SPAPR_MACHINE_IMPL(latest, ...)                          \
    static void MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__)(        \
        ObjectClass *oc,                                                 \
        void *data)                                                      \
    {                                                                    \
        MachineClass *mc = MACHINE_CLASS(oc);                            \
        MACHINE_VER_SYM(class_options, spapr, __VA_ARGS__)(mc);          \
        MACHINE_VER_DEPRECATION(__VA_ARGS__);                            \
        if (latest) {                                                    \
            spapr_machine_latest_class_options(mc);                      \
        }                                                                \
    }                                                                    \
    static const TypeInfo MACHINE_VER_SYM(info, spapr, __VA_ARGS__) =    \
    {                                                                    \
        .name = MACHINE_VER_TYPE_NAME("pseries", __VA_ARGS__),           \
        .parent = TYPE_SPAPR_MACHINE,                                    \
        .class_init = MACHINE_VER_SYM(class_init, spapr, __VA_ARGS__),   \
    };                                                                   \
    static void MACHINE_VER_SYM(register, spapr, __VA_ARGS__)(void)      \
    {                                                                    \
        MACHINE_VER_DELETION(__VA_ARGS__);                               \
        type_register(&MACHINE_VER_SYM(info, spapr, __VA_ARGS__));       \
    }                                                                    \
    type_init(MACHINE_VER_SYM(register, spapr, __VA_ARGS__))

#define DEFINE_SPAPR_MACHINE_AS_LATEST(major, minor) \
    DEFINE_SPAPR_MACHINE_IMPL(true, major, minor)
#define DEFINE_SPAPR_MACHINE(major, minor) \
    DEFINE_SPAPR_MACHINE_IMPL(false, major, minor)
#define DEFINE_SPAPR_MACHINE_TAGGED(major, minor, tag) \
    DEFINE_SPAPR_MACHINE_IMPL(false, major, minor, _, tag)
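/*
 * DEFINE_SPAPR_MACHINE_AS_LATEST(9, 2) below therefore registers the
 * versioned machine type selected with "-machine pseries-9.2" and, via
 * spapr_machine_latest_class_options(), also makes it the "pseries" alias
 * and the default machine.
 */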
static void spapr_machine_9_2_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE_AS_LATEST(9, 2);
static void spapr_machine_9_1_class_options(MachineClass *mc)
{
    spapr_machine_9_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_1, hw_compat_9_1_len);
}

DEFINE_SPAPR_MACHINE(9, 1);
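/*
 * Each older class_options function below first applies the next newer
 * version's options and then layers its own compat properties on top, so
 * the oldest machine types accumulate the whole hw_compat_* chain.
 */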
static void spapr_machine_9_0_class_options(MachineClass *mc)
{
    spapr_machine_9_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_9_0, hw_compat_9_0_len);
}

DEFINE_SPAPR_MACHINE(9, 0);

static void spapr_machine_8_2_class_options(MachineClass *mc)
{
    spapr_machine_9_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len);
}

DEFINE_SPAPR_MACHINE(8, 2);

static void spapr_machine_8_1_class_options(MachineClass *mc)
{
    spapr_machine_8_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_1, hw_compat_8_1_len);
}

DEFINE_SPAPR_MACHINE(8, 1);

static void spapr_machine_8_0_class_options(MachineClass *mc)
{
    spapr_machine_8_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_8_0, hw_compat_8_0_len);
}

DEFINE_SPAPR_MACHINE(8, 0);

static void spapr_machine_7_2_class_options(MachineClass *mc)
{
    spapr_machine_8_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_2, hw_compat_7_2_len);
}

DEFINE_SPAPR_MACHINE(7, 2);

static void spapr_machine_7_1_class_options(MachineClass *mc)
{
    spapr_machine_7_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_1, hw_compat_7_1_len);
}

DEFINE_SPAPR_MACHINE(7, 1);

static void spapr_machine_7_0_class_options(MachineClass *mc)
{
    spapr_machine_7_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_7_0, hw_compat_7_0_len);
}

DEFINE_SPAPR_MACHINE(7, 0);

static void spapr_machine_6_2_class_options(MachineClass *mc)
{
    spapr_machine_7_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}

DEFINE_SPAPR_MACHINE(6, 2);
static void spapr_machine_6_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_6_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len);
    smc->pre_6_2_numa_affinity = true;
    mc->smp_props.prefer_sockets = true;
}

DEFINE_SPAPR_MACHINE(6, 1);

static void spapr_machine_6_0_class_options(MachineClass *mc)
{
    spapr_machine_6_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len);
}

DEFINE_SPAPR_MACHINE(6, 0);

static void spapr_machine_5_2_class_options(MachineClass *mc)
{
    spapr_machine_6_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_2, hw_compat_5_2_len);
}

DEFINE_SPAPR_MACHINE(5, 2);

static void spapr_machine_5_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_5_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_1, hw_compat_5_1_len);
    smc->pre_5_2_numa_associativity = true;
}

DEFINE_SPAPR_MACHINE(5, 1);

static void spapr_machine_5_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-5.1-associativity", "on" },
    };

    spapr_machine_5_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_5_0, hw_compat_5_0_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->numa_mem_supported = true;
    smc->pre_5_1_assoc_refpoints = true;
}

DEFINE_SPAPR_MACHINE(5, 0);
static void spapr_machine_4_2_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_5_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_FWNMI] = SPAPR_CAP_OFF;
    smc->rma_limit = 16 * GiB;
    mc->nvdimm_supported = false;
}

DEFINE_SPAPR_MACHINE(4, 2);

static void spapr_machine_4_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        /* Only allow 4kiB and 64kiB IOMMU pagesizes */
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
    };

    spapr_machine_4_2_class_options(mc);
    smc->linux_pci_probe = false;
    smc->smp_threads_vsmt = false;
    compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(4, 1);
static bool phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns, Error **errp)
{
    if (!spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma,
                             liobns, errp)) {
        return false;
    }
    return true;
}

static void spapr_machine_4_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
    smc->phb_placement = phb_placement_4_0;
    smc->irq = &spapr_irq_xics;
    smc->pre_4_1_migration = true;
}

DEFINE_SPAPR_MACHINE(4, 0);

static void spapr_machine_3_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);

    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
    smc->update_dt_enabled = false;
    smc->dr_phb_enabled = false;
    smc->broken_host_serial_model = true;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
}

DEFINE_SPAPR_MACHINE(3, 1);

static void spapr_machine_3_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_3_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);

    smc->legacy_irq_allocation = true;
    smc->nr_xirqs = 0x400;
    smc->irq = &spapr_irq_xics_legacy;
}

DEFINE_SPAPR_MACHINE(3, 0);
static void spapr_machine_2_12_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
        { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
    };

    spapr_machine_3_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));

    /* We depend on kvm_enabled() to choose a default value for the
     * hpt-max-page-size capability. Of course we can't do it here
     * because this is too early and the HW accelerator isn't initialized
     * yet. Postpone this to machine init (see default_caps_with_cpu()).
     */
    smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
}

DEFINE_SPAPR_MACHINE(2, 12);

static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_12_class_options(mc);
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
}

DEFINE_SPAPR_MACHINE_TAGGED(2, 12, sxxm);

static void spapr_machine_2_11_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_12_class_options(mc);
    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
    compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
}

DEFINE_SPAPR_MACHINE(2, 11);

static void spapr_machine_2_10_class_options(MachineClass *mc)
{
    spapr_machine_2_11_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
}

DEFINE_SPAPR_MACHINE(2, 10);

static void spapr_machine_2_9_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
    };

    spapr_machine_2_10_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    smc->pre_2_10_has_unused_icps = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
}

DEFINE_SPAPR_MACHINE(2, 9);

static void spapr_machine_2_8_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
    };

    spapr_machine_2_9_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->numa_mem_align_shift = 23;
}

DEFINE_SPAPR_MACHINE(2, 8);
static bool phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /* Legacy PHB placement for pseries-2.7 and earlier machine types */
    const uint64_t base_buid = 0x800000020000000ULL;
    const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
    const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
    const hwaddr pio_offset = 0x80000000; /* 2 GiB */
    const uint32_t max_index = 255;
    const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */

    uint64_t ram_top = MACHINE(spapr)->ram_size;
    hwaddr phb0_base, phb_base;
    int i;

    /* Do we have device memory? */
    if (MACHINE(spapr)->device_memory) {
        /* Can't just use maxram_size, because there may be an
         * alignment gap between normal and device memory regions
         */
        ram_top = MACHINE(spapr)->device_memory->base +
            memory_region_size(&MACHINE(spapr)->device_memory->mr);
    }

    phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);

    if (index > max_index) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
                   max_index);
        return false;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    phb_base = phb0_base + index * phb_spacing;
    *pio = phb_base + pio_offset;
    *mmio32 = phb_base + mmio_offset;
    /*
     * We don't set the 64-bit MMIO window, relying on the PHB's
     * fallback behaviour of automatically splitting a large "32-bit"
     * window into contiguous 32-bit and 64-bit windows
     */

    return true;
}
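/*
 * Worked example of the legacy placement: with 4 GiB of RAM and no device
 * memory, ram_top is 4 GiB and phb0_base is aligned up to 1 TiB, so PHB
 * index 0 gets its PIO window at 1 TiB + 2 GiB and its 32-bit MMIO window
 * at 1 TiB + 2.5 GiB; each further index sits another 64 GiB higher.
 */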
static void spapr_machine_2_7_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
        { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
    };

    spapr_machine_2_8_class_options(mc);
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
    mc->default_machine_opts = "modern-hotplug-events=off";
    compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    smc->phb_placement = phb_placement_2_7;
}

DEFINE_SPAPR_MACHINE(2, 7);

static void spapr_machine_2_6_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
    };

    spapr_machine_2_7_class_options(mc);
    mc->has_hotpluggable_cpus = false;
    compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(2, 6);

static void spapr_machine_2_5_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { "spapr-vlan", "use-rx-buffer-pools", "off" },
    };

    spapr_machine_2_6_class_options(mc);
    smc->use_ohci_by_default = true;
    compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(2, 5);

static void spapr_machine_2_4_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_5_class_options(mc);
    smc->dr_lmb_enabled = false;
    compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
}

DEFINE_SPAPR_MACHINE(2, 4);

static void spapr_machine_2_3_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
    };

    spapr_machine_2_4_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(2, 3);

static void spapr_machine_2_2_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
    };

    spapr_machine_2_3_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
}

DEFINE_SPAPR_MACHINE(2, 2);

static void spapr_machine_2_1_class_options(MachineClass *mc)
{
    spapr_machine_2_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
}

DEFINE_SPAPR_MACHINE(2, 1);

static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)