/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "sysemu/qtest.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "hw/fw-path-provider.h"
#include "sysemu/device_tree.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "migration/misc.h"
#include "migration/qemu-file-types.h"
#include "migration/global_state.h"
#include "migration/register.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "hw/core/cpu.h"
#include "hw/boards.h"
#include "hw/ppc/ppc.h"
#include "hw/loader.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "hw/pci-host/spapr.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"
#include "exec/address-spaces.h"
#include "exec/ram_addr.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/intc/intc.h"
#include "qemu/cutils.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/mem/memory-device.h"
#include "hw/ppc/spapr_tpm_proxy.h"
/* SLOF memory layout:
 *
 * SLOF raw image loaded at 0, copies its romfs right below the flat
 * device-tree, then positions SLOF itself 31M below that
 *
 * So we set FW_OVERHEAD to 40MB which should account for all of that
 *
 * We load our kernel at 4M, leaving space for SLOF initial image
 */
#define FDT_MAX_SIZE            0x100000
#define RTAS_MAX_SIZE           0x10000
#define RTAS_MAX_ADDR           0x80000000 /* RTAS must stay below that */
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE

#define MIN_RMA_SLOF            128UL

#define PHANDLE_INTC            0x00001111
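/*
 * Worked example of the resulting low-memory layout (illustrative only,
 * assuming the defaults above): the kernel is loaded at
 * KERNEL_LOAD_ADDR = FW_MAX_SIZE = 4 MiB, SLOF plus its romfs are expected
 * to fit within FW_OVERHEAD = 0x2800000 = 40 MiB, and at reset the RTAS
 * blob (at most RTAS_MAX_SIZE = 64 KiB) and the FDT (at most
 * FDT_MAX_SIZE = 1 MiB) are placed just below min(RMA size, 2 GiB).
 */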
/* These two functions implement the VCPU id numbering: one to compute them
 * all and one to identify thread 0 of a VCORE. Any change to the first one
 * is likely to have an impact on the second one, so let's keep them close.
 */
static int spapr_vcpu_id(SpaprMachineState *spapr, int cpu_index)
{
    MachineState *ms = MACHINE(spapr);
    unsigned int smp_threads = ms->smp.threads;

    return (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
}

static bool spapr_is_thread0_in_vcore(SpaprMachineState *spapr,
                                      PowerPCCPU *cpu)
{
    return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}
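/*
 * Illustrative numbering example (hypothetical values): with
 * ms->smp.threads = 4 and spapr->vsmt = 8, cpu_index 5 (core 1, thread 1)
 * gets VCPU id (5 / 4) * 8 + 5 % 4 = 9, and only VCPU ids that are
 * multiples of 8 correspond to thread 0 of a virtual core.
 */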
static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    /* Dummy entries correspond to unused ICPState objects in older QEMUs,
     * and newer QEMUs don't even have them. In both cases, we don't want
     * to send anything on the wire.
     */
    return false;
}

static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    .name = "icp/server",
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(4), /* uint32_t xirr */
        VMSTATE_UNUSED(1), /* uint8_t pending_priority */
        VMSTATE_UNUSED(1), /* uint8_t mfrr */
        VMSTATE_END_OF_LIST()
    },
};

static void pre_2_10_vmstate_register_dummy_icp(int i)
{
    vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
                     (void *)(uintptr_t) i);
}

static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
    vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
                       (void *)(uintptr_t) i);
}
int spapr_max_server_number(SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);

    return DIV_ROUND_UP(ms->smp.max_cpus * spapr->vsmt, ms->smp.threads);
}
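/*
 * For instance (illustrative values only): with max_cpus = 16, threads = 4
 * and vsmt = 8, every possible VCPU id computed above stays below
 * DIV_ROUND_UP(16 * 8, 4) = 32, which is the server count advertised to
 * the interrupt controller code.
 */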
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = spapr_get_vcpu_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i * 2] = cpu_to_be32(index + i);
        gservers_prop[i * 2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(gservers_prop));

    return ret;
}

static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
{
    int index = spapr_get_vcpu_id(cpu);
    uint32_t associativity[] = {cpu_to_be32(0x5),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(cpu->node_id),
                                cpu_to_be32(index)};

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
                       sizeof(associativity));
}
/* Populate the "ibm,pa-features" property */
static void spapr_populate_pa_features(SpaprMachineState *spapr,
                                       PowerPCCPU *cpu,
                                       void *fdt, int offset,
                                       bool legacy_guest)
{
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features = NULL;
    size_t pa_size;

    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
    }

    if (!pa_features) {
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue
         */
        pa_features[3] |= 0x20;
    }
    if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
        pa_features[24] |= 0x80;    /* Transactional memory support */
    }
    if (legacy_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
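/*
 * Note on the indexing used above (descriptive, derived from the tables):
 * the first two bytes of each template are a header, so pa_features[N]
 * holds "ibm,pa-features" attribute byte N - 2.  That is why HTM support
 * sets bit 0x80 of pa_features[24] (attribute byte 22, labelled "22: HTM")
 * and the radix workaround clears the same bit in pa_features[40 + 2]
 * (attribute byte 40, labelled "40: Radix MMU").
 */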
static int spapr_fixup_cpu_dt(void *fdt, SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    int ret = 0, offset, cpus_offset;
    CPUState *cs;
    char cpu_model[32];
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int index = spapr_get_vcpu_id(cpu);
        int compat_smt = MIN(ms->smp.threads, ppc_compat_max_vthreads(cpu));

        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);

        cpus_offset = fdt_path_offset(fdt, "/cpus");
        if (cpus_offset < 0) {
            cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
            if (cpus_offset < 0) {
                return cpus_offset;
            }
        }

        offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
        if (offset < 0) {
            offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
            if (offset < 0) {
                return offset;
            }
        }

        ret = fdt_setprop(fdt, offset, "ibm,pft-size",
                          pft_size_prop, sizeof(pft_size_prop));
        if (ret < 0) {
            return ret;
        }

        if (ms->numa_state->num_nodes > 1) {
            ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
            if (ret < 0) {
                return ret;
            }
        }

        ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
        if (ret < 0) {
            return ret;
        }

        spapr_populate_pa_features(spapr, cpu, fdt, offset,
                                   spapr->cas_legacy_guest_workaround);
    }
    return ret;
}
static hwaddr spapr_node0_size(MachineState *machine)
{
    if (machine->numa_state->num_nodes) {
        int i;
        for (i = 0; i < machine->numa_state->num_nodes; ++i) {
            if (machine->numa_state->nodes[i].node_mem) {
                return MIN(pow2floor(machine->numa_state->nodes[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}

static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}
static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
                                      hwaddr size)
{
    uint32_t associativity[] = {
        cpu_to_be32(0x4), /* length */
        cpu_to_be32(0x0), cpu_to_be32(0x0),
        cpu_to_be32(0x0), cpu_to_be32(nodeid)
    };
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                      sizeof(associativity))));
    return off;
}
static int spapr_populate_memory(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = machine->numa_state->num_nodes;
    NodeInfo *nodes = machine->numa_state->nodes;
    NodeInfo ramnode;

    /* No NUMA nodes, assume there is just one node with whole RAM */
    if (!nb_nodes) {
        nb_nodes = 1;
        ramnode.node_mem = machine->ram_size;
        nodes = &ramnode;
    }

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* spapr_machine_init() checks for rma_size <= node0_size */
            spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    return 0;
}
static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
                                  SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    unsigned int smp_threads = ms->smp.threads;
    uint32_t vcpus_per_socket = smp_threads * ms->smp.cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
    SpaprDrc *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        warn_report("Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        warn_report("Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", cpu->hash64_opts->slb_size)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    if (env->spr_cb[SPR_PURR].oea_read) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1)));
    }
    if (env->spr_cb[SPR_SPURR].oea_read) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1)));
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VSX (vector extensions) if available
     *   1               == VMX / Altivec available
     *   2               == VSX available
     *
     * Only CPUs for which we create core types in spapr_cpu_core.c
     * are possible, and all of those have VMX */
    if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
    } else {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1               == DFP available */
    if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_populate_pa_features(spapr, cpu, fdt, offset, false);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (ms->numa_state->num_nodes > 1) {
        _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }

    /*
     * We set this property to let the guest know that it can use the large
     * decrementer and its width in bits.
     */
    if (spapr_get_cap(spapr, SPAPR_CAP_LARGE_DECREMENTER) != SPAPR_CAP_OFF)
        _FDT((fdt_setprop_u32(fdt, offset, "ibm,dec-bits",
                              pcc->lrg_decr_bits)));
}
static void spapr_populate_cpus_dt_node(void *fdt, SpaprMachineState *spapr)
{
    int cpus_offset;
    int n_cpus = 0;
    CPUState *cs;
    CPUState **rev = NULL;
    int i;

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in FDT
     * for the guest kernel to enumerate the CPUs correctly.
     *
     * The CPU list cannot be traversed in reverse order, so we need
     * to do extra work.
     */
    CPU_FOREACH(cs) {
        rev = g_renew(CPUState *, rev, n_cpus + 1);
        rev[n_cpus++] = cs;
    }

    for (i = n_cpus - 1; i >= 0; i--) {
        CPUState *cs = rev[i];
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = spapr_get_vcpu_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        char *nodename;
        int offset;

        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        g_free(nodename);
        _FDT(offset);
        spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    }

    g_free(rev);
}
static int spapr_rng_populate_dt(void *fdt)
{
    int node;
    int ret;

    node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
    if (node <= 0) {
        return -1;
    }

    ret = fdt_setprop_string(fdt, node, "device_type",
                             "ibm,platform-facilities");
    ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
    ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);

    node = fdt_add_subnode(fdt, node, "ibm,random-v1");
    if (node <= 0) {
        return -1;
    }
    ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");

    return ret ? -1 : 0;
}
static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
{
    MemoryDeviceInfoList *info;

    for (info = list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
            PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;

            if (addr >= pcdimm_info->addr &&
                addr < (pcdimm_info->addr + pcdimm_info->size)) {
                return pcdimm_info->node;
            }
        }
    }

    return -1;
}
struct sPAPRDrconfCellV2 {
     uint32_t seq_lmbs;
     uint64_t base_addr;
     uint32_t drc_index;
     uint32_t aa_index;
     uint32_t flags;
} QEMU_PACKED;

typedef struct DrconfCellQueue {
    struct sPAPRDrconfCellV2 cell;
    QSIMPLEQ_ENTRY(DrconfCellQueue) entry;
} DrconfCellQueue;

static DrconfCellQueue *
spapr_get_drconf_cell(uint32_t seq_lmbs, uint64_t base_addr,
                      uint32_t drc_index, uint32_t aa_index,
                      uint32_t flags)
{
    DrconfCellQueue *elem;

    elem = g_malloc0(sizeof(*elem));
    elem->cell.seq_lmbs = cpu_to_be32(seq_lmbs);
    elem->cell.base_addr = cpu_to_be64(base_addr);
    elem->cell.drc_index = cpu_to_be32(drc_index);
    elem->cell.aa_index = cpu_to_be32(aa_index);
    elem->cell.flags = cpu_to_be32(flags);

    return elem;
}
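/*
 * Layout sketch of the "ibm,dynamic-memory-v2" property assembled below
 * (illustrative): a be32 entry count followed by one packed
 * sPAPRDrconfCellV2 per contiguous run of LMBs, e.g. one reserved cell
 * covering boot RAM, one SPAPR_LMB_FLAGS_ASSIGNED cell per plugged DIMM
 * and one unassigned cell for the remaining hot-pluggable range.
 */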
/* ibm,dynamic-memory-v2 */
static int spapr_populate_drmem_v2(SpaprMachineState *spapr, void *fdt,
                                   int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    uint8_t *int_buf, *cur_index;
    int ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint64_t addr, cur_addr, size;
    uint32_t nr_boot_lmbs = (machine->device_memory->base / lmb_size);
    uint64_t mem_end = machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr);
    uint32_t node, buf_len, nr_entries = 0;
    SpaprDrc *drc;
    DrconfCellQueue *elem, *next;
    MemoryDeviceInfoList *info;
    QSIMPLEQ_HEAD(, DrconfCellQueue) drconf_queue
        = QSIMPLEQ_HEAD_INITIALIZER(drconf_queue);

    /* Entry to cover RAM and the gap area */
    elem = spapr_get_drconf_cell(nr_boot_lmbs, 0, 0, -1,
                                 SPAPR_LMB_FLAGS_RESERVED |
                                 SPAPR_LMB_FLAGS_DRC_INVALID);
    QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
    nr_entries++;

    cur_addr = machine->device_memory->base;
    for (info = dimms; info; info = info->next) {
        PCDIMMDeviceInfo *di = info->value->u.dimm.data;

        addr = di->addr;
        size = di->size;
        node = di->node;

        /* Entry for hot-pluggable area */
        if (cur_addr < addr) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
            elem = spapr_get_drconf_cell((addr - cur_addr) / lmb_size,
                                         cur_addr, spapr_drc_index(drc), -1, 0);
            QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
            nr_entries++;
        }

        /* Entry for the DIMM */
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, addr / lmb_size);
        elem = spapr_get_drconf_cell(size / lmb_size, addr,
                                     spapr_drc_index(drc), node,
                                     SPAPR_LMB_FLAGS_ASSIGNED);
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
        cur_addr = addr + size;
    }

    /* Entry for remaining hotpluggable area */
    if (cur_addr < mem_end) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size);
        elem = spapr_get_drconf_cell((mem_end - cur_addr) / lmb_size,
                                     cur_addr, spapr_drc_index(drc), -1, 0);
        QSIMPLEQ_INSERT_TAIL(&drconf_queue, elem, entry);
        nr_entries++;
    }

    buf_len = nr_entries * sizeof(struct sPAPRDrconfCellV2) + sizeof(uint32_t);
    int_buf = cur_index = g_malloc0(buf_len);
    *(uint32_t *)int_buf = cpu_to_be32(nr_entries);
    cur_index += sizeof(nr_entries);

    QSIMPLEQ_FOREACH_SAFE(elem, &drconf_queue, entry, next) {
        memcpy(cur_index, &elem->cell, sizeof(elem->cell));
        cur_index += sizeof(elem->cell);
        QSIMPLEQ_REMOVE(&drconf_queue, elem, DrconfCellQueue, entry);
        g_free(elem);
    }

    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory-v2", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}
/* ibm,dynamic-memory */
static int spapr_populate_drmem_v1(SpaprMachineState *spapr, void *fdt,
                                   int offset, MemoryDeviceInfoList *dimms)
{
    MachineState *machine = MACHINE(spapr);
    int i, ret;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t device_lmb_start = machine->device_memory->base / lmb_size;
    uint32_t nr_lmbs = (machine->device_memory->base +
                       memory_region_size(&machine->device_memory->mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     */
    buf_len = (nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= device_lmb_start) {
            SpaprDrc *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * device memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    g_free(int_buf);
    if (ret < 0) {
        return -1;
    }
    return 0;
}
/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_populate_drconf_memory(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int ret, i, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
    uint32_t *int_buf, *cur_index, buf_len;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
    MemoryDeviceInfoList *dimms = NULL;

    /*
     * Don't create the node if there is no device memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                      sizeof(prop_lmb_size));
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        return ret;
    }

    /* ibm,dynamic-memory or ibm,dynamic-memory-v2 */
    dimms = qmp_memory_device_list();
    if (spapr_ovec_test(spapr->ov5_cas, OV5_DRMEM_V2)) {
        ret = spapr_populate_drmem_v2(spapr, fdt, offset, dimms);
    } else {
        ret = spapr_populate_drmem_v1(spapr, fdt, offset, dimms);
    }
    qapi_free_MemoryDeviceInfoList(dimms);

    if (ret < 0) {
        return ret;
    }

    /* ibm,associativity-lookup-arrays */
    buf_len = (nr_nodes * 4 + 2) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_nodes);
    int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        uint32_t associativity[] = {
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(i)
        };
        memcpy(cur_index, associativity, sizeof(associativity));
        cur_index += 4;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
                      (cur_index - int_buf) * sizeof(uint32_t));
    g_free(int_buf);

    return ret;
}
static int spapr_dt_cas_updates(SpaprMachineState *spapr, void *fdt,
                                SpaprOptionVector *ov5_updates)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int ret = 0, offset;

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
        g_assert(smc->dr_lmb_enabled);
        ret = spapr_populate_drconf_memory(spapr, fdt);
        if (ret) {
            return ret;
        }
    }

    offset = fdt_path_offset(fdt, "/chosen");
    if (offset < 0) {
        offset = fdt_add_subnode(fdt, 0, "chosen");
        if (offset < 0) {
            return offset;
        }
    }
    ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
                                 "ibm,architecture-vec-5");

    return ret;
}
static bool spapr_hotplugged_dev_before_cas(void)
{
    Object *drc_container, *obj;
    ObjectProperty *prop;
    ObjectPropertyIterator iter;

    drc_container = container_get(object_get_root(), "/dr-connector");
    object_property_iter_init(&iter, drc_container);
    while ((prop = object_property_iter_next(&iter))) {
        if (!strstart(prop->type, "link<", NULL)) {
            continue;
        }
        obj = object_property_get_link(drc_container, prop->name, NULL);
        if (spapr_drc_needed(obj)) {
            return true;
        }
    }
    return false;
}
int spapr_h_cas_compose_response(SpaprMachineState *spapr,
                                 target_ulong addr, target_ulong size,
                                 SpaprOptionVector *ov5_updates)
{
    void *fdt, *fdt_skel;
    SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 };

    if (spapr_hotplugged_dev_before_cas()) {
        return 1;
    }

    if (size < sizeof(hdr) || size > FW_MAX_SIZE) {
        error_report("SLOF provided an unexpected CAS buffer size "
                     TARGET_FMT_lu " (min: %zu, max: %u)",
                     size, sizeof(hdr), FW_MAX_SIZE);
        exit(EXIT_FAILURE);
    }

    size -= sizeof(hdr);

    /* Create skeleton */
    fdt_skel = g_malloc0(size);
    _FDT((fdt_create(fdt_skel, size)));
    _FDT((fdt_finish_reservemap(fdt_skel)));
    _FDT((fdt_begin_node(fdt_skel, "")));
    _FDT((fdt_end_node(fdt_skel)));
    _FDT((fdt_finish(fdt_skel)));
    fdt = g_malloc0(size);
    _FDT((fdt_open_into(fdt_skel, fdt, size)));
    g_free(fdt_skel);

    /* Fixup cpu nodes */
    _FDT((spapr_fixup_cpu_dt(fdt, spapr)));

    if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
        return -1;
    }

    /* Pack resulting tree */
    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
        trace_spapr_cas_failed(size);
        return -1;
    }

    cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
    cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
    trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));

    g_free(fdt);

    return 0;
}
static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt)
{
    MachineState *ms = MACHINE(spapr);
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
    uint64_t max_device_addr = MACHINE(spapr)->device_memory->base +
        memory_region_size(&MACHINE(spapr)->device_memory->mr);
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_device_addr >> 32),
        cpu_to_be32(max_device_addr & 0xffffffff),
        0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
        cpu_to_be32(ms->smp.max_cpus / ms->smp.threads),
    };
    uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 1 : 0);
    uint32_t maxdomains[] = {
        cpu_to_be32(4),
        maxdomain,
        maxdomain,
        maxdomain,
        cpu_to_be32(spapr->gpu_numa_id),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-join");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(hypertas, "hcall-vphn");
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
                     maxdomains, sizeof(maxdomains)));

    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    g_assert(msi_nonbroken);
    _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));

    /*
     * According to PAPR, the rtas ibm,os-term call does not guarantee a
     * return back to the guest cpu.
     *
     * An additional ibm,extended-os-term property indicates that the RTAS
     * call will always return. Set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}
/*
 * Prepare ibm,arch-vec-5-platform-support, which indicates the MMU
 * and the XIVE features that the guest may request and thus the valid
 * values for bytes 23..26 of option vector 5:
 */
static void spapr_dt_ov5_platform_support(SpaprMachineState *spapr, void *fdt,
                                          int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    char val[2 * 4] = {
        23, spapr->irq->ov5, /* Xive mode. */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /*
         * If we're in a pre POWER9 compat mode then the guest should
         * do hash and use the legacy interrupt mode
         */
        val[1] = 0x00; /* XICS */
        val[3] = 0x00; /* Hash */
    } else if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
        val[3] = 0x80; /* OV5_MMU_BOTH */
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}
static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int chosen;
    const char *boot_device = machine->boot_order;
    char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
    size_t cb = 0;
    char *bootlist = get_boot_devices_list(&cb);

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                          spapr->initrd_base));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                          spapr->initrd_base + spapr->initrd_size));

    if (spapr->kernel_size) {
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(spapr->kernel_size) };

        _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
        if (spapr->kernel_le) {
            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
        }
    }
    if (boot_menu) {
        _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
    }
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

    if (cb && bootlist) {
        int i;

        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
    }

    if (boot_device && strlen(boot_device)) {
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
    }

    if (!spapr->has_graphics && stdout_path) {
        /*
         * The "linux,stdout-path" and "stdout" properties are deprecated by
         * the Linux kernel. New platforms should only use the "stdout-path"
         * property. Set the new property and continue using the older
         * property to remain compatible with the existing firmware.
         */
        _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
        _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
    }

    /* We can deal with BAR reallocation just fine, advertise it to the guest */
    if (smc->linux_pci_probe) {
        _FDT(fdt_setprop_cell(fdt, chosen, "linux,pci-probe-only", 0));
    }

    spapr_dt_ov5_platform_support(spapr, fdt, chosen);

    g_free(stdout_path);
    g_free(bootlist);
}
static void spapr_dt_hypervisor(SpaprMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}
static void *spapr_build_fdt(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int ret;
    void *fdt;
    SpaprPhbState *phb;
    char *buf;

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /* Guest UUID & Name */
    buf = qemu_uuid_unparse_strdup(&qemu_uuid);
    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    /* Host Model & Serial Number */
    if (spapr->host_model) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", spapr->host_model));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
    }

    if (spapr->host_serial) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", spapr->host_serial));
    } else if (smc->broken_host_serial_model && kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt controller */
    spapr->irq->dt_populate(spapr, spapr_max_server_number(spapr), fdt,
                            PHANDLE_INTC);

    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_rng_populate_dt(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_dt_phb(phb, PHANDLE_INTC, fdt, spapr->irq->nr_msis, NULL);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    /* cpus */
    spapr_populate_cpus_dt_node(fdt, spapr);

    if (smc->dr_lmb_enabled) {
        _FDT(spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_dt_drc(fdt, offset, NULL, SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (spapr->kernel_size) {
        _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
    }
    if (spapr->initrd_size) {
        _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
    }

    /* ibm,client-architecture-support updates */
    ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
    if (ret < 0) {
        error_report("couldn't setup CAS properties fdt");
        exit(1);
    }

    if (smc->dr_phb_enabled) {
        ret = spapr_dt_drc(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_PHB);
        if (ret < 0) {
            error_report("Couldn't set up PHB DR device tree properties");
            exit(1);
        }
    }

    return fdt;
}
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
}
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}
struct LPCRSyncState {
    target_ulong value;
    target_ulong mask;
};

static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
{
    struct LPCRSyncState *s = arg.host_ptr;
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    target_ulong lpcr;

    cpu_synchronize_state(cs);
    lpcr = env->spr[SPR_LPCR];
    lpcr &= ~s->mask;
    lpcr |= s->value;
    ppc_store_lpcr(cpu, lpcr);
}

void spapr_set_all_lpcrs(target_ulong value, target_ulong mask)
{
    CPUState *cs;
    struct LPCRSyncState s = {
        .value = value,
        .mask = mask
    };

    CPU_FOREACH(cs) {
        run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
    }
}
static void spapr_get_pate(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    /* Copy PATE1:GR into PATE0:HR */
    entry->dw0 = spapr->patb_entry & PATE0_HR;
    entry->dw1 = spapr->patb_entry;
}
#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
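/*
 * Each hashed PTE is two 64-bit words, so HPTE(table, i) simply points
 * 16 * i bytes into the table; e.g. HPTE(spapr->htab, 3) addresses byte
 * offset 48.  HPTE_VALID()/HPTE_DIRTY() test bits of the first word after
 * undoing the stored endianness with tswap64().
 */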
/*
 * Get the fd to access the kernel htab, re-opening it if necessary
 */
static int get_htab_fd(SpaprMachineState *spapr)
{
    Error *local_err = NULL;

    if (spapr->htab_fd >= 0) {
        return spapr->htab_fd;
    }

    spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
    if (spapr->htab_fd < 0) {
        error_report_err(local_err);
    }

    return spapr->htab_fd;
}

void close_htab_fd(SpaprMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        close(spapr->htab_fd);
    }
    spapr->htab_fd = -1;
}
static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
}

static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    assert(kvm_enabled());

    if (!spapr->htab) {
        return 0;
    }

    return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
}
static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
                                               hwaddr ptex, int n)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        /*
         * HTAB is controlled by KVM. Fetch into temporary buffer
         */
        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
        kvmppc_read_hptes(hptes, ptex, n);
        return hptes;
    }

    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
}

static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
                              const ppc_hash_pte64_t *hptes,
                              hwaddr ptex, int n)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        g_free((void *)hptes);
    }

    /* Nothing to do for qemu managed HPT */
}
void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                      uint64_t pte0, uint64_t pte1)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
    } else {
        if (pte0 & HPTE64_V_VALID) {
            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
            /*
             * When setting valid, we write PTE1 first. This ensures
             * proper synchronization with the reading code in
             * ppc_hash64_pteg_search()
             */
            smp_wmb();
            stq_p(spapr->htab + offset, pte0);
        } else {
            stq_p(spapr->htab + offset, pte0);
            /*
             * When clearing it we set PTE0 first. This ensures proper
             * synchronization with the reading code in
             * ppc_hash64_pteg_search()
             */
            smp_wmb();
            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
        }
    }
}
static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte1)
{
    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15;
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        /* There should always be a hash table when this is called */
        error_report("spapr_hpte_set_c called with no hash table !");
        return;
    }

    /* The HW performs a non-atomic byte update */
    stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80);
}

static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte1)
{
    hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14;
    SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        /* There should always be a hash table when this is called */
        error_report("spapr_hpte_set_r called with no hash table !");
        return;
    }

    /* The HW performs a non-atomic byte update */
    stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01);
}
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift;

    /* We aim for a hash table of size 1/128 the size of RAM (rounded
     * up). The PAPR recommendation is actually 1/64 of RAM size, but
     * that's much more than is needed for Linux guests */
    shift = ctz64(pow2ceil(ramsize)) - 7;
    shift = MAX(shift, 18); /* Minimum architected size */
    shift = MIN(shift, 46); /* Maximum architected size */
    return shift;
}
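/*
 * Worked example (illustrative): for ramsize = 4 GiB,
 * ctz64(pow2ceil(4 GiB)) = 32, so shift = 25 and the HPT is
 * 2^25 = 32 MiB, i.e. 1/128 of RAM; the clamps keep the result within
 * the architected 2^18..2^46 range.
 */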
void spapr_free_hpt(SpaprMachineState *spapr)
{
    g_free(spapr->htab);
    spapr->htab = NULL;
    spapr->htab_shift = 0;
    close_htab_fd(spapr);
}
void spapr_reallocate_hpt(SpaprMachineState *spapr, int shift,
                          Error **errp)
{
    long rc;

    /* Clean up any HPT info from a previous boot */
    spapr_free_hpt(spapr);

    rc = kvmppc_reset_htab(shift);
    if (rc < 0) {
        /* kernel-side HPT needed, but couldn't allocate one */
        error_setg_errno(errp, errno,
                         "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
                         shift);
        /* This is almost certainly fatal, but if the caller really
         * wants to carry on with shift == 0, it's welcome to try */
    } else if (rc > 0) {
        /* kernel-side HPT allocated */
        if (rc != shift) {
            error_setg(errp,
                       "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
                       shift, rc);
        }

        spapr->htab_shift = shift;
        spapr->htab = NULL;
    } else {
        /* kernel-side HPT not needed, allocate in userspace instead */
        size_t size = 1ULL << shift;
        int i;

        spapr->htab = qemu_memalign(size, size);
        if (!spapr->htab) {
            error_setg_errno(errp, errno,
                             "Could not allocate HPT of order %d", shift);
            return;
        }

        memset(spapr->htab, 0, size);
        spapr->htab_shift = shift;

        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
            DIRTY_HPTE(HPTE(spapr->htab, i));
        }
    }
    /* We're setting up a hash table, so that means we're not radix */
    spapr->patb_entry = 0;
    spapr_set_all_lpcrs(0, LPCR_HR | LPCR_UPRT);
}
void spapr_setup_hpt_and_vrma(SpaprMachineState *spapr)
{
    int hpt_shift;

    if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED)
        || (spapr->cas_reboot
            && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
    } else {
        uint64_t current_ram_size;

        current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
        hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
    }
    spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);

    if (spapr->vrma_adjust) {
        spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)),
                                          spapr->htab_shift);
    }
}
static int spapr_reset_drcs(Object *child, void *opaque)
{
    SpaprDrc *drc =
        (SpaprDrc *) object_dynamic_cast(child,
                                         TYPE_SPAPR_DR_CONNECTOR);

    if (drc) {
        spapr_drc_reset(drc);
    }

    return 0;
}
static void spapr_machine_reset(MachineState *machine)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(machine);
    PowerPCCPU *first_ppc_cpu;
    uint32_t rtas_limit;
    hwaddr rtas_addr, fdt_addr;
    void *fdt;
    int rc;

    spapr_caps_apply(spapr);

    first_ppc_cpu = POWERPC_CPU(first_cpu);
    if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
        ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0,
                              spapr->max_compat_pvr)) {
        /*
         * If using KVM with radix mode available, VCPUs can be started
         * without a HPT because KVM will start them in radix mode.
         * Set the GR bit in PATE so that we know there is no HPT.
         */
        spapr->patb_entry = PATE1_GR;
        spapr_set_all_lpcrs(LPCR_HR | LPCR_UPRT, LPCR_HR | LPCR_UPRT);
    } else {
        spapr_setup_hpt_and_vrma(spapr);
    }

    /*
     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
     * called from vPHB reset handler so we initialize the counter here.
     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
     * must be equally distant from any other node.
     * The final value of spapr->gpu_numa_id is going to be written to
     * max-associativity-domains in spapr_build_fdt().
     */
    spapr->gpu_numa_id = MAX(1, machine->numa_state->num_nodes);
    qemu_devices_reset();

    /*
     * If this reset wasn't generated by CAS, we should reset our
     * negotiated options and start from scratch
     */
    if (!spapr->cas_reboot) {
        spapr_ovec_cleanup(spapr->ov5_cas);
        spapr->ov5_cas = spapr_ovec_new();

        ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal);
    }

    /*
     * This is fixing some of the default configuration of the XIVE
     * devices. To be called after the reset of the machine devices.
     */
    spapr_irq_reset(spapr, &error_fatal);

    /*
     * There is no CAS under qtest. Simulate one to please the code that
     * depends on spapr->ov5_cas. This is especially needed to test device
     * unplug, so we do that before resetting the DRCs.
     */
    if (qtest_enabled()) {
        spapr_ovec_cleanup(spapr->ov5_cas);
        spapr->ov5_cas = spapr_ovec_clone(spapr->ov5);
    }

    /* DRC reset may cause a device to be unplugged. This will cause troubles
     * if this device is used by another device (eg, a running vhost backend
     * will crash QEMU if the DIMM holding the vring goes away). To avoid such
     * situations, we reset DRCs after all devices have been reset.
     */
    object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL);

    spapr_clear_pending_events(spapr);

    /*
     * We place the device tree and RTAS just below either the top of the RMA,
     * or just below 2GB, whichever is lower, so that it can be
     * processed with 32-bit real mode code if necessary
     */
    rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
    rtas_addr = rtas_limit - RTAS_MAX_SIZE;
    fdt_addr = rtas_addr - FDT_MAX_SIZE;

    fdt = spapr_build_fdt(spapr);

    spapr_load_rtas(spapr, fdt, rtas_addr);

    rc = fdt_pack(fdt);

    /* Should only fail if we've built a corrupted tree */
    assert(rc == 0);

    if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
        error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
                     fdt_totalsize(fdt), FDT_MAX_SIZE);
        exit(1);
    }

    /* Load the fdt */
    qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
    cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
    g_free(spapr->fdt_blob);
    spapr->fdt_size = fdt_totalsize(fdt);
    spapr->fdt_initial_size = spapr->fdt_size;
    spapr->fdt_blob = fdt;

    /* Set up the entry state */
    spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, fdt_addr);
    first_ppc_cpu->env.gpr[5] = 0;

    spapr->cas_reboot = false;
}
static void spapr_create_nvram(SpaprMachineState *spapr)
{
    DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
    DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);

    if (dinfo) {
        qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
                            &error_fatal);
    }

    qdev_init_nofail(dev);

    spapr->nvram = (struct SpaprNvram *)dev;
}
static void spapr_rtc_create(SpaprMachineState *spapr)
{
    object_initialize_child(OBJECT(spapr), "rtc",
                            &spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC,
                            &error_fatal, NULL);
    object_property_set_bool(OBJECT(&spapr->rtc), true, "realized",
                             &error_fatal);
    object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
                              "date", &error_fatal);
}
/* Returns whether we want to use VGA or not */
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
{
    switch (vga_interface_type) {
    case VGA_NONE:
        return false;
    case VGA_DEVICE:
        return true;
    case VGA_STD:
    case VGA_VIRTIO:
    case VGA_CIRRUS:
        return pci_vga_init(pci_bus) != NULL;
    default:
        error_setg(errp,
                   "Unsupported VGA mode, only -vga std or -vga virtio is supported");
        return false;
    }
}
static int spapr_pre_load(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_load(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}
static int spapr_post_load(void *opaque, int version_id)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;
    int err = 0;

    err = spapr_caps_post_migration(spapr);
    if (err) {
        return err;
    }

    /*
     * In earlier versions, there was no separate qdev for the PAPR
     * RTC, so the RTC offset was stored directly in sPAPREnvironment.
     * So when migrating from those versions, poke the incoming offset
     * value into the RTC device
     */
    if (version_id < 3) {
        err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
        if (err) {
            return err;
        }
    }

    if (kvm_enabled() && spapr->patb_entry) {
        PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
        bool radix = !!(spapr->patb_entry & PATE1_GR);
        bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

        /*
         * Update LPCR:HR and UPRT as they may not be set properly in
         * the stream
         */
        spapr_set_all_lpcrs(radix ? (LPCR_HR | LPCR_UPRT) : 0,
                            LPCR_HR | LPCR_UPRT);

        err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
        if (err) {
            error_report("Process table config unsupported by the host");
            return -EINVAL;
        }
    }

    err = spapr_irq_post_load(spapr, version_id);
    if (err) {
        return err;
    }

    return err;
}
static int spapr_pre_save(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_save(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}

static bool version_before_3(void *opaque, int version_id)
{
    return version_id < 3;
}
static bool spapr_pending_events_needed(void *opaque)
{
    SpaprMachineState *spapr = (SpaprMachineState *)opaque;
    return !QTAILQ_EMPTY(&spapr->pending_events);
}

static const VMStateDescription vmstate_spapr_event_entry = {
    .name = "spapr_event_log_entry",
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(summary, SpaprEventLogEntry),
        VMSTATE_UINT32(extended_length, SpaprEventLogEntry),
        VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0,
                                     NULL, extended_length),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_pending_events = {
    .name = "spapr_pending_events",
    .minimum_version_id = 1,
    .needed = spapr_pending_events_needed,
    .fields = (VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1,
                         vmstate_spapr_event_entry, SpaprEventLogEntry, next),
        VMSTATE_END_OF_LIST()
    },
};
static bool spapr_ov5_cas_needed(void *opaque)
{
    SpaprMachineState *spapr = opaque;
    SpaprOptionVector *ov5_mask = spapr_ovec_new();
    SpaprOptionVector *ov5_legacy = spapr_ovec_new();
    SpaprOptionVector *ov5_removed = spapr_ovec_new();
    bool cas_needed;

    /* Prior to the introduction of SpaprOptionVector, we had two option
     * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * include the CAS-negotiated options in the migration stream, unless
     * they affect boot time behaviour only.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
    spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);

    /* spapr_ovec_diff returns true if bits were removed. we avoid using
     * the mask itself since in the future it's possible "legacy" bits may be
     * removed via machine options, which could generate a false positive
     * that breaks migration.
     */
    spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
    cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);

    spapr_ovec_cleanup(ov5_mask);
    spapr_ovec_cleanup(ov5_legacy);
    spapr_ovec_cleanup(ov5_removed);

    return cas_needed;
}

static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1,
                                 vmstate_spapr_ovec, SpaprOptionVector),
        VMSTATE_END_OF_LIST()
    },
};
2048 static bool spapr_patb_entry_needed(void *opaque
)
2050 SpaprMachineState
*spapr
= opaque
;
2052 return !!spapr
->patb_entry
;
2055 static const VMStateDescription vmstate_spapr_patb_entry
= {
2056 .name
= "spapr_patb_entry",
2058 .minimum_version_id
= 1,
2059 .needed
= spapr_patb_entry_needed
,
2060 .fields
= (VMStateField
[]) {
2061 VMSTATE_UINT64(patb_entry
, SpaprMachineState
),
2062 VMSTATE_END_OF_LIST()
2066 static bool spapr_irq_map_needed(void *opaque
)
2068 SpaprMachineState
*spapr
= opaque
;
2070 return spapr
->irq_map
&& !bitmap_empty(spapr
->irq_map
, spapr
->irq_map_nr
);
2073 static const VMStateDescription vmstate_spapr_irq_map
= {
2074 .name
= "spapr_irq_map",
2076 .minimum_version_id
= 1,
2077 .needed
= spapr_irq_map_needed
,
2078 .fields
= (VMStateField
[]) {
2079 VMSTATE_BITMAP(irq_map
, SpaprMachineState
, 0, irq_map_nr
),
2080 VMSTATE_END_OF_LIST()
2084 static bool spapr_dtb_needed(void *opaque
)
2086 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(opaque
);
2088 return smc
->update_dt_enabled
;
2091 static int spapr_dtb_pre_load(void *opaque
)
2093 SpaprMachineState
*spapr
= (SpaprMachineState
*)opaque
;
2095 g_free(spapr
->fdt_blob
);
2096 spapr
->fdt_blob
= NULL
;
2097 spapr
->fdt_size
= 0;
2102 static const VMStateDescription vmstate_spapr_dtb
= {
2103 .name
= "spapr_dtb",
2105 .minimum_version_id
= 1,
2106 .needed
= spapr_dtb_needed
,
2107 .pre_load
= spapr_dtb_pre_load
,
2108 .fields
= (VMStateField
[]) {
2109 VMSTATE_UINT32(fdt_initial_size
, SpaprMachineState
),
2110 VMSTATE_UINT32(fdt_size
, SpaprMachineState
),
2111 VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob
, SpaprMachineState
, 0, NULL
,
2113 VMSTATE_END_OF_LIST()
2117 static const VMStateDescription vmstate_spapr
= {
2120 .minimum_version_id
= 1,
2121 .pre_load
= spapr_pre_load
,
2122 .post_load
= spapr_post_load
,
2123 .pre_save
= spapr_pre_save
,
2124 .fields
= (VMStateField
[]) {
2125 /* used to be @next_irq */
2126 VMSTATE_UNUSED_BUFFER(version_before_3
, 0, 4),
2129 VMSTATE_UINT64_TEST(rtc_offset
, SpaprMachineState
, version_before_3
),
2131 VMSTATE_PPC_TIMEBASE_V(tb
, SpaprMachineState
, 2),
2132 VMSTATE_END_OF_LIST()
2134 .subsections
= (const VMStateDescription
*[]) {
2135 &vmstate_spapr_ov5_cas
,
2136 &vmstate_spapr_patb_entry
,
2137 &vmstate_spapr_pending_events
,
2138 &vmstate_spapr_cap_htm
,
2139 &vmstate_spapr_cap_vsx
,
2140 &vmstate_spapr_cap_dfp
,
2141 &vmstate_spapr_cap_cfpc
,
2142 &vmstate_spapr_cap_sbbc
,
2143 &vmstate_spapr_cap_ibs
,
2144 &vmstate_spapr_cap_hpt_maxpagesize
,
2145 &vmstate_spapr_irq_map
,
2146 &vmstate_spapr_cap_nested_kvm_hv
,
2148 &vmstate_spapr_cap_large_decr
,
2149 &vmstate_spapr_cap_ccf_assist
,
2154 static int htab_save_setup(QEMUFile
*f
, void *opaque
)
2156 SpaprMachineState
*spapr
= opaque
;
2158 /* "Iteration" header */
2159 if (!spapr
->htab_shift
) {
2160 qemu_put_be32(f
, -1);
2162 qemu_put_be32(f
, spapr
->htab_shift
);
2166 spapr
->htab_save_index
= 0;
2167 spapr
->htab_first_pass
= true;
2169 if (spapr
->htab_shift
) {
2170 assert(kvm_enabled());
2178 static void htab_save_chunk(QEMUFile
*f
, SpaprMachineState
*spapr
,
2179 int chunkstart
, int n_valid
, int n_invalid
)
2181 qemu_put_be32(f
, chunkstart
);
2182 qemu_put_be16(f
, n_valid
);
2183 qemu_put_be16(f
, n_invalid
);
2184 qemu_put_buffer(f
, HPTE(spapr
->htab
, chunkstart
),
2185 HASH_PTE_SIZE_64
* n_valid
);
2188 static void htab_save_end_marker(QEMUFile
*f
)
2190 qemu_put_be32(f
, 0);
2191 qemu_put_be16(f
, 0);
2192 qemu_put_be16(f
, 0);
2195 static void htab_save_first_pass(QEMUFile
*f
, SpaprMachineState
*spapr
,
2198 bool has_timeout
= max_ns
!= -1;
2199 int htabslots
= HTAB_SIZE(spapr
) / HASH_PTE_SIZE_64
;
2200 int index
= spapr
->htab_save_index
;
2201 int64_t starttime
= qemu_clock_get_ns(QEMU_CLOCK_REALTIME
);
2203 assert(spapr
->htab_first_pass
);
2208 /* Consume invalid HPTEs */
2209 while ((index
< htabslots
)
2210 && !HPTE_VALID(HPTE(spapr
->htab
, index
))) {
2211 CLEAN_HPTE(HPTE(spapr
->htab
, index
));
2215 /* Consume valid HPTEs */
2217 while ((index
< htabslots
) && (index
- chunkstart
< USHRT_MAX
)
2218 && HPTE_VALID(HPTE(spapr
->htab
, index
))) {
2219 CLEAN_HPTE(HPTE(spapr
->htab
, index
));
2223 if (index
> chunkstart
) {
2224 int n_valid
= index
- chunkstart
;
2226 htab_save_chunk(f
, spapr
, chunkstart
, n_valid
, 0);
2229 (qemu_clock_get_ns(QEMU_CLOCK_REALTIME
) - starttime
) > max_ns
) {
2233 } while ((index
< htabslots
) && !qemu_file_rate_limit(f
));
2235 if (index
>= htabslots
) {
2236 assert(index
== htabslots
);
2238 spapr
->htab_first_pass
= false;
2240 spapr
->htab_save_index
= index
;
2243 static int htab_save_later_pass(QEMUFile
*f
, SpaprMachineState
*spapr
,
2246 bool final
= max_ns
< 0;
2247 int htabslots
= HTAB_SIZE(spapr
) / HASH_PTE_SIZE_64
;
2248 int examined
= 0, sent
= 0;
2249 int index
= spapr
->htab_save_index
;
2250 int64_t starttime
= qemu_clock_get_ns(QEMU_CLOCK_REALTIME
);
2252 assert(!spapr
->htab_first_pass
);
2255 int chunkstart
, invalidstart
;
2257 /* Consume non-dirty HPTEs */
2258 while ((index
< htabslots
)
2259 && !HPTE_DIRTY(HPTE(spapr
->htab
, index
))) {
2265 /* Consume valid dirty HPTEs */
2266 while ((index
< htabslots
) && (index
- chunkstart
< USHRT_MAX
)
2267 && HPTE_DIRTY(HPTE(spapr
->htab
, index
))
2268 && HPTE_VALID(HPTE(spapr
->htab
, index
))) {
2269 CLEAN_HPTE(HPTE(spapr
->htab
, index
));
2274 invalidstart
= index
;
2275 /* Consume invalid dirty HPTEs */
2276 while ((index
< htabslots
) && (index
- invalidstart
< USHRT_MAX
)
2277 && HPTE_DIRTY(HPTE(spapr
->htab
, index
))
2278 && !HPTE_VALID(HPTE(spapr
->htab
, index
))) {
2279 CLEAN_HPTE(HPTE(spapr
->htab
, index
));
2284 if (index
> chunkstart
) {
2285 int n_valid
= invalidstart
- chunkstart
;
2286 int n_invalid
= index
- invalidstart
;
2288 htab_save_chunk(f
, spapr
, chunkstart
, n_valid
, n_invalid
);
2289 sent
+= index
- chunkstart
;
2291 if (!final
&& (qemu_clock_get_ns(QEMU_CLOCK_REALTIME
) - starttime
) > max_ns
) {
2296 if (examined
>= htabslots
) {
2300 if (index
>= htabslots
) {
2301 assert(index
== htabslots
);
2304 } while ((examined
< htabslots
) && (!qemu_file_rate_limit(f
) || final
));
2306 if (index
>= htabslots
) {
2307 assert(index
== htabslots
);
2311 spapr
->htab_save_index
= index
;
2313 return (examined
>= htabslots
) && (sent
== 0) ? 1 : 0;
2316 #define MAX_ITERATION_NS 5000000 /* 5 ms */
2317 #define MAX_KVM_BUF_SIZE 2048
2319 static int htab_save_iterate(QEMUFile
*f
, void *opaque
)
2321 SpaprMachineState
*spapr
= opaque
;
2325 /* Iteration header */
2326 if (!spapr
->htab_shift
) {
2327 qemu_put_be32(f
, -1);
2330 qemu_put_be32(f
, 0);
2334 assert(kvm_enabled());
2336 fd
= get_htab_fd(spapr
);
2341 rc
= kvmppc_save_htab(f
, fd
, MAX_KVM_BUF_SIZE
, MAX_ITERATION_NS
);
2345 } else if (spapr
->htab_first_pass
) {
2346 htab_save_first_pass(f
, spapr
, MAX_ITERATION_NS
);
2348 rc
= htab_save_later_pass(f
, spapr
, MAX_ITERATION_NS
);
2351 htab_save_end_marker(f
);
2356 static int htab_save_complete(QEMUFile
*f
, void *opaque
)
2358 SpaprMachineState
*spapr
= opaque
;
2361 /* Iteration header */
2362 if (!spapr
->htab_shift
) {
2363 qemu_put_be32(f
, -1);
2366 qemu_put_be32(f
, 0);
2372 assert(kvm_enabled());
2374 fd
= get_htab_fd(spapr
);
2379 rc
= kvmppc_save_htab(f
, fd
, MAX_KVM_BUF_SIZE
, -1);
2384 if (spapr
->htab_first_pass
) {
2385 htab_save_first_pass(f
, spapr
, -1);
2387 htab_save_later_pass(f
, spapr
, -1);
2391 htab_save_end_marker(f
);
2396 static int htab_load(QEMUFile
*f
, void *opaque
, int version_id
)
2398 SpaprMachineState
*spapr
= opaque
;
2399 uint32_t section_hdr
;
2401 Error
*local_err
= NULL
;
2403 if (version_id
< 1 || version_id
> 1) {
2404 error_report("htab_load() bad version");
2408 section_hdr
= qemu_get_be32(f
);
2410 if (section_hdr
== -1) {
2411 spapr_free_hpt(spapr
);
2416 /* First section gives the htab size */
2417 spapr_reallocate_hpt(spapr
, section_hdr
, &local_err
);
2419 error_report_err(local_err
);
2426 assert(kvm_enabled());
2428 fd
= kvmppc_get_htab_fd(true, 0, &local_err
);
2430 error_report_err(local_err
);
2437 uint16_t n_valid
, n_invalid
;
2439 index
= qemu_get_be32(f
);
2440 n_valid
= qemu_get_be16(f
);
2441 n_invalid
= qemu_get_be16(f
);
2443 if ((index
== 0) && (n_valid
== 0) && (n_invalid
== 0)) {
2448 if ((index
+ n_valid
+ n_invalid
) >
2449 (HTAB_SIZE(spapr
) / HASH_PTE_SIZE_64
)) {
2450 /* Bad index in stream */
2452 "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
2453 index
, n_valid
, n_invalid
, spapr
->htab_shift
);
2459 qemu_get_buffer(f
, HPTE(spapr
->htab
, index
),
2460 HASH_PTE_SIZE_64
* n_valid
);
2463 memset(HPTE(spapr
->htab
, index
+ n_valid
), 0,
2464 HASH_PTE_SIZE_64
* n_invalid
);
2471 rc
= kvmppc_load_htab_chunk(f
, fd
, index
, n_valid
, n_invalid
);
2486 static void htab_save_cleanup(void *opaque
)
2488 SpaprMachineState
*spapr
= opaque
;
2490 close_htab_fd(spapr
);
2493 static SaveVMHandlers savevm_htab_handlers
= {
2494 .save_setup
= htab_save_setup
,
2495 .save_live_iterate
= htab_save_iterate
,
2496 .save_live_complete_precopy
= htab_save_complete
,
2497 .save_cleanup
= htab_save_cleanup
,
2498 .load_state
= htab_load
,
2501 static void spapr_boot_set(void *opaque
, const char *boot_device
,
2504 MachineState
*machine
= MACHINE(opaque
);
2505 machine
->boot_order
= g_strdup(boot_device
);
2508 static void spapr_create_lmb_dr_connectors(SpaprMachineState
*spapr
)
2510 MachineState
*machine
= MACHINE(spapr
);
2511 uint64_t lmb_size
= SPAPR_MEMORY_BLOCK_SIZE
;
2512 uint32_t nr_lmbs
= (machine
->maxram_size
- machine
->ram_size
)/lmb_size
;
2515 for (i
= 0; i
< nr_lmbs
; i
++) {
2518 addr
= i
* lmb_size
+ machine
->device_memory
->base
;
2519 spapr_dr_connector_new(OBJECT(spapr
), TYPE_SPAPR_DRC_LMB
,
2525 * If RAM size, maxmem size and individual node mem sizes aren't aligned
2526 * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
2527 * since we can't support such unaligned sizes with DRCONF_MEMORY.
2529 static void spapr_validate_node_memory(MachineState
*machine
, Error
**errp
)
2533 if (machine
->ram_size
% SPAPR_MEMORY_BLOCK_SIZE
) {
2534 error_setg(errp
, "Memory size 0x" RAM_ADDR_FMT
2535 " is not aligned to %" PRIu64
" MiB",
2537 SPAPR_MEMORY_BLOCK_SIZE
/ MiB
);
2541 if (machine
->maxram_size
% SPAPR_MEMORY_BLOCK_SIZE
) {
2542 error_setg(errp
, "Maximum memory size 0x" RAM_ADDR_FMT
2543 " is not aligned to %" PRIu64
" MiB",
2545 SPAPR_MEMORY_BLOCK_SIZE
/ MiB
);
2549 for (i
= 0; i
< machine
->numa_state
->num_nodes
; i
++) {
2550 if (machine
->numa_state
->nodes
[i
].node_mem
% SPAPR_MEMORY_BLOCK_SIZE
) {
2552 "Node %d memory size 0x%" PRIx64
2553 " is not aligned to %" PRIu64
" MiB",
2554 i
, machine
->numa_state
->nodes
[i
].node_mem
,
2555 SPAPR_MEMORY_BLOCK_SIZE
/ MiB
);
2561 /* find cpu slot in machine->possible_cpus by core_id */
2562 static CPUArchId
*spapr_find_cpu_slot(MachineState
*ms
, uint32_t id
, int *idx
)
2564 int index
= id
/ ms
->smp
.threads
;
2566 if (index
>= ms
->possible_cpus
->len
) {
2572 return &ms
->possible_cpus
->cpus
[index
];
2575 static void spapr_set_vsmt_mode(SpaprMachineState
*spapr
, Error
**errp
)
2577 MachineState
*ms
= MACHINE(spapr
);
2578 Error
*local_err
= NULL
;
2579 bool vsmt_user
= !!spapr
->vsmt
;
2580 int kvm_smt
= kvmppc_smt_threads();
2582 unsigned int smp_threads
= ms
->smp
.threads
;
2584 if (!kvm_enabled() && (smp_threads
> 1)) {
2585 error_setg(&local_err
, "TCG cannot support more than 1 thread/core "
2586 "on a pseries machine");
2589 if (!is_power_of_2(smp_threads
)) {
2590 error_setg(&local_err
, "Cannot support %d threads/core on a pseries "
2591 "machine because it must be a power of 2", smp_threads
);
2595 /* Detemine the VSMT mode to use: */
2597 if (spapr
->vsmt
< smp_threads
) {
2598 error_setg(&local_err
, "Cannot support VSMT mode %d"
2599 " because it must be >= threads/core (%d)",
2600 spapr
->vsmt
, smp_threads
);
2603 /* In this case, spapr->vsmt has been set by the command line */
2606 * Default VSMT value is tricky, because we need it to be as
2607 * consistent as possible (for migration), but this requires
2608 * changing it for at least some existing cases. We pick 8 as
2609 * the value that we'd get with KVM on POWER8, the
2610 * overwhelmingly common case in production systems.
2612 spapr
->vsmt
= MAX(8, smp_threads
);
2615 /* KVM: If necessary, set the SMT mode: */
2616 if (kvm_enabled() && (spapr
->vsmt
!= kvm_smt
)) {
2617 ret
= kvmppc_set_smt_threads(spapr
->vsmt
);
2619 /* Looks like KVM isn't able to change VSMT mode */
2620 error_setg(&local_err
,
2621 "Failed to set KVM's VSMT mode to %d (errno %d)",
2623 /* We can live with that if the default one is big enough
2624 * for the number of threads, and a submultiple of the one
2625 * we want. In this case we'll waste some vcpu ids, but
2626 * behaviour will be correct */
2627 if ((kvm_smt
>= smp_threads
) && ((spapr
->vsmt
% kvm_smt
) == 0)) {
2628 warn_report_err(local_err
);
2633 error_append_hint(&local_err
,
2634 "On PPC, a VM with %d threads/core"
2635 " on a host with %d threads/core"
2636 " requires the use of VSMT mode %d.\n",
2637 smp_threads
, kvm_smt
, spapr
->vsmt
);
2639 kvmppc_hint_smt_possible(&local_err
);
2644 /* else TCG: nothing to do currently */
2646 error_propagate(errp
, local_err
);
2649 static void spapr_init_cpus(SpaprMachineState
*spapr
)
2651 MachineState
*machine
= MACHINE(spapr
);
2652 MachineClass
*mc
= MACHINE_GET_CLASS(machine
);
2653 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(machine
);
2654 const char *type
= spapr_get_cpu_core_type(machine
->cpu_type
);
2655 const CPUArchIdList
*possible_cpus
;
2656 unsigned int smp_cpus
= machine
->smp
.cpus
;
2657 unsigned int smp_threads
= machine
->smp
.threads
;
2658 unsigned int max_cpus
= machine
->smp
.max_cpus
;
2659 int boot_cores_nr
= smp_cpus
/ smp_threads
;
2662 possible_cpus
= mc
->possible_cpu_arch_ids(machine
);
2663 if (mc
->has_hotpluggable_cpus
) {
2664 if (smp_cpus
% smp_threads
) {
2665 error_report("smp_cpus (%u) must be multiple of threads (%u)",
2666 smp_cpus
, smp_threads
);
2669 if (max_cpus
% smp_threads
) {
2670 error_report("max_cpus (%u) must be multiple of threads (%u)",
2671 max_cpus
, smp_threads
);
2675 if (max_cpus
!= smp_cpus
) {
2676 error_report("This machine version does not support CPU hotplug");
2679 boot_cores_nr
= possible_cpus
->len
;
2682 if (smc
->pre_2_10_has_unused_icps
) {
2685 for (i
= 0; i
< spapr_max_server_number(spapr
); i
++) {
2686 /* Dummy entries get deregistered when real ICPState objects
2687 * are registered during CPU core hotplug.
2689 pre_2_10_vmstate_register_dummy_icp(i
);
2693 for (i
= 0; i
< possible_cpus
->len
; i
++) {
2694 int core_id
= i
* smp_threads
;
2696 if (mc
->has_hotpluggable_cpus
) {
2697 spapr_dr_connector_new(OBJECT(spapr
), TYPE_SPAPR_DRC_CPU
,
2698 spapr_vcpu_id(spapr
, core_id
));
2701 if (i
< boot_cores_nr
) {
2702 Object
*core
= object_new(type
);
2703 int nr_threads
= smp_threads
;
2705 /* Handle the partially filled core for older machine types */
2706 if ((i
+ 1) * smp_threads
>= smp_cpus
) {
2707 nr_threads
= smp_cpus
- i
* smp_threads
;
2710 object_property_set_int(core
, nr_threads
, "nr-threads",
2712 object_property_set_int(core
, core_id
, CPU_CORE_PROP_CORE_ID
,
2714 object_property_set_bool(core
, true, "realized", &error_fatal
);
2721 static PCIHostState
*spapr_create_default_phb(void)
2725 dev
= qdev_create(NULL
, TYPE_SPAPR_PCI_HOST_BRIDGE
);
2726 qdev_prop_set_uint32(dev
, "index", 0);
2727 qdev_init_nofail(dev
);
2729 return PCI_HOST_BRIDGE(dev
);
2732 /* pSeries LPAR / sPAPR hardware init */
2733 static void spapr_machine_init(MachineState
*machine
)
2735 SpaprMachineState
*spapr
= SPAPR_MACHINE(machine
);
2736 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(machine
);
2737 const char *kernel_filename
= machine
->kernel_filename
;
2738 const char *initrd_filename
= machine
->initrd_filename
;
2741 MemoryRegion
*sysmem
= get_system_memory();
2742 MemoryRegion
*ram
= g_new(MemoryRegion
, 1);
2743 hwaddr node0_size
= spapr_node0_size(machine
);
2744 long load_limit
, fw_size
;
2746 Error
*resize_hpt_err
= NULL
;
2748 msi_nonbroken
= true;
2750 QLIST_INIT(&spapr
->phbs
);
2751 QTAILQ_INIT(&spapr
->pending_dimm_unplugs
);
2753 /* Determine capabilities to run with */
2754 spapr_caps_init(spapr
);
2756 kvmppc_check_papr_resize_hpt(&resize_hpt_err
);
2757 if (spapr
->resize_hpt
== SPAPR_RESIZE_HPT_DEFAULT
) {
2759 * If the user explicitly requested a mode we should either
2760 * supply it, or fail completely (which we do below). But if
2761 * it's not set explicitly, we reset our mode to something
2764 if (resize_hpt_err
) {
2765 spapr
->resize_hpt
= SPAPR_RESIZE_HPT_DISABLED
;
2766 error_free(resize_hpt_err
);
2767 resize_hpt_err
= NULL
;
2769 spapr
->resize_hpt
= smc
->resize_hpt_default
;
2773 assert(spapr
->resize_hpt
!= SPAPR_RESIZE_HPT_DEFAULT
);
2775 if ((spapr
->resize_hpt
!= SPAPR_RESIZE_HPT_DISABLED
) && resize_hpt_err
) {
2777 * User requested HPT resize, but this host can't supply it. Bail out
2779 error_report_err(resize_hpt_err
);
2783 spapr
->rma_size
= node0_size
;
2785 /* With KVM, we don't actually know whether KVM supports an
2786 * unbounded RMA (PR KVM) or is limited by the hash table size
2787 * (HV KVM using VRMA), so we always assume the latter
2789 * In that case, we also limit the initial allocations for RTAS
2790 * etc... to 256M since we have no way to know what the VRMA size
2791 * is going to be as it depends on the size of the hash table
2792 * which isn't determined yet.
2794 if (kvm_enabled()) {
2795 spapr
->vrma_adjust
= 1;
2796 spapr
->rma_size
= MIN(spapr
->rma_size
, 0x10000000);
2799 /* Actually we don't support unbounded RMA anymore since we added
2800 * proper emulation of HV mode. The max we can get is 16G which
2801 * also happens to be what we configure for PAPR mode so make sure
2802 * we don't do anything bigger than that
2804 spapr
->rma_size
= MIN(spapr
->rma_size
, 0x400000000ull
);
2806 if (spapr
->rma_size
> node0_size
) {
2807 error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx
")",
2812 /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
2813 load_limit
= MIN(spapr
->rma_size
, RTAS_MAX_ADDR
) - FW_OVERHEAD
;
2816 * VSMT must be set in order to be able to compute VCPU ids, ie to
2817 * call spapr_max_server_number() or spapr_vcpu_id().
2819 spapr_set_vsmt_mode(spapr
, &error_fatal
);
2821 /* Set up Interrupt Controller before we create the VCPUs */
2822 spapr_irq_init(spapr
, &error_fatal
);
2824 /* Set up containers for ibm,client-architecture-support negotiated options
2826 spapr
->ov5
= spapr_ovec_new();
2827 spapr
->ov5_cas
= spapr_ovec_new();
2829 if (smc
->dr_lmb_enabled
) {
2830 spapr_ovec_set(spapr
->ov5
, OV5_DRCONF_MEMORY
);
2831 spapr_validate_node_memory(machine
, &error_fatal
);
2834 spapr_ovec_set(spapr
->ov5
, OV5_FORM1_AFFINITY
);
2836 /* advertise support for dedicated HP event source to guests */
2837 if (spapr
->use_hotplug_event_source
) {
2838 spapr_ovec_set(spapr
->ov5
, OV5_HP_EVT
);
2841 /* advertise support for HPT resizing */
2842 if (spapr
->resize_hpt
!= SPAPR_RESIZE_HPT_DISABLED
) {
2843 spapr_ovec_set(spapr
->ov5
, OV5_HPT_RESIZE
);
2846 /* advertise support for ibm,dyamic-memory-v2 */
2847 spapr_ovec_set(spapr
->ov5
, OV5_DRMEM_V2
);
2849 /* advertise XIVE on POWER9 machines */
2850 if (spapr
->irq
->ov5
& (SPAPR_OV5_XIVE_EXPLOIT
| SPAPR_OV5_XIVE_BOTH
)) {
2851 spapr_ovec_set(spapr
->ov5
, OV5_XIVE_EXPLOIT
);
2855 spapr_init_cpus(spapr
);
2858 * check we don't have a memory-less/cpu-less NUMA node
2859 * Firmware relies on the existing memory/cpu topology to provide the
2860 * NUMA topology to the kernel.
2861 * And the linux kernel needs to know the NUMA topology at start
2862 * to be able to hotplug CPUs later.
2864 if (machine
->numa_state
->num_nodes
) {
2865 for (i
= 0; i
< machine
->numa_state
->num_nodes
; ++i
) {
2866 /* check for memory-less node */
2867 if (machine
->numa_state
->nodes
[i
].node_mem
== 0) {
2870 /* check for cpu-less node */
2872 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
2873 if (cpu
->node_id
== i
) {
2878 /* memory-less and cpu-less node */
2881 "Memory-less/cpu-less nodes are not supported (node %d)",
2890 if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
2891 ppc_type_check_compat(machine
->cpu_type
, CPU_POWERPC_LOGICAL_3_00
, 0,
2892 spapr
->max_compat_pvr
)) {
2893 /* KVM and TCG always allow GTSE with radix... */
2894 spapr_ovec_set(spapr
->ov5
, OV5_MMU_RADIX_GTSE
);
2896 /* ... but not with hash (currently). */
2898 if (kvm_enabled()) {
2899 /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
2900 kvmppc_enable_logical_ci_hcalls();
2901 kvmppc_enable_set_mode_hcall();
2903 /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
2904 kvmppc_enable_clear_ref_mod_hcalls();
2906 /* Enable H_PAGE_INIT */
2907 kvmppc_enable_h_page_init();
2911 memory_region_allocate_system_memory(ram
, NULL
, "ppc_spapr.ram",
2913 memory_region_add_subregion(sysmem
, 0, ram
);
2915 /* always allocate the device memory information */
2916 machine
->device_memory
= g_malloc0(sizeof(*machine
->device_memory
));
2918 /* initialize hotplug memory address space */
2919 if (machine
->ram_size
< machine
->maxram_size
) {
2920 ram_addr_t device_mem_size
= machine
->maxram_size
- machine
->ram_size
;
2922 * Limit the number of hotpluggable memory slots to half the number
2923 * slots that KVM supports, leaving the other half for PCI and other
2924 * devices. However ensure that number of slots doesn't drop below 32.
2926 int max_memslots
= kvm_enabled() ? kvm_get_max_memslots() / 2 :
2927 SPAPR_MAX_RAM_SLOTS
;
2929 if (max_memslots
< SPAPR_MAX_RAM_SLOTS
) {
2930 max_memslots
= SPAPR_MAX_RAM_SLOTS
;
2932 if (machine
->ram_slots
> max_memslots
) {
2933 error_report("Specified number of memory slots %"
2934 PRIu64
" exceeds max supported %d",
2935 machine
->ram_slots
, max_memslots
);
2939 machine
->device_memory
->base
= ROUND_UP(machine
->ram_size
,
2940 SPAPR_DEVICE_MEM_ALIGN
);
2941 memory_region_init(&machine
->device_memory
->mr
, OBJECT(spapr
),
2942 "device-memory", device_mem_size
);
2943 memory_region_add_subregion(sysmem
, machine
->device_memory
->base
,
2944 &machine
->device_memory
->mr
);
2947 if (smc
->dr_lmb_enabled
) {
2948 spapr_create_lmb_dr_connectors(spapr
);
2951 filename
= qemu_find_file(QEMU_FILE_TYPE_BIOS
, "spapr-rtas.bin");
2953 error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
2956 spapr
->rtas_size
= get_image_size(filename
);
2957 if (spapr
->rtas_size
< 0) {
2958 error_report("Could not get size of LPAR rtas '%s'", filename
);
2961 spapr
->rtas_blob
= g_malloc(spapr
->rtas_size
);
2962 if (load_image_size(filename
, spapr
->rtas_blob
, spapr
->rtas_size
) < 0) {
2963 error_report("Could not load LPAR rtas '%s'", filename
);
2966 if (spapr
->rtas_size
> RTAS_MAX_SIZE
) {
2967 error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
2968 (size_t)spapr
->rtas_size
, RTAS_MAX_SIZE
);
2973 /* Set up RTAS event infrastructure */
2974 spapr_events_init(spapr
);
2976 /* Set up the RTC RTAS interfaces */
2977 spapr_rtc_create(spapr
);
2979 /* Set up VIO bus */
2980 spapr
->vio_bus
= spapr_vio_bus_init();
2982 for (i
= 0; i
< serial_max_hds(); i
++) {
2984 spapr_vty_create(spapr
->vio_bus
, serial_hd(i
));
2988 /* We always have at least the nvram device on VIO */
2989 spapr_create_nvram(spapr
);
2992 * Setup hotplug / dynamic-reconfiguration connectors. top-level
2993 * connectors (described in root DT node's "ibm,drc-types" property)
2994 * are pre-initialized here. additional child connectors (such as
2995 * connectors for a PHBs PCI slots) are added as needed during their
2996 * parent's realization.
2998 if (smc
->dr_phb_enabled
) {
2999 for (i
= 0; i
< SPAPR_MAX_PHBS
; i
++) {
3000 spapr_dr_connector_new(OBJECT(machine
), TYPE_SPAPR_DRC_PHB
, i
);
3005 spapr_pci_rtas_init();
3007 phb
= spapr_create_default_phb();
3009 for (i
= 0; i
< nb_nics
; i
++) {
3010 NICInfo
*nd
= &nd_table
[i
];
3013 nd
->model
= g_strdup("spapr-vlan");
3016 if (g_str_equal(nd
->model
, "spapr-vlan") ||
3017 g_str_equal(nd
->model
, "ibmveth")) {
3018 spapr_vlan_create(spapr
->vio_bus
, nd
);
3020 pci_nic_init_nofail(&nd_table
[i
], phb
->bus
, nd
->model
, NULL
);
3024 for (i
= 0; i
<= drive_get_max_bus(IF_SCSI
); i
++) {
3025 spapr_vscsi_create(spapr
->vio_bus
);
3029 if (spapr_vga_init(phb
->bus
, &error_fatal
)) {
3030 spapr
->has_graphics
= true;
3031 machine
->usb
|= defaults_enabled() && !machine
->usb_disabled
;
3035 if (smc
->use_ohci_by_default
) {
3036 pci_create_simple(phb
->bus
, -1, "pci-ohci");
3038 pci_create_simple(phb
->bus
, -1, "nec-usb-xhci");
3041 if (spapr
->has_graphics
) {
3042 USBBus
*usb_bus
= usb_bus_find(-1);
3044 usb_create_simple(usb_bus
, "usb-kbd");
3045 usb_create_simple(usb_bus
, "usb-mouse");
3049 if (spapr
->rma_size
< (MIN_RMA_SLOF
* MiB
)) {
3051 "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
3056 if (kernel_filename
) {
3057 uint64_t lowaddr
= 0;
3059 spapr
->kernel_size
= load_elf(kernel_filename
, NULL
,
3060 translate_kernel_address
, NULL
,
3061 NULL
, &lowaddr
, NULL
, 1,
3062 PPC_ELF_MACHINE
, 0, 0);
3063 if (spapr
->kernel_size
== ELF_LOAD_WRONG_ENDIAN
) {
3064 spapr
->kernel_size
= load_elf(kernel_filename
, NULL
,
3065 translate_kernel_address
, NULL
, NULL
,
3066 &lowaddr
, NULL
, 0, PPC_ELF_MACHINE
,
3068 spapr
->kernel_le
= spapr
->kernel_size
> 0;
3070 if (spapr
->kernel_size
< 0) {
3071 error_report("error loading %s: %s", kernel_filename
,
3072 load_elf_strerror(spapr
->kernel_size
));
3077 if (initrd_filename
) {
3078 /* Try to locate the initrd in the gap between the kernel
3079 * and the firmware. Add a bit of space just in case
3081 spapr
->initrd_base
= (KERNEL_LOAD_ADDR
+ spapr
->kernel_size
3082 + 0x1ffff) & ~0xffff;
3083 spapr
->initrd_size
= load_image_targphys(initrd_filename
,
3086 - spapr
->initrd_base
);
3087 if (spapr
->initrd_size
< 0) {
3088 error_report("could not load initial ram disk '%s'",
3095 if (bios_name
== NULL
) {
3096 bios_name
= FW_FILE_NAME
;
3098 filename
= qemu_find_file(QEMU_FILE_TYPE_BIOS
, bios_name
);
3100 error_report("Could not find LPAR firmware '%s'", bios_name
);
3103 fw_size
= load_image_targphys(filename
, 0, FW_MAX_SIZE
);
3105 error_report("Could not load LPAR firmware '%s'", filename
);
3110 /* FIXME: Should register things through the MachineState's qdev
3111 * interface, this is a legacy from the sPAPREnvironment structure
3112 * which predated MachineState but had a similar function */
3113 vmstate_register(NULL
, 0, &vmstate_spapr
, spapr
);
3114 register_savevm_live("spapr/htab", -1, 1,
3115 &savevm_htab_handlers
, spapr
);
3117 qbus_set_hotplug_handler(sysbus_get_default(), OBJECT(machine
),
3120 qemu_register_boot_set(spapr_boot_set
, spapr
);
3123 * Nothing needs to be done to resume a suspended guest because
3124 * suspending does not change the machine state, so no need for
3125 * a ->wakeup method.
3127 qemu_register_wakeup_support();
3129 if (kvm_enabled()) {
3130 /* to stop and start vmclock */
3131 qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change
,
3134 kvmppc_spapr_enable_inkernel_multitce();
3138 static int spapr_kvm_type(MachineState
*machine
, const char *vm_type
)
3144 if (!strcmp(vm_type
, "HV")) {
3148 if (!strcmp(vm_type
, "PR")) {
3152 error_report("Unknown kvm-type specified '%s'", vm_type
);
3157 * Implementation of an interface to adjust firmware path
3158 * for the bootindex property handling.
3160 static char *spapr_get_fw_dev_path(FWPathProvider
*p
, BusState
*bus
,
3163 #define CAST(type, obj, name) \
3164 ((type *)object_dynamic_cast(OBJECT(obj), (name)))
3165 SCSIDevice
*d
= CAST(SCSIDevice
, dev
, TYPE_SCSI_DEVICE
);
3166 SpaprPhbState
*phb
= CAST(SpaprPhbState
, dev
, TYPE_SPAPR_PCI_HOST_BRIDGE
);
3167 VHostSCSICommon
*vsc
= CAST(VHostSCSICommon
, dev
, TYPE_VHOST_SCSI_COMMON
);
3170 void *spapr
= CAST(void, bus
->parent
, "spapr-vscsi");
3171 VirtIOSCSI
*virtio
= CAST(VirtIOSCSI
, bus
->parent
, TYPE_VIRTIO_SCSI
);
3172 USBDevice
*usb
= CAST(USBDevice
, bus
->parent
, TYPE_USB_DEVICE
);
3176 * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
3177 * In the top 16 bits of the 64-bit LUN, we use SRP luns of the form
3178 * 0x8000 | (target << 8) | (bus << 5) | lun
3179 * (see the "Logical unit addressing format" table in SAM5)
3181 unsigned id
= 0x8000 | (d
->id
<< 8) | (d
->channel
<< 5) | d
->lun
;
3182 return g_strdup_printf("%s@%"PRIX64
, qdev_fw_name(dev
),
3183 (uint64_t)id
<< 48);
3184 } else if (virtio
) {
3186 * We use SRP luns of the form 01000000 | (target << 8) | lun
3187 * in the top 32 bits of the 64-bit LUN
3188 * Note: the quote above is from SLOF and it is wrong,
3189 * the actual binding is:
3190 * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
3192 unsigned id
= 0x1000000 | (d
->id
<< 16) | d
->lun
;
3193 if (d
->lun
>= 256) {
3194 /* Use the LUN "flat space addressing method" */
3197 return g_strdup_printf("%s@%"PRIX64
, qdev_fw_name(dev
),
3198 (uint64_t)id
<< 32);
3201 * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
3202 * in the top 32 bits of the 64-bit LUN
3204 unsigned usb_port
= atoi(usb
->port
->path
);
3205 unsigned id
= 0x1000000 | (usb_port
<< 16) | d
->lun
;
3206 return g_strdup_printf("%s@%"PRIX64
, qdev_fw_name(dev
),
3207 (uint64_t)id
<< 32);
3212 * SLOF probes the USB devices, and if it recognizes that the device is a
3213 * storage device, it changes its name to "storage" instead of "usb-host",
3214 * and additionally adds a child node for the SCSI LUN, so the correct
3215 * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
3217 if (strcmp("usb-host", qdev_fw_name(dev
)) == 0) {
3218 USBDevice
*usbdev
= CAST(USBDevice
, dev
, TYPE_USB_DEVICE
);
3219 if (usb_host_dev_is_scsi_storage(usbdev
)) {
3220 return g_strdup_printf("storage@%s/disk", usbdev
->port
->path
);
3225 /* Replace "pci" with "pci@800000020000000" */
3226 return g_strdup_printf("pci@%"PRIX64
, phb
->buid
);
3230 /* Same logic as virtio above */
3231 unsigned id
= 0x1000000 | (vsc
->target
<< 16) | vsc
->lun
;
3232 return g_strdup_printf("disk@%"PRIX64
, (uint64_t)id
<< 32);
3235 if (g_str_equal("pci-bridge", qdev_fw_name(dev
))) {
3236 /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
3237 PCIDevice
*pcidev
= CAST(PCIDevice
, dev
, TYPE_PCI_DEVICE
);
3238 return g_strdup_printf("pci@%x", PCI_SLOT(pcidev
->devfn
));
3244 static char *spapr_get_kvm_type(Object
*obj
, Error
**errp
)
3246 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3248 return g_strdup(spapr
->kvm_type
);
3251 static void spapr_set_kvm_type(Object
*obj
, const char *value
, Error
**errp
)
3253 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3255 g_free(spapr
->kvm_type
);
3256 spapr
->kvm_type
= g_strdup(value
);
3259 static bool spapr_get_modern_hotplug_events(Object
*obj
, Error
**errp
)
3261 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3263 return spapr
->use_hotplug_event_source
;
3266 static void spapr_set_modern_hotplug_events(Object
*obj
, bool value
,
3269 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3271 spapr
->use_hotplug_event_source
= value
;
3274 static bool spapr_get_msix_emulation(Object
*obj
, Error
**errp
)
3279 static char *spapr_get_resize_hpt(Object
*obj
, Error
**errp
)
3281 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3283 switch (spapr
->resize_hpt
) {
3284 case SPAPR_RESIZE_HPT_DEFAULT
:
3285 return g_strdup("default");
3286 case SPAPR_RESIZE_HPT_DISABLED
:
3287 return g_strdup("disabled");
3288 case SPAPR_RESIZE_HPT_ENABLED
:
3289 return g_strdup("enabled");
3290 case SPAPR_RESIZE_HPT_REQUIRED
:
3291 return g_strdup("required");
3293 g_assert_not_reached();
3296 static void spapr_set_resize_hpt(Object
*obj
, const char *value
, Error
**errp
)
3298 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3300 if (strcmp(value
, "default") == 0) {
3301 spapr
->resize_hpt
= SPAPR_RESIZE_HPT_DEFAULT
;
3302 } else if (strcmp(value
, "disabled") == 0) {
3303 spapr
->resize_hpt
= SPAPR_RESIZE_HPT_DISABLED
;
3304 } else if (strcmp(value
, "enabled") == 0) {
3305 spapr
->resize_hpt
= SPAPR_RESIZE_HPT_ENABLED
;
3306 } else if (strcmp(value
, "required") == 0) {
3307 spapr
->resize_hpt
= SPAPR_RESIZE_HPT_REQUIRED
;
3309 error_setg(errp
, "Bad value for \"resize-hpt\" property");
3313 static void spapr_get_vsmt(Object
*obj
, Visitor
*v
, const char *name
,
3314 void *opaque
, Error
**errp
)
3316 visit_type_uint32(v
, name
, (uint32_t *)opaque
, errp
);
3319 static void spapr_set_vsmt(Object
*obj
, Visitor
*v
, const char *name
,
3320 void *opaque
, Error
**errp
)
3322 visit_type_uint32(v
, name
, (uint32_t *)opaque
, errp
);
3325 static char *spapr_get_ic_mode(Object
*obj
, Error
**errp
)
3327 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3329 if (spapr
->irq
== &spapr_irq_xics_legacy
) {
3330 return g_strdup("legacy");
3331 } else if (spapr
->irq
== &spapr_irq_xics
) {
3332 return g_strdup("xics");
3333 } else if (spapr
->irq
== &spapr_irq_xive
) {
3334 return g_strdup("xive");
3335 } else if (spapr
->irq
== &spapr_irq_dual
) {
3336 return g_strdup("dual");
3338 g_assert_not_reached();
3341 static void spapr_set_ic_mode(Object
*obj
, const char *value
, Error
**errp
)
3343 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3345 if (SPAPR_MACHINE_GET_CLASS(spapr
)->legacy_irq_allocation
) {
3346 error_setg(errp
, "This machine only uses the legacy XICS backend, don't pass ic-mode");
3350 /* The legacy IRQ backend can not be set */
3351 if (strcmp(value
, "xics") == 0) {
3352 spapr
->irq
= &spapr_irq_xics
;
3353 } else if (strcmp(value
, "xive") == 0) {
3354 spapr
->irq
= &spapr_irq_xive
;
3355 } else if (strcmp(value
, "dual") == 0) {
3356 spapr
->irq
= &spapr_irq_dual
;
3358 error_setg(errp
, "Bad value for \"ic-mode\" property");
3362 static char *spapr_get_host_model(Object
*obj
, Error
**errp
)
3364 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3366 return g_strdup(spapr
->host_model
);
3369 static void spapr_set_host_model(Object
*obj
, const char *value
, Error
**errp
)
3371 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3373 g_free(spapr
->host_model
);
3374 spapr
->host_model
= g_strdup(value
);
3377 static char *spapr_get_host_serial(Object
*obj
, Error
**errp
)
3379 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3381 return g_strdup(spapr
->host_serial
);
3384 static void spapr_set_host_serial(Object
*obj
, const char *value
, Error
**errp
)
3386 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3388 g_free(spapr
->host_serial
);
3389 spapr
->host_serial
= g_strdup(value
);
3392 static void spapr_instance_init(Object
*obj
)
3394 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3395 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(spapr
);
3397 spapr
->htab_fd
= -1;
3398 spapr
->use_hotplug_event_source
= true;
3399 object_property_add_str(obj
, "kvm-type",
3400 spapr_get_kvm_type
, spapr_set_kvm_type
, NULL
);
3401 object_property_set_description(obj
, "kvm-type",
3402 "Specifies the KVM virtualization mode (HV, PR)",
3404 object_property_add_bool(obj
, "modern-hotplug-events",
3405 spapr_get_modern_hotplug_events
,
3406 spapr_set_modern_hotplug_events
,
3408 object_property_set_description(obj
, "modern-hotplug-events",
3409 "Use dedicated hotplug event mechanism in"
3410 " place of standard EPOW events when possible"
3411 " (required for memory hot-unplug support)",
3413 ppc_compat_add_property(obj
, "max-cpu-compat", &spapr
->max_compat_pvr
,
3414 "Maximum permitted CPU compatibility mode",
3417 object_property_add_str(obj
, "resize-hpt",
3418 spapr_get_resize_hpt
, spapr_set_resize_hpt
, NULL
);
3419 object_property_set_description(obj
, "resize-hpt",
3420 "Resizing of the Hash Page Table (enabled, disabled, required)",
3422 object_property_add(obj
, "vsmt", "uint32", spapr_get_vsmt
,
3423 spapr_set_vsmt
, NULL
, &spapr
->vsmt
, &error_abort
);
3424 object_property_set_description(obj
, "vsmt",
3425 "Virtual SMT: KVM behaves as if this were"
3426 " the host's SMT mode", &error_abort
);
3427 object_property_add_bool(obj
, "vfio-no-msix-emulation",
3428 spapr_get_msix_emulation
, NULL
, NULL
);
3430 /* The machine class defines the default interrupt controller mode */
3431 spapr
->irq
= smc
->irq
;
3432 object_property_add_str(obj
, "ic-mode", spapr_get_ic_mode
,
3433 spapr_set_ic_mode
, NULL
);
3434 object_property_set_description(obj
, "ic-mode",
3435 "Specifies the interrupt controller mode (xics, xive, dual)",
3438 object_property_add_str(obj
, "host-model",
3439 spapr_get_host_model
, spapr_set_host_model
,
3441 object_property_set_description(obj
, "host-model",
3442 "Host model to advertise in guest device tree", &error_abort
);
3443 object_property_add_str(obj
, "host-serial",
3444 spapr_get_host_serial
, spapr_set_host_serial
,
3446 object_property_set_description(obj
, "host-serial",
3447 "Host serial number to advertise in guest device tree", &error_abort
);
3450 static void spapr_machine_finalizefn(Object
*obj
)
3452 SpaprMachineState
*spapr
= SPAPR_MACHINE(obj
);
3454 g_free(spapr
->kvm_type
);
3457 void spapr_do_system_reset_on_cpu(CPUState
*cs
, run_on_cpu_data arg
)
3459 cpu_synchronize_state(cs
);
3460 ppc_cpu_do_system_reset(cs
);
3463 static void spapr_nmi(NMIState
*n
, int cpu_index
, Error
**errp
)
3468 async_run_on_cpu(cs
, spapr_do_system_reset_on_cpu
, RUN_ON_CPU_NULL
);
3472 int spapr_lmb_dt_populate(SpaprDrc
*drc
, SpaprMachineState
*spapr
,
3473 void *fdt
, int *fdt_start_offset
, Error
**errp
)
3478 addr
= spapr_drc_index(drc
) * SPAPR_MEMORY_BLOCK_SIZE
;
3479 node
= object_property_get_uint(OBJECT(drc
->dev
), PC_DIMM_NODE_PROP
,
3481 *fdt_start_offset
= spapr_populate_memory_node(fdt
, node
, addr
,
3482 SPAPR_MEMORY_BLOCK_SIZE
);
3486 static void spapr_add_lmbs(DeviceState
*dev
, uint64_t addr_start
, uint64_t size
,
3487 bool dedicated_hp_event_source
, Error
**errp
)
3490 uint32_t nr_lmbs
= size
/SPAPR_MEMORY_BLOCK_SIZE
;
3492 uint64_t addr
= addr_start
;
3493 bool hotplugged
= spapr_drc_hotplugged(dev
);
3494 Error
*local_err
= NULL
;
3496 for (i
= 0; i
< nr_lmbs
; i
++) {
3497 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
,
3498 addr
/ SPAPR_MEMORY_BLOCK_SIZE
);
3501 spapr_drc_attach(drc
, dev
, &local_err
);
3503 while (addr
> addr_start
) {
3504 addr
-= SPAPR_MEMORY_BLOCK_SIZE
;
3505 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
,
3506 addr
/ SPAPR_MEMORY_BLOCK_SIZE
);
3507 spapr_drc_detach(drc
);
3509 error_propagate(errp
, local_err
);
3513 spapr_drc_reset(drc
);
3515 addr
+= SPAPR_MEMORY_BLOCK_SIZE
;
3517 /* send hotplug notification to the
3518 * guest only in case of hotplugged memory
3521 if (dedicated_hp_event_source
) {
3522 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
,
3523 addr_start
/ SPAPR_MEMORY_BLOCK_SIZE
);
3524 spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB
,
3526 spapr_drc_index(drc
));
3528 spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB
,
3534 static void spapr_memory_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
3537 Error
*local_err
= NULL
;
3538 SpaprMachineState
*ms
= SPAPR_MACHINE(hotplug_dev
);
3539 PCDIMMDevice
*dimm
= PC_DIMM(dev
);
3540 uint64_t size
, addr
;
3542 size
= memory_device_get_region_size(MEMORY_DEVICE(dev
), &error_abort
);
3544 pc_dimm_plug(dimm
, MACHINE(ms
), &local_err
);
3549 addr
= object_property_get_uint(OBJECT(dimm
),
3550 PC_DIMM_ADDR_PROP
, &local_err
);
3555 spapr_add_lmbs(dev
, addr
, size
, spapr_ovec_test(ms
->ov5_cas
, OV5_HP_EVT
),
3564 pc_dimm_unplug(dimm
, MACHINE(ms
));
3566 error_propagate(errp
, local_err
);
3569 static void spapr_memory_pre_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
3572 const SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(hotplug_dev
);
3573 SpaprMachineState
*spapr
= SPAPR_MACHINE(hotplug_dev
);
3574 PCDIMMDevice
*dimm
= PC_DIMM(dev
);
3575 Error
*local_err
= NULL
;
3580 if (!smc
->dr_lmb_enabled
) {
3581 error_setg(errp
, "Memory hotplug not supported for this machine");
3585 size
= memory_device_get_region_size(MEMORY_DEVICE(dimm
), &local_err
);
3587 error_propagate(errp
, local_err
);
3591 if (size
% SPAPR_MEMORY_BLOCK_SIZE
) {
3592 error_setg(errp
, "Hotplugged memory size must be a multiple of "
3593 "%" PRIu64
" MB", SPAPR_MEMORY_BLOCK_SIZE
/ MiB
);
3597 memdev
= object_property_get_link(OBJECT(dimm
), PC_DIMM_MEMDEV_PROP
,
3599 pagesize
= host_memory_backend_pagesize(MEMORY_BACKEND(memdev
));
3600 spapr_check_pagesize(spapr
, pagesize
, &local_err
);
3602 error_propagate(errp
, local_err
);
3606 pc_dimm_pre_plug(dimm
, MACHINE(hotplug_dev
), NULL
, errp
);
3609 struct SpaprDimmState
{
3612 QTAILQ_ENTRY(SpaprDimmState
) next
;
3615 static SpaprDimmState
*spapr_pending_dimm_unplugs_find(SpaprMachineState
*s
,
3618 SpaprDimmState
*dimm_state
= NULL
;
3620 QTAILQ_FOREACH(dimm_state
, &s
->pending_dimm_unplugs
, next
) {
3621 if (dimm_state
->dimm
== dimm
) {
3628 static SpaprDimmState
*spapr_pending_dimm_unplugs_add(SpaprMachineState
*spapr
,
3632 SpaprDimmState
*ds
= NULL
;
3635 * If this request is for a DIMM whose removal had failed earlier
3636 * (due to guest's refusal to remove the LMBs), we would have this
3637 * dimm already in the pending_dimm_unplugs list. In that
3638 * case don't add again.
3640 ds
= spapr_pending_dimm_unplugs_find(spapr
, dimm
);
3642 ds
= g_malloc0(sizeof(SpaprDimmState
));
3643 ds
->nr_lmbs
= nr_lmbs
;
3645 QTAILQ_INSERT_HEAD(&spapr
->pending_dimm_unplugs
, ds
, next
);
3650 static void spapr_pending_dimm_unplugs_remove(SpaprMachineState
*spapr
,
3651 SpaprDimmState
*dimm_state
)
3653 QTAILQ_REMOVE(&spapr
->pending_dimm_unplugs
, dimm_state
, next
);
3657 static SpaprDimmState
*spapr_recover_pending_dimm_state(SpaprMachineState
*ms
,
3661 uint64_t size
= memory_device_get_region_size(MEMORY_DEVICE(dimm
),
3663 uint32_t nr_lmbs
= size
/ SPAPR_MEMORY_BLOCK_SIZE
;
3664 uint32_t avail_lmbs
= 0;
3665 uint64_t addr_start
, addr
;
3668 addr_start
= object_property_get_int(OBJECT(dimm
), PC_DIMM_ADDR_PROP
,
3672 for (i
= 0; i
< nr_lmbs
; i
++) {
3673 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
,
3674 addr
/ SPAPR_MEMORY_BLOCK_SIZE
);
3679 addr
+= SPAPR_MEMORY_BLOCK_SIZE
;
3682 return spapr_pending_dimm_unplugs_add(ms
, avail_lmbs
, dimm
);
3685 /* Callback to be called during DRC release. */
3686 void spapr_lmb_release(DeviceState
*dev
)
3688 HotplugHandler
*hotplug_ctrl
= qdev_get_hotplug_handler(dev
);
3689 SpaprMachineState
*spapr
= SPAPR_MACHINE(hotplug_ctrl
);
3690 SpaprDimmState
*ds
= spapr_pending_dimm_unplugs_find(spapr
, PC_DIMM(dev
));
3692 /* This information will get lost if a migration occurs
3693 * during the unplug process. In this case recover it. */
3695 ds
= spapr_recover_pending_dimm_state(spapr
, PC_DIMM(dev
));
3697 /* The DRC being examined by the caller at least must be counted */
3698 g_assert(ds
->nr_lmbs
);
3701 if (--ds
->nr_lmbs
) {
3706 * Now that all the LMBs have been removed by the guest, call the
3707 * unplug handler chain. This can never fail.
3709 hotplug_handler_unplug(hotplug_ctrl
, dev
, &error_abort
);
3710 object_unparent(OBJECT(dev
));
3713 static void spapr_memory_unplug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
)
3715 SpaprMachineState
*spapr
= SPAPR_MACHINE(hotplug_dev
);
3716 SpaprDimmState
*ds
= spapr_pending_dimm_unplugs_find(spapr
, PC_DIMM(dev
));
3718 pc_dimm_unplug(PC_DIMM(dev
), MACHINE(hotplug_dev
));
3719 object_property_set_bool(OBJECT(dev
), false, "realized", NULL
);
3720 spapr_pending_dimm_unplugs_remove(spapr
, ds
);
3723 static void spapr_memory_unplug_request(HotplugHandler
*hotplug_dev
,
3724 DeviceState
*dev
, Error
**errp
)
3726 SpaprMachineState
*spapr
= SPAPR_MACHINE(hotplug_dev
);
3727 Error
*local_err
= NULL
;
3728 PCDIMMDevice
*dimm
= PC_DIMM(dev
);
3730 uint64_t size
, addr_start
, addr
;
3734 size
= memory_device_get_region_size(MEMORY_DEVICE(dimm
), &error_abort
);
3735 nr_lmbs
= size
/ SPAPR_MEMORY_BLOCK_SIZE
;
3737 addr_start
= object_property_get_uint(OBJECT(dimm
), PC_DIMM_ADDR_PROP
,
3744 * An existing pending dimm state for this DIMM means that there is an
3745 * unplug operation in progress, waiting for the spapr_lmb_release
3746 * callback to complete the job (BQL can't cover that far). In this case,
3747 * bail out to avoid detaching DRCs that were already released.
3749 if (spapr_pending_dimm_unplugs_find(spapr
, dimm
)) {
3750 error_setg(&local_err
,
3751 "Memory unplug already in progress for device %s",
3756 spapr_pending_dimm_unplugs_add(spapr
, nr_lmbs
, dimm
);
3759 for (i
= 0; i
< nr_lmbs
; i
++) {
3760 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
,
3761 addr
/ SPAPR_MEMORY_BLOCK_SIZE
);
3764 spapr_drc_detach(drc
);
3765 addr
+= SPAPR_MEMORY_BLOCK_SIZE
;
3768 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_LMB
,
3769 addr_start
/ SPAPR_MEMORY_BLOCK_SIZE
);
3770 spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB
,
3771 nr_lmbs
, spapr_drc_index(drc
));
3773 error_propagate(errp
, local_err
);
3776 /* Callback to be called during DRC release. */
3777 void spapr_core_release(DeviceState
*dev
)
3779 HotplugHandler
*hotplug_ctrl
= qdev_get_hotplug_handler(dev
);
3781 /* Call the unplug handler chain. This can never fail. */
3782 hotplug_handler_unplug(hotplug_ctrl
, dev
, &error_abort
);
3783 object_unparent(OBJECT(dev
));
3786 static void spapr_core_unplug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
)
3788 MachineState
*ms
= MACHINE(hotplug_dev
);
3789 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(ms
);
3790 CPUCore
*cc
= CPU_CORE(dev
);
3791 CPUArchId
*core_slot
= spapr_find_cpu_slot(ms
, cc
->core_id
, NULL
);
3793 if (smc
->pre_2_10_has_unused_icps
) {
3794 SpaprCpuCore
*sc
= SPAPR_CPU_CORE(OBJECT(dev
));
3797 for (i
= 0; i
< cc
->nr_threads
; i
++) {
3798 CPUState
*cs
= CPU(sc
->threads
[i
]);
3800 pre_2_10_vmstate_register_dummy_icp(cs
->cpu_index
);
3805 core_slot
->cpu
= NULL
;
3806 object_property_set_bool(OBJECT(dev
), false, "realized", NULL
);
3810 void spapr_core_unplug_request(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
3813 SpaprMachineState
*spapr
= SPAPR_MACHINE(OBJECT(hotplug_dev
));
3816 CPUCore
*cc
= CPU_CORE(dev
);
3818 if (!spapr_find_cpu_slot(MACHINE(hotplug_dev
), cc
->core_id
, &index
)) {
3819 error_setg(errp
, "Unable to find CPU core with core-id: %d",
3824 error_setg(errp
, "Boot CPU core may not be unplugged");
3828 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_CPU
,
3829 spapr_vcpu_id(spapr
, cc
->core_id
));
3832 spapr_drc_detach(drc
);
3834 spapr_hotplug_req_remove_by_index(drc
);
3837 int spapr_core_dt_populate(SpaprDrc
*drc
, SpaprMachineState
*spapr
,
3838 void *fdt
, int *fdt_start_offset
, Error
**errp
)
3840 SpaprCpuCore
*core
= SPAPR_CPU_CORE(drc
->dev
);
3841 CPUState
*cs
= CPU(core
->threads
[0]);
3842 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
3843 DeviceClass
*dc
= DEVICE_GET_CLASS(cs
);
3844 int id
= spapr_get_vcpu_id(cpu
);
3848 nodename
= g_strdup_printf("%s@%x", dc
->fw_name
, id
);
3849 offset
= fdt_add_subnode(fdt
, 0, nodename
);
3852 spapr_populate_cpu_dt(cs
, fdt
, offset
, spapr
);
3854 *fdt_start_offset
= offset
;
3858 static void spapr_core_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
3861 SpaprMachineState
*spapr
= SPAPR_MACHINE(OBJECT(hotplug_dev
));
3862 MachineClass
*mc
= MACHINE_GET_CLASS(spapr
);
3863 SpaprMachineClass
*smc
= SPAPR_MACHINE_CLASS(mc
);
3864 SpaprCpuCore
*core
= SPAPR_CPU_CORE(OBJECT(dev
));
3865 CPUCore
*cc
= CPU_CORE(dev
);
3868 Error
*local_err
= NULL
;
3869 CPUArchId
*core_slot
;
3871 bool hotplugged
= spapr_drc_hotplugged(dev
);
3874 core_slot
= spapr_find_cpu_slot(MACHINE(hotplug_dev
), cc
->core_id
, &index
);
3876 error_setg(errp
, "Unable to find CPU core with core-id: %d",
3880 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_CPU
,
3881 spapr_vcpu_id(spapr
, cc
->core_id
));
3883 g_assert(drc
|| !mc
->has_hotpluggable_cpus
);
3886 spapr_drc_attach(drc
, dev
, &local_err
);
3888 error_propagate(errp
, local_err
);
3894 * Send hotplug notification interrupt to the guest only
3895 * in case of hotplugged CPUs.
3897 spapr_hotplug_req_add_by_index(drc
);
3899 spapr_drc_reset(drc
);
3903 core_slot
->cpu
= OBJECT(dev
);
3905 if (smc
->pre_2_10_has_unused_icps
) {
3906 for (i
= 0; i
< cc
->nr_threads
; i
++) {
3907 cs
= CPU(core
->threads
[i
]);
3908 pre_2_10_vmstate_unregister_dummy_icp(cs
->cpu_index
);
3913 * Set compatibility mode to match the boot CPU, which was either set
3914 * by the machine reset code or by CAS.
3917 for (i
= 0; i
< cc
->nr_threads
; i
++) {
3918 ppc_set_compat(core
->threads
[i
], POWERPC_CPU(first_cpu
)->compat_pvr
,
3921 error_propagate(errp
, local_err
);
3928 static void spapr_core_pre_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
3931 MachineState
*machine
= MACHINE(OBJECT(hotplug_dev
));
3932 MachineClass
*mc
= MACHINE_GET_CLASS(hotplug_dev
);
3933 Error
*local_err
= NULL
;
3934 CPUCore
*cc
= CPU_CORE(dev
);
3935 const char *base_core_type
= spapr_get_cpu_core_type(machine
->cpu_type
);
3936 const char *type
= object_get_typename(OBJECT(dev
));
3937 CPUArchId
*core_slot
;
3939 unsigned int smp_threads
= machine
->smp
.threads
;
3941 if (dev
->hotplugged
&& !mc
->has_hotpluggable_cpus
) {
3942 error_setg(&local_err
, "CPU hotplug not supported for this machine");
3946 if (strcmp(base_core_type
, type
)) {
3947 error_setg(&local_err
, "CPU core type should be %s", base_core_type
);
3951 if (cc
->core_id
% smp_threads
) {
3952 error_setg(&local_err
, "invalid core id %d", cc
->core_id
);
3957 * In general we should have homogeneous threads-per-core, but old
3958 * (pre hotplug support) machine types allow the last core to have
3959 * reduced threads as a compatibility hack for when we allowed
3960 * total vcpus not a multiple of threads-per-core.
3962 if (mc
->has_hotpluggable_cpus
&& (cc
->nr_threads
!= smp_threads
)) {
3963 error_setg(&local_err
, "invalid nr-threads %d, must be %d",
3964 cc
->nr_threads
, smp_threads
);
3968 core_slot
= spapr_find_cpu_slot(MACHINE(hotplug_dev
), cc
->core_id
, &index
);
3970 error_setg(&local_err
, "core id %d out of range", cc
->core_id
);
3974 if (core_slot
->cpu
) {
3975 error_setg(&local_err
, "core %d already populated", cc
->core_id
);
3979 numa_cpu_pre_plug(core_slot
, dev
, &local_err
);
3982 error_propagate(errp
, local_err
);
3985 int spapr_phb_dt_populate(SpaprDrc
*drc
, SpaprMachineState
*spapr
,
3986 void *fdt
, int *fdt_start_offset
, Error
**errp
)
3988 SpaprPhbState
*sphb
= SPAPR_PCI_HOST_BRIDGE(drc
->dev
);
3991 intc_phandle
= spapr_irq_get_phandle(spapr
, spapr
->fdt_blob
, errp
);
3992 if (intc_phandle
<= 0) {
3996 if (spapr_dt_phb(sphb
, intc_phandle
, fdt
, spapr
->irq
->nr_msis
,
3997 fdt_start_offset
)) {
3998 error_setg(errp
, "unable to create FDT node for PHB %d", sphb
->index
);
4002 /* generally SLOF creates these, for hotplug it's up to QEMU */
4003 _FDT(fdt_setprop_string(fdt
, *fdt_start_offset
, "name", "pci"));
4008 static void spapr_phb_pre_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
4011 SpaprMachineState
*spapr
= SPAPR_MACHINE(OBJECT(hotplug_dev
));
4012 SpaprPhbState
*sphb
= SPAPR_PCI_HOST_BRIDGE(dev
);
4013 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(spapr
);
4014 const unsigned windows_supported
= spapr_phb_windows_supported(sphb
);
4016 if (dev
->hotplugged
&& !smc
->dr_phb_enabled
) {
4017 error_setg(errp
, "PHB hotplug not supported for this machine");
4021 if (sphb
->index
== (uint32_t)-1) {
4022 error_setg(errp
, "\"index\" for PAPR PHB is mandatory");
4027 * This will check that sphb->index doesn't exceed the maximum number of
4028 * PHBs for the current machine type.
4030 smc
->phb_placement(spapr
, sphb
->index
,
4031 &sphb
->buid
, &sphb
->io_win_addr
,
4032 &sphb
->mem_win_addr
, &sphb
->mem64_win_addr
,
4033 windows_supported
, sphb
->dma_liobn
,
4034 &sphb
->nv2_gpa_win_addr
, &sphb
->nv2_atsd_win_addr
,
4038 static void spapr_phb_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
4041 SpaprMachineState
*spapr
= SPAPR_MACHINE(OBJECT(hotplug_dev
));
4042 SpaprMachineClass
*smc
= SPAPR_MACHINE_GET_CLASS(spapr
);
4043 SpaprPhbState
*sphb
= SPAPR_PCI_HOST_BRIDGE(dev
);
4045 bool hotplugged
= spapr_drc_hotplugged(dev
);
4046 Error
*local_err
= NULL
;
4048 if (!smc
->dr_phb_enabled
) {
4052 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_PHB
, sphb
->index
);
4053 /* hotplug hooks should check it's enabled before getting this far */
4056 spapr_drc_attach(drc
, DEVICE(dev
), &local_err
);
4058 error_propagate(errp
, local_err
);
4063 spapr_hotplug_req_add_by_index(drc
);
4065 spapr_drc_reset(drc
);
4069 void spapr_phb_release(DeviceState
*dev
)
4071 HotplugHandler
*hotplug_ctrl
= qdev_get_hotplug_handler(dev
);
4073 hotplug_handler_unplug(hotplug_ctrl
, dev
, &error_abort
);
4074 object_unparent(OBJECT(dev
));
4077 static void spapr_phb_unplug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
)
4079 object_property_set_bool(OBJECT(dev
), false, "realized", NULL
);
4082 static void spapr_phb_unplug_request(HotplugHandler
*hotplug_dev
,
4083 DeviceState
*dev
, Error
**errp
)
4085 SpaprPhbState
*sphb
= SPAPR_PCI_HOST_BRIDGE(dev
);
4088 drc
= spapr_drc_by_id(TYPE_SPAPR_DRC_PHB
, sphb
->index
);
4091 if (!spapr_drc_unplug_requested(drc
)) {
4092 spapr_drc_detach(drc
);
4093 spapr_hotplug_req_remove_by_index(drc
);
4097 static void spapr_tpm_proxy_plug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
,
4100 SpaprMachineState
*spapr
= SPAPR_MACHINE(OBJECT(hotplug_dev
));
4101 SpaprTpmProxy
*tpm_proxy
= SPAPR_TPM_PROXY(dev
);
4103 if (spapr
->tpm_proxy
!= NULL
) {
4104 error_setg(errp
, "Only one TPM proxy can be specified for this machine");
4108 spapr
->tpm_proxy
= tpm_proxy
;
4111 static void spapr_tpm_proxy_unplug(HotplugHandler
*hotplug_dev
, DeviceState
*dev
)
4113 SpaprMachineState
*spapr
= SPAPR_MACHINE(OBJECT(hotplug_dev
));
4115 object_property_set_bool(OBJECT(dev
), false, "realized", NULL
);
4116 object_unparent(OBJECT(dev
));
4117 spapr
->tpm_proxy
= NULL
;
4120 static void spapr_machine_device_plug(HotplugHandler
*hotplug_dev
,
4121 DeviceState
*dev
, Error
**errp
)
4123 if (object_dynamic_cast(OBJECT(dev
), TYPE_PC_DIMM
)) {
4124 spapr_memory_plug(hotplug_dev
, dev
, errp
);
4125 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_CPU_CORE
)) {
4126 spapr_core_plug(hotplug_dev
, dev
, errp
);
4127 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_PCI_HOST_BRIDGE
)) {
4128 spapr_phb_plug(hotplug_dev
, dev
, errp
);
4129 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_TPM_PROXY
)) {
4130 spapr_tpm_proxy_plug(hotplug_dev
, dev
, errp
);
4134 static void spapr_machine_device_unplug(HotplugHandler
*hotplug_dev
,
4135 DeviceState
*dev
, Error
**errp
)
4137 if (object_dynamic_cast(OBJECT(dev
), TYPE_PC_DIMM
)) {
4138 spapr_memory_unplug(hotplug_dev
, dev
);
4139 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_CPU_CORE
)) {
4140 spapr_core_unplug(hotplug_dev
, dev
);
4141 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_PCI_HOST_BRIDGE
)) {
4142 spapr_phb_unplug(hotplug_dev
, dev
);
4143 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_TPM_PROXY
)) {
4144 spapr_tpm_proxy_unplug(hotplug_dev
, dev
);
4148 static void spapr_machine_device_unplug_request(HotplugHandler
*hotplug_dev
,
4149 DeviceState
*dev
, Error
**errp
)
4151 SpaprMachineState
*sms
= SPAPR_MACHINE(OBJECT(hotplug_dev
));
4152 MachineClass
*mc
= MACHINE_GET_CLASS(sms
);
4153 SpaprMachineClass
*smc
= SPAPR_MACHINE_CLASS(mc
);
4155 if (object_dynamic_cast(OBJECT(dev
), TYPE_PC_DIMM
)) {
4156 if (spapr_ovec_test(sms
->ov5_cas
, OV5_HP_EVT
)) {
4157 spapr_memory_unplug_request(hotplug_dev
, dev
, errp
);
4159 /* NOTE: this means there is a window after guest reset, prior to
4160 * CAS negotiation, where unplug requests will fail due to the
4161 * capability not being detected yet. This is a bit different than
4162 * the case with PCI unplug, where the events will be queued and
4163 * eventually handled by the guest after boot
4165 error_setg(errp
, "Memory hot unplug not supported for this guest");
4167 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_CPU_CORE
)) {
4168 if (!mc
->has_hotpluggable_cpus
) {
4169 error_setg(errp
, "CPU hot unplug not supported on this machine");
4172 spapr_core_unplug_request(hotplug_dev
, dev
, errp
);
4173 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_PCI_HOST_BRIDGE
)) {
4174 if (!smc
->dr_phb_enabled
) {
4175 error_setg(errp
, "PHB hot unplug not supported on this machine");
4178 spapr_phb_unplug_request(hotplug_dev
, dev
, errp
);
4179 } else if (object_dynamic_cast(OBJECT(dev
), TYPE_SPAPR_TPM_PROXY
)) {
4180 spapr_tpm_proxy_unplug(hotplug_dev
, dev
);
static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
                                          DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        spapr_phb_pre_plug(hotplug_dev, dev, errp);
    }
}

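/*
 * Descriptive note on the routing implemented below: DIMMs, CPU cores,
 * PHBs and the TPM proxy are hotplug-handled by the machine itself,
 * while a hot-plugged PCI function is delegated to the SpaprPhbState
 * that owns its root bus; any other device gets no hotplug handler.
 */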
static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
                                                 DeviceState *dev)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_PCI_HOST_BRIDGE) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_TPM_PROXY)) {
        return HOTPLUG_HANDLER(machine);
    }
    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pcidev = PCI_DEVICE(dev);
        PCIBus *root = pci_device_root_bus(pcidev);
        SpaprPhbState *phb =
            (SpaprPhbState *)object_dynamic_cast(OBJECT(BUS(root)->parent),
                                                 TYPE_SPAPR_PCI_HOST_BRIDGE);

        if (phb) {
            return HOTPLUG_HANDLER(phb);
        }
    }
    return NULL;
}

static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* make sure possible_cpus are initialized */
    mc->possible_cpu_arch_ids(machine);
    /* get CPU core slot containing thread that matches cpu_index */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}

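/*
 * Cores are spread round-robin across NUMA nodes, one socket's worth
 * (ms->smp.cores) at a time. For illustration: with smp.cores=4 and two
 * NUMA nodes, core slots 0..3 map to node 0, slots 4..7 to node 1, and
 * slots 8..11 wrap back to node 0.
 */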
static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    return idx / ms->smp.cores % ms->numa_state->num_nodes;
}

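/*
 * The possible_cpus list built below has one entry per core slot; each
 * slot's arch_id is the core-id of its first thread, i.e. slots are
 * spaced by smp.threads. For illustration, with threads=8 the slots get
 * arch_ids 0, 8, 16, ...
 */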
static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    unsigned int smp_threads = machine->smp.threads;
    unsigned int smp_cpus = machine->smp.cpus;
    const char *core_type;
    int spapr_max_cores = machine->smp.max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    core_type = spapr_get_cpu_core_type(machine->cpu_type);
    if (!core_type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                             sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].type = core_type;
        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;
    }
    return machine->possible_cpus;
}

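/*
 * Worked example of the placement below, using the figures from the
 * layout comment (32TiB base, 64kiB PIO, 2GiB 32-bit MMIO and 1TiB
 * 64-bit MMIO per PHB): for index 2 the PIO window sits at
 * 32TiB + 128kiB, the 32-bit MMIO window at 32TiB + 6GiB and the
 * 64-bit MMIO window at 35TiB.
 */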
static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns,
                                hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
    /*
     * New-style PHB window placement.
     *
     * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window
     * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO
     * windows.
     *
     * Some guest kernels can't work with MMIO windows above 1<<46
     * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
     *
     * 32TiB..(32TiB+1984kiB) contains the 64kiB PIO windows for each
     * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
     * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the
     * 1TiB 64-bit MMIO windows for each PHB.
     */
    const uint64_t base_buid = 0x800000020000000ULL;
    int i;

    /* Sanity check natural alignments */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
    /* Sanity check bounds */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;

    *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
    *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
}

static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
}

static void spapr_ics_resend(XICSFabric *dev)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(dev);

    ics_resend(spapr->ics);
}

static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
{
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    return cpu ? spapr_cpu_state(cpu)->icp : NULL;
}

static void spapr_pic_print_info(InterruptStatsProvider *obj,
                                 Monitor *mon)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->irq->print_info(spapr, mon);
}

int spapr_get_vcpu_id(PowerPCCPU *cpu)
{
    return cpu->vcpu_id;
}

void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    MachineState *ms = MACHINE(spapr);
    int vcpu_id;

    vcpu_id = spapr_vcpu_id(spapr, cpu_index);

    if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
        error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
        error_append_hint(errp, "Adjust the number of cpus to %d "
                          "or try to raise the number of threads per core\n",
                          vcpu_id * ms->smp.threads / spapr->vsmt);
        return;
    }

    cpu->vcpu_id = vcpu_id;
}

PowerPCCPU *spapr_find_cpu(int vcpu_id)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        if (spapr_get_vcpu_id(cpu) == vcpu_id) {
            return cpu;
        }
    }

    return NULL;
}

static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    /* These are only called by TCG, KVM maintains dispatch state */

    spapr_cpu->prod = false;
    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "dispatched partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}

static void spapr_cpu_exec_exit(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
{
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    if (spapr_cpu->vpa_addr) {
        CPUState *cs = CPU(cpu);
        uint32_t dispatch;

        dispatch = ldl_be_phys(cs->as,
                               spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER);
        dispatch++;
        if ((dispatch & 1) != 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "VPA: incorrect dispatch counter value for "
                          "preempted partition %u, correcting.\n", dispatch);
            dispatch++;
        }
        stl_be_phys(cs->as,
                    spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER, dispatch);
    }
}

static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";
    mc->ignore_boot_device_suffixes = true;

    /*
     * We set up the default / latest behaviour here. The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->init = spapr_machine_init;
    mc->reset = spapr_machine_reset;
    mc->block_default_type = IF_SCSI;
    mc->max_cpus = 1024;
    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * MiB;
    mc->default_display = "std";
    mc->kvm_type = spapr_kvm_type;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
    mc->pci_allow_0_address = true;
    assert(!mc->get_hotplug_handler);
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;
    hc->unplug = spapr_machine_device_unplug;

    smc->dr_lmb_enabled = true;
    smc->update_dt_enabled = true;
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0");
    mc->has_hotpluggable_cpus = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->hpte_set_c = spapr_hpte_set_c;
    vhc->hpte_set_r = spapr_hpte_set_r;
    vhc->get_pate = spapr_get_pate;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    vhc->cpu_exec_enter = spapr_cpu_exec_enter;
    vhc->cpu_exec_exit = spapr_cpu_exec_exit;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;
    /* Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;
    mc->numa_mem_supported = true;

    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 16; /* 64kiB */
    smc->default_caps.caps[SPAPR_CAP_NESTED_KVM_HV] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF;
    spapr_caps_add_properties(smc, &error_abort);
    smc->irq = &spapr_irq_dual;
    smc->dr_phb_enabled = true;
    smc->linux_pci_probe = true;
}

static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(SpaprMachineState),
    .instance_init = spapr_instance_init,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size    = sizeof(SpaprMachineClass),
    .class_init    = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { }
    },
};

#define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                  \
    static void spapr_machine_##suffix##_class_init(ObjectClass *oc,  \
                                                    void *data)       \
    {                                                                 \
        MachineClass *mc = MACHINE_CLASS(oc);                         \
        spapr_machine_##suffix##_class_options(mc);                   \
        if (latest) {                                                 \
            mc->alias = "pseries";                                    \
            mc->is_default = 1;                                       \
        }                                                             \
    }                                                                 \
    static const TypeInfo spapr_machine_##suffix##_info = {           \
        .name = MACHINE_TYPE_NAME("pseries-" verstr),                 \
        .parent = TYPE_SPAPR_MACHINE,                                 \
        .class_init = spapr_machine_##suffix##_class_init,            \
    };                                                                \
    static void spapr_machine_register_##suffix(void)                 \
    {                                                                 \
        type_register(&spapr_machine_##suffix##_info);                \
    }                                                                 \
    type_init(spapr_machine_register_##suffix)

static void spapr_machine_4_2_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE(4_2, "4.2", true);

static void spapr_machine_4_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        /* Only allow 4kiB and 64kiB IOMMU pagesizes */
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pgsz", "0x11000" },
    };

    spapr_machine_4_2_class_options(mc);
    smc->linux_pci_probe = false;
    compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(4_1, "4.1", false);

static void phb_placement_4_0(SpaprMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns,
                              hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
    spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns,
                        nv2gpa, nv2atsd, errp);
    *nv2gpa = 0;
    *nv2atsd = 0;
}

static void spapr_machine_4_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len);
    smc->phb_placement = phb_placement_4_0;
    smc->irq = &spapr_irq_xics;
    smc->pre_4_1_migration = true;
}

DEFINE_SPAPR_MACHINE(4_0, "4.0", false);

static void spapr_machine_3_1_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_4_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_1, hw_compat_3_1_len);

    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
    smc->update_dt_enabled = false;
    smc->dr_phb_enabled = false;
    smc->broken_host_serial_model = true;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF;
}

DEFINE_SPAPR_MACHINE(3_1, "3.1", false);

static void spapr_machine_3_0_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_3_1_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_3_0, hw_compat_3_0_len);

    smc->legacy_irq_allocation = true;
    smc->irq = &spapr_irq_xics_legacy;
}

DEFINE_SPAPR_MACHINE(3_0, "3.0", false);

static void spapr_machine_2_12_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_POWERPC_CPU, "pre-3.0-migration", "on" },
        { TYPE_SPAPR_CPU_CORE, "pre-3.0-migration", "on" },
    };

    spapr_machine_3_0_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_12, hw_compat_2_12_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));

    /* We depend on kvm_enabled() to choose a default value for the
     * hpt-max-page-size capability. Of course we can't do it here
     * because this is too early and the HW accelerator isn't initialized
     * yet. Postpone this to machine init (see default_caps_with_cpu()).
     */
    smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0;
}

DEFINE_SPAPR_MACHINE(2_12, "2.12", false);

static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_12_class_options(mc);
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
}

DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false);

static void spapr_machine_2_11_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_12_class_options(mc);
    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
    compat_props_add(mc->compat_props, hw_compat_2_11, hw_compat_2_11_len);
}

DEFINE_SPAPR_MACHINE(2_11, "2.11", false);

static void spapr_machine_2_10_class_options(MachineClass *mc)
{
    spapr_machine_2_11_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_10, hw_compat_2_10_len);
}

DEFINE_SPAPR_MACHINE(2_10, "2.10", false);

static void spapr_machine_2_9_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_POWERPC_CPU, "pre-2.10-migration", "on" },
    };

    spapr_machine_2_10_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_9, hw_compat_2_9_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
    smc->pre_2_10_has_unused_icps = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
}

DEFINE_SPAPR_MACHINE(2_9, "2.9", false);

static void spapr_machine_2_8_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pcie-extended-configuration-space", "off" },
    };

    spapr_machine_2_9_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_8, hw_compat_2_8_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->numa_mem_align_shift = 23;
}

DEFINE_SPAPR_MACHINE(2_8, "2.8", false);

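/*
 * Sketch of the legacy layout implemented below, from its constants:
 * PHB 0 is placed at the first 1TiB boundary above the top of RAM (or
 * device memory), later PHBs are spaced 64GiB apart, and within each
 * 64GiB slot the PIO window sits at +2GiB and the 32-bit MMIO window
 * at +2GiB+512MiB.
 */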
static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns,
                              hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
{
    /* Legacy PHB placement for pseries-2.7 and earlier machine types */
    const uint64_t base_buid = 0x800000020000000ULL;
    const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
    const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
    const hwaddr pio_offset = 0x80000000; /* 2 GiB */
    const uint32_t max_index = 255;
    const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */

    uint64_t ram_top = MACHINE(spapr)->ram_size;
    hwaddr phb0_base, phb_base;
    int i;

    /* Do we have device memory? */
    if (MACHINE(spapr)->maxram_size > ram_top) {
        /* Can't just use maxram_size, because there may be an
         * alignment gap between normal and device memory regions
         */
        ram_top = MACHINE(spapr)->device_memory->base +
            memory_region_size(&MACHINE(spapr)->device_memory->mr);
    }

    phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);

    if (index > max_index) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
                   max_index);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    phb_base = phb0_base + index * phb_spacing;
    *pio = phb_base + pio_offset;
    *mmio32 = phb_base + mmio_offset;
    /*
     * We don't set the 64-bit MMIO window, relying on the PHB's
     * fallback behaviour of automatically splitting a large "32-bit"
     * window into contiguous 32-bit and 64-bit windows
     */

    *nv2gpa = 0;
    *nv2atsd = 0;
}

static void spapr_machine_2_7_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0xf80000000", },
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem64_win_size", "0", },
        { TYPE_POWERPC_CPU, "pre-2.8-migration", "on", },
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "pre-2.8-migration", "on", },
    };

    spapr_machine_2_8_class_options(mc);
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
    mc->default_machine_opts = "modern-hotplug-events=off";
    compat_props_add(mc->compat_props, hw_compat_2_7, hw_compat_2_7_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    smc->phb_placement = phb_placement_2_7;
}

DEFINE_SPAPR_MACHINE(2_7, "2.7", false);

static void spapr_machine_2_6_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "ddw", "off" },
    };

    spapr_machine_2_7_class_options(mc);
    mc->has_hotpluggable_cpus = false;
    compat_props_add(mc->compat_props, hw_compat_2_6, hw_compat_2_6_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(2_6, "2.6", false);

static void spapr_machine_2_5_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    static GlobalProperty compat[] = {
        { "spapr-vlan", "use-rx-buffer-pools", "off" },
    };

    spapr_machine_2_6_class_options(mc);
    smc->use_ohci_by_default = true;
    compat_props_add(mc->compat_props, hw_compat_2_5, hw_compat_2_5_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}

DEFINE_SPAPR_MACHINE(2_5, "2.5", false);

static void spapr_machine_2_4_class_options(MachineClass *mc)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_5_class_options(mc);
    smc->dr_lmb_enabled = false;
    compat_props_add(mc->compat_props, hw_compat_2_4, hw_compat_2_4_len);
}

DEFINE_SPAPR_MACHINE(2_4, "2.4", false);

static void spapr_machine_2_3_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { "spapr-pci-host-bridge", "dynamic-reconfiguration", "off" },
    };
    spapr_machine_2_4_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_3, hw_compat_2_3_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
}
DEFINE_SPAPR_MACHINE(2_3, "2.3", false);

static void spapr_machine_2_2_class_options(MachineClass *mc)
{
    static GlobalProperty compat[] = {
        { TYPE_SPAPR_PCI_HOST_BRIDGE, "mem_win_size", "0x20000000" },
    };

    spapr_machine_2_3_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_2, hw_compat_2_2_len);
    compat_props_add(mc->compat_props, compat, G_N_ELEMENTS(compat));
    mc->default_machine_opts = "modern-hotplug-events=off,suppress-vmdesc=on";
}
DEFINE_SPAPR_MACHINE(2_2, "2.2", false);

static void spapr_machine_2_1_class_options(MachineClass *mc)
{
    spapr_machine_2_2_class_options(mc);
    compat_props_add(mc->compat_props, hw_compat_2_1, hw_compat_2_1_len);
}
DEFINE_SPAPR_MACHINE(2_1, "2.1", false);

static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)