/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "sysemu/numa.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "hw/fw-path-provider.h"
#include "elf.h"
#include "net/net.h"
#include "sysemu/device_tree.h"
#include "sysemu/block-backend.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "migration/misc.h"
#include "migration/global_state.h"
#include "migration/register.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
#include "qom/cpu.h"

#include "hw/boards.h"
#include "hw/ppc/ppc.h"
#include "hw/loader.h"

#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/xics.h"
#include "hw/pci/msi.h"

#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"

#include "exec/address-spaces.h"
#include "hw/usb.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/nmi.h"
#include "hw/intc/intc.h"

#include "hw/compat.h"
#include "qemu/cutils.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "qmp-commands.h"
/* SLOF memory layout:
 *
 * SLOF raw image loaded at 0, copies its romfs right below the flat
 * device-tree, then positions SLOF itself 31M below that
 *
 * So we set FW_OVERHEAD to 40MB which should account for all of that
 * and more
 *
 * We load our kernel at 4M, leaving space for SLOF initial image
 */
#define FDT_MAX_SIZE            0x100000
#define RTAS_MAX_SIZE           0x10000
#define RTAS_MAX_ADDR           0x80000000 /* RTAS must stay below that */
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE

#define MIN_RMA_SLOF            128UL

#define PHANDLE_XICP            0x00001111
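
/*
 * Illustrative worked example (not used by the code): with the defaults
 * above and an RMA of at least 2G, the reset code below computes
 *   rtas_limit = MIN(rma_size, RTAS_MAX_ADDR)  = 0x80000000
 *   rtas_addr  = rtas_limit - RTAS_MAX_SIZE    = 0x7fff0000
 *   fdt_addr   = rtas_addr - FDT_MAX_SIZE      = 0x7fef0000
 * while the kernel sits at KERNEL_LOAD_ADDR (4M) and the ramdisk load
 * limit is MIN(rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD.
 */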

static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
                                  const char *type_ics,
                                  int nr_irqs, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(type_ics);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_abort);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
    if (local_err) {
        goto error;
    }
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return ICS_SIMPLE(obj);

error:
    error_propagate(errp, local_err);
    return NULL;
}

static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    /* Dummy entries correspond to unused ICPState objects in older QEMUs,
     * and newer QEMUs don't even have them. In both cases, we don't want
     * to send anything on the wire.
     */
    return false;
}

static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(4), /* uint32_t xirr */
        VMSTATE_UNUSED(1), /* uint8_t pending_priority */
        VMSTATE_UNUSED(1), /* uint8_t mfrr */
        VMSTATE_END_OF_LIST()
    },
};

static void pre_2_10_vmstate_register_dummy_icp(int i)
{
    vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
                     (void *)(uintptr_t) i);
}

static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
    vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
                       (void *)(uintptr_t) i);
}

static inline int xics_max_server_number(void)
{
    return DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads);
}
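
/*
 * Worked example (illustrative only): with smp_threads = 4, max_cpus = 8
 * and kvmppc_smt_threads() = 8, this yields DIV_ROUND_UP(8 * 8, 4) = 16
 * servers -- more than max_cpus, because vCPU device-tree ids are spaced
 * by the host SMT stride rather than packed consecutively.
 */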

static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);

    if (kvm_enabled()) {
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, errp)) {
            spapr->icp_type = TYPE_KVM_ICP;
            spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs, errp);
        }
        if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
            error_prepend(errp, "kernel_irqchip requested but unavailable: ");
            return;
        }
    }

    if (!spapr->ics) {
        xics_spapr_init(spapr);
        spapr->icp_type = TYPE_ICP;
        spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs, errp);
        if (!spapr->ics) {
            return;
        }
    }

    if (smc->pre_2_10_has_unused_icps) {
        int i;

        for (i = 0; i < xics_max_server_number(); i++) {
            /* Dummy entries get deregistered when real ICPState objects
             * are registered during CPU core hotplug.
             */
            pre_2_10_vmstate_register_dummy_icp(i);
        }
    }
}

static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = ppc_get_vcpu_dt_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i*2] = cpu_to_be32(index + i);
        gservers_prop[i*2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(gservers_prop));

    return ret;
}

static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
{
    int index = ppc_get_vcpu_dt_id(cpu);
    uint32_t associativity[] = {cpu_to_be32(0x5),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(cpu->node_id),
                                cpu_to_be32(index)};

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
                       sizeof(associativity));
}
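
/*
 * Layout note (illustrative): the property above encodes
 *   { length = 5, 0, 0, 0, node_id, vcpu_index }
 * i.e. a five-entry associativity list whose fourth entry is the NUMA
 * node and whose fifth makes each vCPU unique; a vCPU with dt id 8 on
 * node 1 is seen by the guest as <5 0 0 0 1 8> (all cells big-endian).
 */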

/* Populate the "ibm,pa-features" property */
static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset,
                                       bool legacy_guest)
{
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features;
    size_t pa_size;

    switch (POWERPC_MMU_VER(env->mmu_model)) {
    case POWERPC_MMU_VER_2_06:
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
        break;
    case POWERPC_MMU_VER_2_07:
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
        break;
    case POWERPC_MMU_VER_3_00:
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
        break;
    default:
        return;
    }

    if (env->ci_large_pages) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue
         */
        pa_features[3] |= 0x20;
    }
    if (kvmppc_has_cap_htm() && pa_size > 24) {
        pa_features[24] |= 0x80; /* Transactional memory support */
    }
    if (legacy_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
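
/*
 * Addressing sketch (illustrative, derived from the code above): the
 * arrays are laid out as { <size>, 0, byte0, byte1, ... }, so feature
 * byte B lives at pa_features[B + 2], and 0x80 is the byte's
 * most-significant (lowest-numbered) attribute bit, e.g.:
 *
 *   pa_features[22 + 2] |= 0x80;    byte 22: HTM
 *   pa_features[40 + 2] &= ~0x80;   byte 40: Radix MMU, hidden above
 */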

static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
{
    int ret = 0, offset, cpus_offset;
    CPUState *cs;
    char cpu_model[32];
    int smt = kvmppc_smt_threads();
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *env = &cpu->env;
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int index = ppc_get_vcpu_dt_id(cpu);
        int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu));

        if ((index % smt) != 0) {
            continue;
        }

        snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);

        cpus_offset = fdt_path_offset(fdt, "/cpus");
        if (cpus_offset < 0) {
            cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
                                          "cpus");
            if (cpus_offset < 0) {
                return cpus_offset;
            }
        }
        offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
        if (offset < 0) {
            offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
            if (offset < 0) {
                return offset;
            }
        }

        ret = fdt_setprop(fdt, offset, "ibm,pft-size",
                          pft_size_prop, sizeof(pft_size_prop));
        if (ret < 0) {
            return ret;
        }

        if (nb_numa_nodes > 1) {
            ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
            if (ret < 0) {
                return ret;
            }
        }

        ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
        if (ret < 0) {
            return ret;
        }

        spapr_populate_pa_features(env, fdt, offset,
                                   spapr->cas_legacy_guest_workaround);
    }
    return ret;
}

static hwaddr spapr_node0_size(void)
{
    MachineState *machine = MACHINE(qdev_get_machine());

    if (nb_numa_nodes) {
        int i;
        for (i = 0; i < nb_numa_nodes; ++i) {
            if (numa_info[i].node_mem) {
                return MIN(pow2floor(numa_info[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}

static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}

static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
                                      hwaddr size)
{
    uint32_t associativity[] = {
        cpu_to_be32(0x4), /* length */
        cpu_to_be32(0x0), cpu_to_be32(0x0),
        cpu_to_be32(0x0), cpu_to_be32(nodeid)
    };
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                      sizeof(associativity))));
    return off;
}

static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = nb_numa_nodes;
    NodeInfo *nodes = numa_info;
    NodeInfo ramnode;

    /* No NUMA nodes, assume there is just one node with whole RAM */
    if (!nb_numa_nodes) {
        nb_nodes = 1;
        ramnode.node_mem = machine->ram_size;
        nodes = &ramnode;
    }

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* ppc_spapr_init() checks for rma_size <= node0_size already */
            spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    return 0;
}
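
/*
 * Chunking example (illustrative): a 1.5G node starting at 0x60000000
 * is emitted as memory@60000000 (512M) then memory@80000000 (1G) --
 * each chunk is the largest power of two that both fits in the
 * remaining size and keeps the start address naturally aligned.
 */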

static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
                                  sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = ppc_get_vcpu_dt_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    uint32_t vcpus_per_socket = smp_threads * smp_cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_threads(cpu));
    sPAPRDRConnector *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        warn_report("Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        warn_report("Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    if (env->spr_cb[SPR_PURR].oea_read) {
        _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
    }

    if (env->mmu_model & POWERPC_MMU_1TSEG) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VMX/VSX (vector extensions) if available
     *   0 / no property == no vector extensions
     *   1               == VMX / Altivec available
     *   2               == VSX available */
    if (env->insns_flags & PPC_ALTIVEC) {
        uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;

        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx)));
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1               == DFP available */
    if (env->insns_flags2 & PPC2_DFP) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(env, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_populate_pa_features(env, fdt, offset, false);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (nb_numa_nodes > 1) {
        _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }
}

static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
{
    CPUState *cs;
    int cpus_offset;
    char *nodename;
    int smt = kvmppc_smt_threads();

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in FDT
     * for the guest kernel to enumerate the CPUs correctly.
     */
    CPU_FOREACH_REVERSE(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = ppc_get_vcpu_dt_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int offset;

        if ((index % smt) != 0) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        g_free(nodename);
        _FDT(offset);
        spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    }
}

/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, i, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
    uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
    uint32_t nr_lmbs = (spapr->hotplug_memory.base +
                       memory_region_size(&spapr->hotplug_memory.mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;

    /*
     * Don't create the node if there is no hotpluggable memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     * or ibm,associativity-lookup-arrays
     */
    buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
              * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                    sizeof(prop_lmb_size));
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        goto out;
    }

    /* ibm,dynamic-memory */
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= hotplug_lmb_start) {
            sPAPRDRConnector *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * hotplug memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    if (ret < 0) {
        goto out;
    }

    /* ibm,associativity-lookup-arrays */
    cur_index = int_buf;
    int_buf[0] = cpu_to_be32(nr_nodes);
    int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        uint32_t associativity[] = {
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(i)
        };
        memcpy(cur_index, associativity, sizeof(associativity));
        cur_index += 4;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
                      (cur_index - int_buf) * sizeof(uint32_t));
out:
    g_free(int_buf);
    return ret;
}
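
/*
 * Entry layout note (illustrative): each ibm,dynamic-memory list entry
 * built above is six cells --
 *   { addr_hi, addr_lo, drc_index, reserved, assoc_list_index, flags }
 * so e.g. a populated 256M LMB at 1G on node 0 whose DRC index is
 * 0x80000000 would read
 *   <0x0 0x40000000 0x80000000 0x0 0x0 SPAPR_LMB_FLAGS_ASSIGNED>.
 */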

static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
                                sPAPROptionVector *ov5_updates)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int ret = 0, offset;

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
        g_assert(smc->dr_lmb_enabled);
        ret = spapr_populate_drconf_memory(spapr, fdt);
        if (ret) {
            goto out;
        }
    }

    /* /interrupt controller */
    if (!spapr_ovec_test(ov5_updates, OV5_XIVE_EXPLOIT)) {
        spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
    }

    offset = fdt_path_offset(fdt, "/chosen");
    if (offset < 0) {
        offset = fdt_add_subnode(fdt, 0, "chosen");
        if (offset < 0) {
            return offset;
        }
    }
    ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
                                 "ibm,architecture-vec-5");

out:
    return ret;
}

int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
                                 target_ulong addr, target_ulong size,
                                 sPAPROptionVector *ov5_updates)
{
    void *fdt, *fdt_skel;
    sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };

    size -= sizeof(hdr);

    /* Create skeleton */
    fdt_skel = g_malloc0(size);
    _FDT((fdt_create(fdt_skel, size)));
    _FDT((fdt_begin_node(fdt_skel, "")));
    _FDT((fdt_end_node(fdt_skel)));
    _FDT((fdt_finish(fdt_skel)));
    fdt = g_malloc0(size);
    _FDT((fdt_open_into(fdt_skel, fdt, size)));
    g_free(fdt_skel);

    /* Fixup cpu nodes */
    _FDT((spapr_fixup_cpu_dt(fdt, spapr)));

    if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
        return -1;
    }

    /* Pack resulting tree */
    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
        trace_spapr_cas_failed(size);
        return -1;
    }

    cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
    cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
    trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
    g_free(fdt);

    return 0;
}

static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
{
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
    uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
        memory_region_size(&spapr->hotplug_memory.mr);
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_hotplug_addr >> 32),
        cpu_to_be32(max_hotplug_addr & 0xffffffff),
        0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
        cpu_to_be32(max_cpus / smp_threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    if (msi_nonbroken) {
        _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));
    }

    /*
     * According to PAPR, the rtas ibm,os-term call does not guarantee a
     * return back to the guest cpu. An additional ibm,extended-os-term
     * property indicates that the rtas call will always return. Set this
     * property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}

/* Prepare ibm,arch-vec-5-platform-support, which indicates the MMU features
 * that the guest may request and thus the valid values for bytes 23..26 of
 * option vector 5: */
static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    char val[2 * 4] = {
        23, 0x00, /* Xive mode: 0 = legacy (as in ISA 2.7), 1 = Exploitation */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) {
            /* V3 MMU supports both hash and radix (with dynamic switching) */
            val[3] = 0xC0;
        } else {
            /* Otherwise we can only do hash */
            val[3] = 0x00;
        }
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}

static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int chosen;
    const char *boot_device = machine->boot_order;
    char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
    size_t cb = 0;
    char *bootlist = get_boot_devices_list(&cb, true);

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                          spapr->initrd_base));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                          spapr->initrd_base + spapr->initrd_size));

    if (spapr->kernel_size) {
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(spapr->kernel_size) };

        _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
        if (spapr->kernel_le) {
            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
        }
    }
    if (boot_menu) {
        _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
    }
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

    if (cb && bootlist) {
        int i;

        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
    }

    if (boot_device && strlen(boot_device)) {
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
    }

    if (!spapr->has_graphics && stdout_path) {
        _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
    }

    spapr_dt_ov5_platform_support(fdt, chosen);

    g_free(stdout_path);
    g_free(bootlist);
}

static void spapr_dt_hypervisor(sPAPRMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}

static void *spapr_build_fdt(sPAPRMachineState *spapr,
                             hwaddr rtas_addr,
                             hwaddr rtas_size)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int ret;
    void *fdt;
    sPAPRPHBState *phb;
    char *buf;

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /*
     * Add info to guest to identify which host is it being run on
     * and what is the uuid of the guest
     */
    if (kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }
    if (kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    buf = qemu_uuid_unparse_strdup(&qemu_uuid);

    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /memory */
    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_rng_populate_dt(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    /* cpus */
    spapr_populate_cpus_dt_node(fdt, spapr);

    if (smc->dr_lmb_enabled) {
        _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_drc_populate_dt(fdt, offset, NULL,
                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (spapr->kernel_size) {
        _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
    }
    if (spapr->initrd_size) {
        _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
    }

    /* ibm,client-architecture-support updates */
    ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
    if (ret < 0) {
        error_report("couldn't setup CAS properties fdt");
        exit(1);
    }

    return fdt;
}

static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
}

static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}

static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    return spapr->patb_entry;
}

#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
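
/*
 * Illustrative usage sketch, kept out of the build: each hash PTE is two
 * 64-bit doublewords, so HPTE(table, i) advances i * 2 uint64_t slots
 * (i * 16 bytes) into the table, and the VALID/DIRTY tests read flag
 * bits from the first doubleword.
 */
#if 0
static void hpte_macro_demo(sPAPRMachineState *spapr, hwaddr index)
{
    if (HPTE_VALID(HPTE(spapr->htab, index))) {
        DIRTY_HPTE(HPTE(spapr->htab, index));  /* flag entry for migration */
        CLEAN_HPTE(HPTE(spapr->htab, index));  /* ...and clear the flag */
    }
}
#endif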

/*
 * Get the fd to access the kernel htab, re-opening it if necessary
 */
static int get_htab_fd(sPAPRMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        return spapr->htab_fd;
    }

    spapr->htab_fd = kvmppc_get_htab_fd(false);
    if (spapr->htab_fd < 0) {
        error_report("Unable to open fd for reading hash table from KVM: %s",
                     strerror(errno));
    }

    return spapr->htab_fd;
}

void close_htab_fd(sPAPRMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        close(spapr->htab_fd);
    }
    spapr->htab_fd = -1;
}

static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
}

static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
                                               hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        /*
         * HTAB is controlled by KVM. Fetch into temporary buffer
         */
        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
        kvmppc_read_hptes(hptes, ptex, n);
        return hptes;
    }

    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
}

static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
                              const ppc_hash_pte64_t *hptes,
                              hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        g_free((void *)hptes);
    }

    /* Nothing to do for qemu managed HPT */
}

static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte0, uint64_t pte1)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
    } else {
        stq_p(spapr->htab + offset, pte0);
        stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift;

    /* We aim for a hash table of size 1/128 the size of RAM (rounded
     * up). The PAPR recommendation is actually 1/64 of RAM size, but
     * that's much more than is needed for Linux guests */
    shift = ctz64(pow2ceil(ramsize)) - 7;
    shift = MAX(shift, 18); /* Minimum architected size */
    shift = MIN(shift, 46); /* Maximum architected size */
    return shift;
}
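
/*
 * Worked example (illustrative): for a 4G guest, pow2ceil(4G) = 2^32, so
 * shift = 32 - 7 = 25 and the HPT is 2^25 bytes = 32M, i.e. 4G / 128;
 * the MAX/MIN clamps keep odd RAM sizes within the architected
 * 2^18..2^46 range.
 */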

void spapr_free_hpt(sPAPRMachineState *spapr)
{
    g_free(spapr->htab);
    spapr->htab = NULL;
    spapr->htab_shift = 0;
    close_htab_fd(spapr);
}

void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
                          Error **errp)
{
    long rc;

    /* Clean up any HPT info from a previous boot */
    spapr_free_hpt(spapr);

    rc = kvmppc_reset_htab(shift);
    if (rc < 0) {
        /* kernel-side HPT needed, but couldn't allocate one */
        error_setg_errno(errp, errno,
                         "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
                         shift);
        /* This is almost certainly fatal, but if the caller really
         * wants to carry on with shift == 0, it's welcome to try */
    } else if (rc > 0) {
        /* kernel-side HPT allocated */
        if (rc != shift) {
            error_setg(errp,
                       "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
                       shift, rc);
        }

        spapr->htab_shift = shift;
        spapr->htab = NULL;
    } else {
        /* kernel-side HPT not needed, allocate in userspace instead */
        size_t size = 1ULL << shift;
        int i;

        spapr->htab = qemu_memalign(size, size);
        if (!spapr->htab) {
            error_setg_errno(errp, errno,
                             "Could not allocate HPT of order %d", shift);
            return;
        }

        memset(spapr->htab, 0, size);
        spapr->htab_shift = shift;

        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
            DIRTY_HPTE(HPTE(spapr->htab, i));
        }
    }
}

void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr)
{
    int hpt_shift;

    if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED)
        || (spapr->cas_reboot
            && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
    } else {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->ram_size);
    }
    spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);

    if (spapr->vrma_adjust) {
        spapr->rma_size = kvmppc_rma_size(spapr_node0_size(),
                                          spapr->htab_shift);
    }
    /* We're setting up a hash table, so that means we're not radix */
    spapr->patb_entry = 0;
}

static void find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
{
    bool matched = false;

    if (object_dynamic_cast(OBJECT(sbdev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
        matched = true;
    }

    if (!matched) {
        error_report("Device %s is not supported by this machine yet.",
                     qdev_fw_name(DEVICE(sbdev)));
        exit(1);
    }
}

static void ppc_spapr_reset(void)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    PowerPCCPU *first_ppc_cpu;
    uint32_t rtas_limit;
    hwaddr rtas_addr, fdt_addr;
    void *fdt;
    int rc;

    /* Check for unknown sysbus devices */
    foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);

    if (kvm_enabled() && kvmppc_has_cap_mmu_radix()) {
        /* If using KVM with radix mode available, VCPUs can be started
         * without a HPT because KVM will start them in radix mode.
         * Set the GR bit in PATB so that we know there is no HPT. */
        spapr->patb_entry = PATBE1_GR;
    } else {
        spapr_setup_hpt_and_vrma(spapr);
    }

    qemu_devices_reset();

    /*
     * We place the device tree and RTAS just below either the top of the RMA,
     * or just below 2GB, whichever is lower, so that it can be
     * processed with 32-bit real mode code if necessary
     */
    rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
    rtas_addr = rtas_limit - RTAS_MAX_SIZE;
    fdt_addr = rtas_addr - FDT_MAX_SIZE;

    /* if this reset wasn't generated by CAS, we should reset our
     * negotiated options and start from scratch */
    if (!spapr->cas_reboot) {
        spapr_ovec_cleanup(spapr->ov5_cas);
        spapr->ov5_cas = spapr_ovec_new();

        ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal);
    }

    fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);

    spapr_load_rtas(spapr, fdt, rtas_addr);

    rc = fdt_pack(fdt);

    /* Should only fail if we've built a corrupted tree */
    assert(rc == 0);

    if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
        error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
                     fdt_totalsize(fdt), FDT_MAX_SIZE);
        exit(1);
    }

    /* Load the fdt */
    qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
    cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
    g_free(fdt);

    /* Set up the entry state */
    first_ppc_cpu = POWERPC_CPU(first_cpu);
    first_ppc_cpu->env.gpr[3] = fdt_addr;
    first_ppc_cpu->env.gpr[5] = 0;
    first_cpu->halted = 0;
    first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;

    spapr->cas_reboot = false;
}
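
/*
 * Entry convention note (illustrative): the reset code above arranges
 * for the first vCPU to begin at SPAPR_ENTRY_POINT with r3 holding the
 * physical address of the flattened device tree and r5 = 0, which is
 * the contract SLOF expects before its first instruction runs.
 */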

static void spapr_create_nvram(sPAPRMachineState *spapr)
{
    DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
    DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);

    if (dinfo) {
        qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
                            &error_fatal);
    }

    qdev_init_nofail(dev);

    spapr->nvram = (struct sPAPRNVRAM *)dev;
}

static void spapr_rtc_create(sPAPRMachineState *spapr)
{
    object_initialize(&spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC);
    object_property_add_child(OBJECT(spapr), "rtc", OBJECT(&spapr->rtc),
                              &error_fatal);
    object_property_set_bool(OBJECT(&spapr->rtc), true, "realized",
                             &error_fatal);
    object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
                              "date", &error_fatal);
}

/* Returns whether we want to use VGA or not */
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
{
    switch (vga_interface_type) {
    case VGA_NONE:
        return false;
    case VGA_DEVICE:
        return true;
    case VGA_STD:
    case VGA_VIRTIO:
        return pci_vga_init(pci_bus) != NULL;
    default:
        error_setg(errp,
                   "Unsupported VGA mode, only -vga std or -vga virtio is supported");
        return false;
    }
}

static int spapr_post_load(void *opaque, int version_id)
{
    sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
    int err = 0;

    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
        CPUState *cs;
        CPU_FOREACH(cs) {
            PowerPCCPU *cpu = POWERPC_CPU(cs);
            icp_resend(ICP(cpu->intc));
        }
    }

    /* In earlier versions, there was no separate qdev for the PAPR
     * RTC, so the RTC offset was stored directly in sPAPREnvironment.
     * So when migrating from those versions, poke the incoming offset
     * value into the RTC device */
    if (version_id < 3) {
        err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
    }

    if (spapr->patb_entry) {
        PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
        bool radix = !!(spapr->patb_entry & PATBE1_GR);
        bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);

        err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
        if (err) {
            error_report("Process table config unsupported by the host");
            return -EINVAL;
        }
    }

    return err;
}

static bool version_before_3(void *opaque, int version_id)
{
    return version_id < 3;
}

static bool spapr_pending_events_needed(void *opaque)
{
    sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
    return !QTAILQ_EMPTY(&spapr->pending_events);
}

static const VMStateDescription vmstate_spapr_event_entry = {
    .name = "spapr_event_log_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(summary, sPAPREventLogEntry),
        VMSTATE_UINT32(extended_length, sPAPREventLogEntry),
        VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, sPAPREventLogEntry, 0,
                                     NULL, extended_length),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_pending_events = {
    .name = "spapr_pending_events",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_pending_events_needed,
    .fields = (VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_events, sPAPRMachineState, 1,
                         vmstate_spapr_event_entry, sPAPREventLogEntry, next),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_ov5_cas_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    sPAPROptionVector *ov5_mask = spapr_ovec_new();
    sPAPROptionVector *ov5_legacy = spapr_ovec_new();
    sPAPROptionVector *ov5_removed = spapr_ovec_new();
    bool cas_needed;

    /* Prior to the introduction of sPAPROptionVector, we had two option
     * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiatable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiatable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * include the CAS-negotiated options in the migration stream.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);

    /* spapr_ovec_diff returns true if bits were removed. we avoid using
     * the mask itself since in the future it's possible "legacy" bits may be
     * removed via machine options, which could generate a false positive
     * that breaks migration.
     */
    spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
    cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);

    spapr_ovec_cleanup(ov5_mask);
    spapr_ovec_cleanup(ov5_legacy);
    spapr_ovec_cleanup(ov5_removed);

    return cas_needed;
}
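
/*
 * Worked example (illustrative): if spapr->ov5 contains only
 * { OV5_FORM1_AFFINITY, OV5_DRCONF_MEMORY }, intersecting with the mask
 * reproduces the same set, the diff removes nothing, and the subsection
 * is skipped; adding OV5_HP_EVT to ov5 makes the diff non-empty, so the
 * CAS state is included in the migration stream.
 */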

static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, sPAPRMachineState, 1,
                                 vmstate_spapr_ovec, sPAPROptionVector),
        VMSTATE_END_OF_LIST()
    },
};

static bool spapr_patb_entry_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    return !!spapr->patb_entry;
}

static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_patb_entry_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(patb_entry, sPAPRMachineState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .post_load = spapr_post_load,
    .fields = (VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset */
        VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        NULL
    }
};

static int htab_save_setup(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    /* "Iteration" header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
    } else {
        qemu_put_be32(f, spapr->htab_shift);
    }

    if (spapr->htab) {
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        if (spapr->htab_shift) {
            assert(kvm_enabled());
        }
    }

    return 0;
}

static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            qemu_put_be32(f, chunkstart);
            qemu_put_be16(f, n_valid);
            qemu_put_be16(f, 0);
            qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                            HASH_PTE_SIZE_64 * n_valid);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}

static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            qemu_put_be32(f, chunkstart);
            qemu_put_be16(f, n_valid);
            qemu_put_be16(f, n_invalid);
            qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                            HASH_PTE_SIZE_64 * n_valid);
            sent += index - chunkstart;

            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}

#define MAX_ITERATION_NS    5000000 /* 5 ms */
#define MAX_KVM_BUF_SIZE    2048

static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    /* End marker */
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);

    return rc;
}

static int htab_save_complete(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 0;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        int rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    } else {
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    }

    /* End marker */
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);

    return 0;
}

static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    sPAPRMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;

    if (version_id < 1 || version_id > 1) {
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr == -1) {
        spapr_free_hpt(spapr);
        return 0;
    }

    if (section_hdr) {
        Error *local_err = NULL;

        /* First section gives the htab size */
        spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
        return 0;
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true);
        if (fd < 0) {
            error_report("Unable to open fd to restore KVM hash table: %s",
                         strerror(errno));
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            return -EINVAL;
        }

        if (spapr->htab) {
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
            if (rc < 0) {
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}

static void htab_save_cleanup(void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    close_htab_fd(spapr);
}

static SaveVMHandlers savevm_htab_handlers = {
    .save_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .save_cleanup = htab_save_cleanup,
    .load_state = htab_load,
};

static void spapr_boot_set(void *opaque, const char *boot_device,
                           Error **errp)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    machine->boot_order = g_strdup(boot_device);
}

static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
    int i;

    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr;

        addr = i * lmb_size + spapr->hotplug_memory.base;
        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
                               addr / lmb_size);
    }
}

/*
 * If RAM size, maxmem size and individual node mem sizes aren't aligned
 * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
 * since we can't support such unaligned sizes with DRCONF_MEMORY.
 */
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
{
    int i;

    if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %llu MiB",
                   machine->ram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %llu MiB",
                   machine->maxram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    for (i = 0; i < nb_numa_nodes; i++) {
        if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
            error_setg(errp,
                       "Node %d memory size 0x%" PRIx64
                       " is not aligned to %llu MiB",
                       i, numa_info[i].node_mem,
                       SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
            return;
        }
    }
}

/* find cpu slot in machine->possible_cpus by core_id */
static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    int index = id / smp_threads;

    if (index >= ms->possible_cpus->len) {
        return NULL;
    }
    if (idx) {
        *idx = index;
    }
    return &ms->possible_cpus->cpus[index];
}

static void spapr_init_cpus(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    char *type = spapr_get_cpu_core_type(machine->cpu_model);
    int smt = kvmppc_smt_threads();
    const CPUArchIdList *possible_cpus;
    int boot_cores_nr = smp_cpus / smp_threads;
    int i;

    if (!type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    possible_cpus = mc->possible_cpu_arch_ids(machine);
    if (mc->has_hotpluggable_cpus) {
        if (smp_cpus % smp_threads) {
            error_report("smp_cpus (%u) must be multiple of threads (%u)",
                         smp_cpus, smp_threads);
            exit(1);
        }
        if (max_cpus % smp_threads) {
            error_report("max_cpus (%u) must be multiple of threads (%u)",
                         max_cpus, smp_threads);
            exit(1);
        }
    } else {
        if (max_cpus != smp_cpus) {
            error_report("This machine version does not support CPU hotplug");
            exit(1);
        }
        boot_cores_nr = possible_cpus->len;
    }

    for (i = 0; i < possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        if (mc->has_hotpluggable_cpus) {
            spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
                                   (core_id / smp_threads) * smt);
        }

        if (i < boot_cores_nr) {
            Object *core  = object_new(type);
            int nr_threads = smp_threads;

            /* Handle the partially filled core for older machine types */
            if ((i + 1) * smp_threads >= smp_cpus) {
                nr_threads = smp_cpus - i * smp_threads;
            }

            object_property_set_int(core, nr_threads, "nr-threads",
                                    &error_fatal);
            object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
                                    &error_fatal);
            object_property_set_bool(core, true, "realized", &error_fatal);
        }
    }
    g_free(type);
}
/* pSeries LPAR / sPAPR hardware init */
static void ppc_spapr_init(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *kernel_filename = machine->kernel_filename;
    const char *initrd_filename = machine->initrd_filename;
    PCIHostState *phb;
    int i;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    MemoryRegion *rma_region;
    void *rma = NULL;
    hwaddr rma_alloc_size;
    hwaddr node0_size = spapr_node0_size();
    long load_limit, fw_size;
    char *filename;
    Error *resize_hpt_err = NULL;

    msi_nonbroken = true;

    QLIST_INIT(&spapr->phbs);
    QTAILQ_INIT(&spapr->pending_dimm_unplugs);

    /* Check HPT resizing availability */
    kvmppc_check_papr_resize_hpt(&resize_hpt_err);
    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
        /*
         * If the user explicitly requested a mode we should either
         * supply it, or fail completely (which we do below).  But if
         * it's not set explicitly, we reset our mode to something
         * that works
         */
        if (resize_hpt_err) {
            spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
            error_free(resize_hpt_err);
            resize_hpt_err = NULL;
        } else {
            spapr->resize_hpt = smc->resize_hpt_default;
        }
    }

    assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);

    if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
        /*
         * User requested HPT resize, but this host can't supply it.  Bail out
         */
        error_report_err(resize_hpt_err);
        exit(1);
    }
    /* Allocate RMA if necessary */
    rma_alloc_size = kvmppc_alloc_rma(&rma);

    if (rma_alloc_size == -1) {
        error_report("Unable to create RMA");
        exit(1);
    }

    if (rma_alloc_size && (rma_alloc_size < node0_size)) {
        spapr->rma_size = rma_alloc_size;
    } else {
        spapr->rma_size = node0_size;

        /* With KVM, we don't actually know whether KVM supports an
         * unbounded RMA (PR KVM) or is limited by the hash table size
         * (HV KVM using VRMA), so we always assume the latter
         *
         * In that case, we also limit the initial allocations for RTAS
         * etc... to 256M since we have no way to know what the VRMA size
         * is going to be, as it depends on the size of the hash table,
         * which isn't determined yet.
         */
        if (kvm_enabled()) {
            spapr->vrma_adjust = 1;
            spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
        }

        /* Actually we don't support unbounded RMA anymore since we
         * added proper emulation of HV mode. The max we can get is
         * 16G which also happens to be what we configure for PAPR
         * mode so make sure we don't do anything bigger than that
         */
        spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
    }

    if (spapr->rma_size > node0_size) {
        error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
                     spapr->rma_size);
        exit(1);
    }

    /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
    load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
    /* Set up Interrupt Controller before we create the VCPUs */
    xics_system_init(machine, XICS_IRQS_SPAPR, &error_fatal);

    /* Set up containers for ibm,client-set-architecture negotiated options */
    spapr->ov5 = spapr_ovec_new();
    spapr->ov5_cas = spapr_ovec_new();

    if (smc->dr_lmb_enabled) {
        spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
        spapr_validate_node_memory(machine, &error_fatal);
    }

    spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
    if (!kvm_enabled() || kvmppc_has_cap_mmu_radix()) {
        /* KVM and TCG always allow GTSE with radix... */
        spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
    }
    /* ... but not with hash (currently). */

    /* advertise support for dedicated HP event source to guests */
    if (spapr->use_hotplug_event_source) {
        spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
    }

    /* advertise support for HPT resizing */
    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
    }

    /* init CPUs */
    if (machine->cpu_model == NULL) {
        machine->cpu_model = kvm_enabled() ? "host" : smc->tcg_default_cpu;
    }

    spapr_cpu_parse_features(spapr);

    spapr_init_cpus(spapr);

    if (kvm_enabled()) {
        /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
        kvmppc_enable_logical_ci_hcalls();
        kvmppc_enable_set_mode_hcall();

        /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
        kvmppc_enable_clear_ref_mod_hcalls();
    }
    /* allocate RAM */
    memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
                                         machine->ram_size);
    memory_region_add_subregion(sysmem, 0, ram);

    if (rma_alloc_size && rma) {
        rma_region = g_new(MemoryRegion, 1);
        memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
                                   rma_alloc_size, rma);
        vmstate_register_ram_global(rma_region);
        memory_region_add_subregion(sysmem, 0, rma_region);
    }

    /* initialize hotplug memory address space */
    if (machine->ram_size < machine->maxram_size) {
        ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
        /*
         * Limit the number of hotpluggable memory slots to half the number
         * of slots that KVM supports, leaving the other half for PCI and
         * other devices. However ensure that number of slots doesn't drop
         * below 32.
         */
        int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
                           SPAPR_MAX_RAM_SLOTS;

        if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
            max_memslots = SPAPR_MAX_RAM_SLOTS;
        }
        if (machine->ram_slots > max_memslots) {
            error_report("Specified number of memory slots %"
                         PRIu64" exceeds max supported %d",
                         machine->ram_slots, max_memslots);
            exit(1);
        }

        spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
                                              SPAPR_HOTPLUG_MEM_ALIGN);
        memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
                           "hotplug-memory", hotplug_mem_size);
        memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
                                    &spapr->hotplug_memory.mr);
    }

    if (smc->dr_lmb_enabled) {
        spapr_create_lmb_dr_connectors(spapr);
    }
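/*
 * Worked example (assuming -m 4G,maxmem=8G,slots=8): hotplug_mem_size is
 * 4 GiB, and the "hotplug-memory" region is mapped at ram_size rounded
 * up to SPAPR_HOTPLUG_MEM_ALIGN, i.e. directly above boot-time RAM in
 * the guest physical address space.
 */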
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
    if (!filename) {
        error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
        exit(1);
    }
    spapr->rtas_size = get_image_size(filename);
    if (spapr->rtas_size < 0) {
        error_report("Could not get size of LPAR rtas '%s'", filename);
        exit(1);
    }
    spapr->rtas_blob = g_malloc(spapr->rtas_size);
    if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
        error_report("Could not load LPAR rtas '%s'", filename);
        exit(1);
    }
    if (spapr->rtas_size > RTAS_MAX_SIZE) {
        error_report("RTAS too big! 0x%zx bytes (max is 0x%x)",
                     (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
        exit(1);
    }
    g_free(filename);
    /* Set up RTAS event infrastructure */
    spapr_events_init(spapr);

    /* Set up the RTC RTAS interfaces */
    spapr_rtc_create(spapr);

    /* Set up VIO bus */
    spapr->vio_bus = spapr_vio_bus_init();

    for (i = 0; i < MAX_SERIAL_PORTS; i++) {
        if (serial_hds[i]) {
            spapr_vty_create(spapr->vio_bus, serial_hds[i]);
        }
    }

    /* We always have at least the nvram device on VIO */
    spapr_create_nvram(spapr);

    /* Set up PCI */
    spapr_pci_rtas_init();

    phb = spapr_create_phb(spapr, 0);

    for (i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];

        if (!nd->model) {
            nd->model = g_strdup("ibmveth");
        }

        if (strcmp(nd->model, "ibmveth") == 0) {
            spapr_vlan_create(spapr->vio_bus, nd);
        } else {
            pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
        }
    }

    for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
        spapr_vscsi_create(spapr->vio_bus);
    }
    /* Graphics */
    if (spapr_vga_init(phb->bus, &error_fatal)) {
        spapr->has_graphics = true;
        machine->usb |= defaults_enabled() && !machine->usb_disabled;
    }

    if (machine->usb) {
        if (smc->use_ohci_by_default) {
            pci_create_simple(phb->bus, -1, "pci-ohci");
        } else {
            pci_create_simple(phb->bus, -1, "nec-usb-xhci");
        }

        if (spapr->has_graphics) {
            USBBus *usb_bus = usb_bus_find(-1);

            usb_create_simple(usb_bus, "usb-kbd");
            usb_create_simple(usb_bus, "usb-mouse");
        }
    }

    if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
        error_report(
            "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
            MIN_RMA_SLOF);
        exit(1);
    }
    if (kernel_filename) {
        uint64_t lowaddr = 0;

        spapr->kernel_size = load_elf(kernel_filename, translate_kernel_address,
                                      NULL, NULL, &lowaddr, NULL, 1,
                                      PPC_ELF_MACHINE, 0, 0);
        if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
            spapr->kernel_size = load_elf(kernel_filename,
                                          translate_kernel_address, NULL, NULL,
                                          &lowaddr, NULL, 0, PPC_ELF_MACHINE,
                                          0, 0);
            spapr->kernel_le = spapr->kernel_size > 0;
        }
        if (spapr->kernel_size < 0) {
            error_report("error loading %s: %s", kernel_filename,
                         load_elf_strerror(spapr->kernel_size));
            exit(1);
        }

        /* load initrd */
        if (initrd_filename) {
            /* Try to locate the initrd in the gap between the kernel
             * and the firmware. Add a bit of space just in case
             */
            spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
                                  + 0x1ffff) & ~0xffff;
            spapr->initrd_size = load_image_targphys(initrd_filename,
                                                     spapr->initrd_base,
                                                     load_limit
                                                     - spapr->initrd_base);
            if (spapr->initrd_size < 0) {
                error_report("could not load initial ram disk '%s'",
                             initrd_filename);
                exit(1);
            }
        }
    }
    if (bios_name == NULL) {
        bios_name = FW_FILE_NAME;
    }
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (!filename) {
        error_report("Could not find LPAR firmware '%s'", bios_name);
        exit(1);
    }
    fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
    if (fw_size <= 0) {
        error_report("Could not load LPAR firmware '%s'", filename);
        exit(1);
    }
    g_free(filename);

    /* FIXME: Should register things through the MachineState's qdev
     * interface, this is a legacy from the sPAPREnvironment structure
     * which predated MachineState but had a similar function */
    vmstate_register(NULL, 0, &vmstate_spapr, spapr);
    register_savevm_live(NULL, "spapr/htab", -1, 1,
                         &savevm_htab_handlers, spapr);

    qemu_register_boot_set(spapr_boot_set, spapr);

    if (kvm_enabled()) {
        /* to stop and start vmclock */
        qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
                                         &spapr->tb);

        kvmppc_spapr_enable_inkernel_multitce();
    }
}
static int spapr_kvm_type(const char *vm_type)
{
    if (!vm_type) {
        return 0;
    }

    if (!strcmp(vm_type, "HV")) {
        return 1;
    }

    if (!strcmp(vm_type, "PR")) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}
/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
    sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);

    if (d) {
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
             * in the top 16 bits of the 64-bit LUN
             */
            unsigned id = 0x8000 | (d->id << 8) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes the USB devices, and if it recognizes that the device is a
     * storage device, it changes its name to "storage" instead of "usb-host",
     * and additionally adds a child node for the SCSI LUN, so the correct
     * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
     */
    if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
        USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
        if (usb_host_dev_is_scsi_storage(usbdev)) {
            return g_strdup_printf("storage@%s/disk", usbdev->port->path);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    if (vsc) {
        /* Same logic as virtio above */
        unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
        return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
    }

    if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
        /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
        PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
        return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
    }

    return NULL;
}
static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->kvm_type);
}

static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
    spapr->kvm_type = g_strdup(value);
}
static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    return spapr->use_hotplug_event_source;
}

static void spapr_set_modern_hotplug_events(Object *obj, bool value,
                                            Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->use_hotplug_event_source = value;
}
static char *spapr_get_resize_hpt(Object *obj, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    switch (spapr->resize_hpt) {
    case SPAPR_RESIZE_HPT_DEFAULT:
        return g_strdup("default");
    case SPAPR_RESIZE_HPT_DISABLED:
        return g_strdup("disabled");
    case SPAPR_RESIZE_HPT_ENABLED:
        return g_strdup("enabled");
    case SPAPR_RESIZE_HPT_REQUIRED:
        return g_strdup("required");
    }
    g_assert_not_reached();
}

static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    if (strcmp(value, "default") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
    } else if (strcmp(value, "disabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
    } else if (strcmp(value, "enabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
    } else if (strcmp(value, "required") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
    } else {
        error_setg(errp, "Bad value for \"resize-hpt\" property");
    }
}
static void spapr_machine_initfn(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;
    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type, NULL);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (HV, PR)",
                                    NULL);
    object_property_add_bool(obj, "modern-hotplug-events",
                            spapr_get_modern_hotplug_events,
                            spapr_set_modern_hotplug_events,
                            NULL);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)",
                                    NULL);

    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode",
                            &error_fatal);

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt, NULL);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)",
                                    NULL);
}
static void spapr_machine_finalizefn(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
}
void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    cpu_synchronize_state(cs);
    ppc_cpu_do_system_reset(cs);
}

static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }
}
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
                           uint32_t node, bool dedicated_hp_event_source,
                           Error **errp)
{
    sPAPRDRConnector *drc;
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    int i, fdt_offset, fdt_size;
    void *fdt;
    uint64_t addr = addr_start;
    bool hotplugged = spapr_drc_hotplugged(dev);
    Error *local_err = NULL;

    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        fdt = create_device_tree(&fdt_size);
        fdt_offset = spapr_populate_memory_node(fdt, node, addr,
                                                SPAPR_MEMORY_BLOCK_SIZE);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            while (addr > addr_start) {
                addr -= SPAPR_MEMORY_BLOCK_SIZE;
                drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                      addr / SPAPR_MEMORY_BLOCK_SIZE);
                spapr_drc_detach(drc);
            }
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }
        if (!hotplugged) {
            spapr_drc_reset(drc);
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }
    /* send hotplug notification to the
     * guest only in case of hotplugged memory
     */
    if (hotplugged) {
        if (dedicated_hp_event_source) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                  addr_start / SPAPR_MEMORY_BLOCK_SIZE);
            spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                                   nr_lmbs,
                                                   spapr_drc_index(drc));
        } else {
            spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                           nr_lmbs);
        }
    }
}
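/*
 * Worked example: a 1 GiB DIMM with SPAPR_MEMORY_BLOCK_SIZE of 256 MiB
 * is carved into nr_lmbs = 4 logical memory blocks, each attached to the
 * DR connector whose id is addr / SPAPR_MEMORY_BLOCK_SIZE.
 */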
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              uint32_t node, Error **errp)
{
    Error *local_err = NULL;
    sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm);
    uint64_t align = memory_region_get_alignment(mr);
    uint64_t size = memory_region_size(mr);
    uint64_t addr;

    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
    if (local_err) {
        goto out;
    }

    addr = object_property_get_uint(OBJECT(dimm),
                                    PC_DIMM_ADDR_PROP, &local_err);
    if (local_err) {
        goto out_unplug;
    }

    spapr_add_lmbs(dev, addr, size, node,
                   spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
                   &local_err);
    if (local_err) {
        goto out_unplug;
    }

    return;

out_unplug:
    pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
    error_propagate(errp, local_err);
}
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm);
    uint64_t size = memory_region_size(mr);
    char *mem_dev;

    if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Hotplugged memory size must be a multiple of "
                   "%lld MB", SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
    if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
        error_setg(errp, "Memory backend has bad page size. "
                   "Use 'memory-backend-file' with correct mem-path.");
        goto out;
    }

out:
    g_free(mem_dev);
}
struct sPAPRDIMMState {
    PCDIMMDevice *dimm;
    uint32_t nr_lmbs;
    QTAILQ_ENTRY(sPAPRDIMMState) next;
};

static sPAPRDIMMState *spapr_pending_dimm_unplugs_find(sPAPRMachineState *s,
                                                       PCDIMMDevice *dimm)
{
    sPAPRDIMMState *dimm_state = NULL;

    QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
        if (dimm_state->dimm == dimm) {
            break;
        }
    }
    return dimm_state;
}

static void spapr_pending_dimm_unplugs_add(sPAPRMachineState *spapr,
                                           sPAPRDIMMState *dimm_state)
{
    g_assert(!spapr_pending_dimm_unplugs_find(spapr, dimm_state->dimm));
    QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, dimm_state, next);
}

static void spapr_pending_dimm_unplugs_remove(sPAPRMachineState *spapr,
                                              sPAPRDIMMState *dimm_state)
{
    QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
    g_free(dimm_state);
}
static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
                                                        PCDIMMDevice *dimm)
{
    sPAPRDRConnector *drc;
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm);
    uint64_t size = memory_region_size(mr);
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t avail_lmbs = 0;
    uint64_t addr_start, addr;
    int i;
    sPAPRDIMMState *ds;

    addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                         &error_abort);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);
        if (drc->dev) {
            avail_lmbs++;
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    ds = g_malloc0(sizeof(sPAPRDIMMState));
    ds->nr_lmbs = avail_lmbs;
    ds->dimm = dimm;
    spapr_pending_dimm_unplugs_add(ms, ds);

    return ds;
}
/* Callback to be called during DRC release. */
void spapr_lmb_release(DeviceState *dev)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_hotplug_handler(dev));
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm);
    sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /* This information will get lost if a migration occurs
     * during the unplug process. In this case recover it. */
    if (ds == NULL) {
        ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
        g_assert(ds);
        /* The DRC being examined by the caller at least must be counted */
        g_assert(ds->nr_lmbs);
    }

    if (--ds->nr_lmbs) {
        return;
    }

    spapr_pending_dimm_unplugs_remove(spapr, ds);

    /*
     * Now that all the LMBs have been removed by the guest, call the
     * pc-dimm unplug handler to cleanup up the pc-dimm device.
     */
    pc_dimm_memory_unplug(dev, &spapr->hotplug_memory, mr);
    object_unparent(OBJECT(dev));
}
static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    Error *local_err = NULL;
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm);
    uint64_t size = memory_region_size(mr);
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    uint64_t addr_start, addr;
    int i;
    sPAPRDRConnector *drc;
    sPAPRDIMMState *ds;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &local_err);
    if (local_err) {
        goto out;
    }

    ds = g_malloc0(sizeof(sPAPRDIMMState));
    ds->nr_lmbs = nr_lmbs;
    ds->dimm = dimm;
    spapr_pending_dimm_unplugs_add(spapr, ds);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        spapr_drc_detach(drc);
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                          addr_start / SPAPR_MEMORY_BLOCK_SIZE);
    spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                              nr_lmbs, spapr_drc_index(drc));
out:
    error_propagate(errp, local_err);
}
static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
                                           sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    DeviceClass *dc = DEVICE_GET_CLASS(cs);
    int id = ppc_get_vcpu_dt_id(cpu);
    void *fdt;
    int offset, fdt_size;
    char *nodename;

    fdt = create_device_tree(&fdt_size);
    nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
    offset = fdt_add_subnode(fdt, 0, nodename);

    spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    g_free(nodename);

    *fdt_offset = offset;
    return fdt;
}
/* Callback to be called during DRC release. */
void spapr_core_release(DeviceState *dev)
{
    MachineState *ms = MACHINE(qdev_get_hotplug_handler(dev));
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
    CPUCore *cc = CPU_CORE(dev);
    CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);

    if (smc->pre_2_10_has_unused_icps) {
        sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
        sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
        const char *typename = object_class_get_name(scc->cpu_class);
        size_t size = object_type_get_instance_size(typename);
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            CPUState *cs = CPU(sc->threads + i * size);

            pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
        }
    }

    assert(core_slot);
    core_slot->cpu = NULL;
    object_unparent(OBJECT(dev));
}
void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    int index;
    sPAPRDRConnector *drc;
    CPUCore *cc = CPU_CORE(dev);
    int smt = kvmppc_smt_threads();

    if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }
    if (index == 0) {
        error_setg(errp, "Boot CPU core may not be unplugged");
        return;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);
    g_assert(drc);

    spapr_drc_detach(drc);

    spapr_hotplug_req_remove_by_index(drc);
}
static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                            Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(spapr);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(dev);
    CPUState *cs = CPU(core->threads);
    sPAPRDRConnector *drc;
    Error *local_err = NULL;
    int smt = kvmppc_smt_threads();
    CPUArchId *core_slot;
    int index;
    bool hotplugged = spapr_drc_hotplugged(dev);

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index * smt);

    g_assert(drc || !mc->has_hotpluggable_cpus);

    if (drc) {
        void *fdt;
        int fdt_offset;

        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }

        if (hotplugged) {
            /*
             * Send hotplug notification interrupt to the guest only
             * in case of hotplugged CPUs.
             */
            spapr_hotplug_req_add_by_index(drc);
        } else {
            spapr_drc_reset(drc);
        }
    }

    core_slot->cpu = OBJECT(dev);

    if (smc->pre_2_10_has_unused_icps) {
        sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
        const char *typename = object_class_get_name(scc->cpu_class);
        size_t size = object_type_get_instance_size(typename);
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev);
            void *obj = sc->threads + i * size;

            cs = CPU(obj);
            pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
        }
    }
}
static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
    Error *local_err = NULL;
    CPUCore *cc = CPU_CORE(dev);
    char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
    const char *type = object_get_typename(OBJECT(dev));
    CPUArchId *core_slot;
    int index;

    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
        error_setg(&local_err, "CPU hotplug not supported for this machine");
        goto out;
    }

    if (strcmp(base_core_type, type)) {
        error_setg(&local_err, "CPU core type should be %s", base_core_type);
        goto out;
    }

    if (cc->core_id % smp_threads) {
        error_setg(&local_err, "invalid core id %d", cc->core_id);
        goto out;
    }

    /*
     * In general we should have homogeneous threads-per-core, but old
     * (pre hotplug support) machine types allow the last core to have
     * reduced threads as a compatibility hack for when we allowed
     * total vcpus not a multiple of threads-per-core.
     */
    if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
        error_setg(&local_err, "invalid nr-threads %d, must be %d",
                   cc->nr_threads, smp_threads);
        goto out;
    }

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(&local_err, "core id %d out of range", cc->core_id);
        goto out;
    }

    if (core_slot->cpu) {
        error_setg(&local_err, "core %d already populated", cc->core_id);
        goto out;
    }

    numa_cpu_pre_plug(core_slot, dev, &local_err);

out:
    g_free(base_core_type);
    error_propagate(errp, local_err);
}
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
                                      DeviceState *dev, Error **errp)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        int node;

        if (!smc->dr_lmb_enabled) {
            error_setg(errp, "Memory hotplug not supported for this machine");
            return;
        }
        node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP, errp);
        if (*errp) {
            return;
        }
        if (node < 0 || node >= MAX_NODES) {
            error_setg(errp, "Invalid node %d", node);
            return;
        }

        /*
         * Currently PowerPC kernel doesn't allow hot-adding memory to
         * memory-less node, but instead will silently add the memory
         * to the first node that has some memory. This causes two
         * unexpected behaviours for the user.
         *
         * - Memory gets hotplugged to a different node than what the user
         *   specified.
         * - Since pc-dimm subsystem in QEMU still thinks that memory belongs
         *   to memory-less node, a reboot will set things accordingly
         *   and the previously hotplugged memory now ends in the right node.
         *   This appears as if some memory moved from one node to another.
         *
         * So until kernel starts supporting memory hotplug to memory-less
         * nodes, just prevent such attempts upfront in QEMU.
         */
        if (nb_numa_nodes && !numa_info[node].node_mem) {
            error_setg(errp, "Can't hotplug memory to memory-less node %d",
                       node);
            return;
        }

        spapr_memory_plug(hotplug_dev, dev, node, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_plug(hotplug_dev, dev, errp);
    }
}
static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
                                                DeviceState *dev, Error **errp)
{
    sPAPRMachineState *sms = SPAPR_MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
            spapr_memory_unplug_request(hotplug_dev, dev, errp);
        } else {
            /* NOTE: this means there is a window after guest reset, prior to
             * CAS negotiation, where unplug requests will fail due to the
             * capability not being detected yet. This is a bit different than
             * the case with PCI unplug, where the events will be queued and
             * eventually handled by the guest after boot
             */
            error_setg(errp, "Memory hot unplug not supported for this guest");
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        if (!mc->has_hotpluggable_cpus) {
            error_setg(errp, "CPU hot unplug not supported on this machine");
            return;
        }
        spapr_core_unplug_request(hotplug_dev, dev, errp);
    }
}
static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
                                          DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_pre_plug(hotplug_dev, dev, errp);
    }
}

static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
                                                 DeviceState *dev)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        return HOTPLUG_HANDLER(machine);
    }
    return NULL;
}
static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* make sure possible_cpus are initialized */
    mc->possible_cpu_arch_ids(machine);
    /* get CPU core slot containing thread that matches cpu_index */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}
static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    int spapr_max_cores = max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                             sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;

        /* default distribution of CPUs over NUMA nodes */
        if (nb_numa_nodes) {
            /* preset values but do not enable them i.e. 'has_node_id = false',
             * numa init code will enable them later if manual mapping wasn't
             * present on CLI */
            machine->possible_cpus->cpus[i].props.node_id =
                core_id / smp_threads / smp_cores % nb_numa_nodes;
        }
    }
    return machine->possible_cpus;
}
static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /*
     * New-style PHB window placement.
     *
     * Goals: Gives large (1TiB), naturally aligned 64-bit MMIO window
     * for each PHB, in addition to 2GiB 32-bit MMIO and 64kiB PIO
     * windows.
     *
     * Some guest kernels can't work with MMIO windows above 1<<46
     * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
     *
     * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each
     * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
     * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the
     * 1TiB 64-bit MMIO windows for each PHB.
     */
    const uint64_t base_buid = 0x800000020000000ULL;
#define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \
                        SPAPR_PCI_MEM64_WIN_SIZE - 1)
    int i;

    /* Sanity check natural alignments */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
    /* Sanity check bounds */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
}
static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);

    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
}

static void spapr_ics_resend(XICSFabric *dev)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);

    ics_resend(spapr->ics);
}

static ICPState *spapr_icp_get(XICSFabric *xi, int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    return cpu ? ICP(cpu->intc) : NULL;
}

static void spapr_pic_print_info(InterruptStatsProvider *obj,
                                 Monitor *mon)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(ICP(cpu->intc), mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";

    /*
     * We set up the default / latest behaviour here.  The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->init = ppc_spapr_init;
    mc->reset = ppc_spapr_reset;
    mc->block_default_type = IF_SCSI;
    mc->max_cpus = 1024;
    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * M_BYTE;
    mc->kvm_type = spapr_kvm_type;
    mc->has_dynamic_sysbus = true;
    mc->pci_allow_0_address = true;
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;

    smc->dr_lmb_enabled = true;
    smc->tcg_default_cpu = "POWER8";
    mc->has_hotpluggable_cpus = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->store_hpte = spapr_store_hpte;
    vhc->get_patbe = spapr_get_patbe;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;
    /* Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;
}
static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(sPAPRMachineState),
    .instance_init = spapr_machine_initfn,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size    = sizeof(sPAPRMachineClass),
    .class_init    = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { }
    },
};
#define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
    static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
                                                    void *data)      \
    {                                                                \
        MachineClass *mc = MACHINE_CLASS(oc);                        \
        spapr_machine_##suffix##_class_options(mc);                  \
        if (latest) {                                                \
            mc->alias = "pseries";                                   \
            mc->is_default = 1;                                      \
        }                                                            \
    }                                                                \
    static void spapr_machine_##suffix##_instance_init(Object *obj)  \
    {                                                                \
        MachineState *machine = MACHINE(obj);                        \
        spapr_machine_##suffix##_instance_options(machine);          \
    }                                                                \
    static const TypeInfo spapr_machine_##suffix##_info = {          \
        .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
        .parent = TYPE_SPAPR_MACHINE,                                \
        .class_init = spapr_machine_##suffix##_class_init,           \
        .instance_init = spapr_machine_##suffix##_instance_init,     \
    };                                                               \
    static void spapr_machine_register_##suffix(void)                \
    {                                                                \
        type_register(&spapr_machine_##suffix##_info);               \
    }                                                                \
    type_init(spapr_machine_register_##suffix)
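/*
 * Usage note: each versioned machine type below only defines its
 * _instance_options and _class_options hooks and then instantiates
 * itself through this macro; e.g. DEFINE_SPAPR_MACHINE(2_10, "2.10", true)
 * registers "pseries-2.10" and, since latest is true, the "pseries" alias.
 */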
/*
 * pseries-2.10
 */
static void spapr_machine_2_10_instance_options(MachineState *machine)
{
}

static void spapr_machine_2_10_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE(2_10, "2.10", true);
/*
 * pseries-2.9
 */
#define SPAPR_COMPAT_2_9                                       \
    HW_COMPAT_2_9                                              \
    {                                                          \
        .driver = TYPE_POWERPC_CPU,                            \
        .property = "pre-2.10-migration",                      \
        .value    = "on",                                      \
    },                                                         \

static void spapr_machine_2_9_instance_options(MachineState *machine)
{
    spapr_machine_2_10_instance_options(machine);
}

static void spapr_machine_2_9_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_10_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9);
    mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
    smc->pre_2_10_has_unused_icps = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
}

DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
/*
 * pseries-2.8
 */
#define SPAPR_COMPAT_2_8                                       \
    HW_COMPAT_2_8                                              \
    {                                                          \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                \
        .property = "pcie-extended-configuration-space",       \
        .value    = "off",                                     \
    },

static void spapr_machine_2_8_instance_options(MachineState *machine)
{
    spapr_machine_2_9_instance_options(machine);
}

static void spapr_machine_2_8_class_options(MachineClass *mc)
{
    spapr_machine_2_9_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_8);
    mc->numa_mem_align_shift = 23;
}

DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
/*
 * pseries-2.7
 */
#define SPAPR_COMPAT_2_7                                           \
    HW_COMPAT_2_7                                                  \
    {                                                              \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                    \
        .property = "mem_win_size",                                \
        .value    = stringify(SPAPR_PCI_2_7_MMIO_WIN_SIZE),        \
    },                                                             \
    {                                                              \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                    \
        .property = "mem64_win_size",                              \
        .value    = "0",                                           \
    },                                                             \
    {                                                              \
        .driver = TYPE_POWERPC_CPU,                                \
        .property = "pre-2.8-migration",                           \
        .value    = "on",                                          \
    },                                                             \
    {                                                              \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,                      \
        .property = "pre-2.8-migration",                           \
        .value    = "on",                                          \
    },

static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /* Legacy PHB placement for pseries-2.7 and earlier machine types */
    const uint64_t base_buid = 0x800000020000000ULL;
    const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
    const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
    const hwaddr pio_offset = 0x80000000; /* 2 GiB */
    const uint32_t max_index = 255;
    const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */

    uint64_t ram_top = MACHINE(spapr)->ram_size;
    hwaddr phb0_base, phb_base;
    int i;

    /* Do we have hotpluggable memory? */
    if (MACHINE(spapr)->maxram_size > ram_top) {
        /* Can't just use maxram_size, because there may be an
         * alignment gap between normal and hotpluggable memory
         * regions */
        ram_top = spapr->hotplug_memory.base +
            memory_region_size(&spapr->hotplug_memory.mr);
    }

    phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);

    if (index > max_index) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
                   max_index);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    phb_base = phb0_base + index * phb_spacing;
    *pio = phb_base + pio_offset;
    *mmio32 = phb_base + mmio_offset;
    /*
     * We don't set the 64-bit MMIO window, relying on the PHB's
     * fallback behaviour of automatically splitting a large "32-bit"
     * window into contiguous 32-bit and 64-bit windows
     */
}

static void spapr_machine_2_7_instance_options(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);

    spapr_machine_2_8_instance_options(machine);
    spapr->use_hotplug_event_source = false;
}

static void spapr_machine_2_7_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_8_class_options(mc);
    smc->tcg_default_cpu = "POWER7";
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_7);
    smc->phb_placement = phb_placement_2_7;
}

DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
/*
 * pseries-2.6
 */
#define SPAPR_COMPAT_2_6                                       \
    HW_COMPAT_2_6                                              \
    {                                                          \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                \
        .property = "ddw",                                     \
        .value    = stringify(off),                            \
    },

static void spapr_machine_2_6_instance_options(MachineState *machine)
{
    spapr_machine_2_7_instance_options(machine);
}

static void spapr_machine_2_6_class_options(MachineClass *mc)
{
    spapr_machine_2_7_class_options(mc);
    mc->has_hotpluggable_cpus = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
}

DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
/*
 * pseries-2.5
 */
#define SPAPR_COMPAT_2_5                                       \
    HW_COMPAT_2_5                                              \
    {                                                          \
        .driver   = "spapr-vlan",                              \
        .property = "use-rx-buffer-pools",                     \
        .value    = "off",                                     \
    },

static void spapr_machine_2_5_instance_options(MachineState *machine)
{
    spapr_machine_2_6_instance_options(machine);
}

static void spapr_machine_2_5_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_6_class_options(mc);
    smc->use_ohci_by_default = true;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_5);
}

DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
/*
 * pseries-2.4
 */
#define SPAPR_COMPAT_2_4                                       \
    HW_COMPAT_2_4

static void spapr_machine_2_4_instance_options(MachineState *machine)
{
    spapr_machine_2_5_instance_options(machine);
}

static void spapr_machine_2_4_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_5_class_options(mc);
    smc->dr_lmb_enabled = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_4);
}

DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
/*
 * pseries-2.3
 */
#define SPAPR_COMPAT_2_3                                       \
    HW_COMPAT_2_3                                              \
    {                                                          \
        .driver   = "spapr-pci-host-bridge",                   \
        .property = "dynamic-reconfiguration",                 \
        .value    = "off",                                     \
    },

static void spapr_machine_2_3_instance_options(MachineState *machine)
{
    spapr_machine_2_4_instance_options(machine);
}

static void spapr_machine_2_3_class_options(MachineClass *mc)
{
    spapr_machine_2_4_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_3);
}

DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
/*
 * pseries-2.2
 */
#define SPAPR_COMPAT_2_2                                       \
    HW_COMPAT_2_2                                              \
    {                                                          \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                \
        .property = "mem_win_size",                            \
        .value    = "0x20000000",                              \
    },

static void spapr_machine_2_2_instance_options(MachineState *machine)
{
    spapr_machine_2_3_instance_options(machine);
    machine->suppress_vmdesc = true;
}

static void spapr_machine_2_2_class_options(MachineClass *mc)
{
    spapr_machine_2_3_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_2);
}

DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
/*
 * pseries-2.1
 */
#define SPAPR_COMPAT_2_1                                       \
    HW_COMPAT_2_1

static void spapr_machine_2_1_instance_options(MachineState *machine)
{
    spapr_machine_2_2_instance_options(machine);
}

static void spapr_machine_2_1_class_options(MachineClass *mc)
{
    spapr_machine_2_2_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1);
}

DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)