/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 * Copyright (c) 2010 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/numa.h"
#include "hw/fw-path-provider.h"
#include "sysemu/device_tree.h"
#include "sysemu/block-backend.h"
#include "sysemu/cpus.h"
#include "sysemu/hw_accel.h"
#include "migration/misc.h"
#include "migration/global_state.h"
#include "migration/register.h"
#include "mmu-hash64.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "hw/boards.h"
#include "hw/ppc/ppc.h"
#include "hw/loader.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/xics.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci.h"
#include "hw/scsi/scsi.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"
#include "exec/address-spaces.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/intc/intc.h"
#include "hw/compat.h"
#include "qemu/cutils.h"
#include "hw/ppc/spapr_cpu_core.h"
/* SLOF memory layout:
 *
 * SLOF raw image loaded at 0, copies its romfs right below the flat
 * device-tree, then positions SLOF itself 31M below that
 *
 * So we set FW_OVERHEAD to 40MB which should account for all of that
 * and more
 *
 * We load our kernel at 4M, leaving space for SLOF initial image
 */
#define FDT_MAX_SIZE            0x100000
#define RTAS_MAX_SIZE           0x10000
#define RTAS_MAX_ADDR           0x80000000 /* RTAS must stay below that */
#define FW_MAX_SIZE             0x400000
#define FW_FILE_NAME            "slof.bin"
#define FW_OVERHEAD             0x2800000
#define KERNEL_LOAD_ADDR        FW_MAX_SIZE

#define MIN_RMA_SLOF            128UL

#define PHANDLE_XICP            0x00001111
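
/*
 * Informal sketch of the memory map implied by the constants above
 * (derived from spapr_machine_reset() below, not extra configuration):
 * the kernel is loaded at KERNEL_LOAD_ADDR (4 MiB, i.e. FW_MAX_SIZE),
 * and at reset time the RTAS blob and the flattened device tree are
 * placed just below min(rma_size, RTAS_MAX_ADDR):
 *
 *   rtas_addr = min(rma_size, 2 GiB) - RTAS_MAX_SIZE (64 KiB)
 *   fdt_addr  = rtas_addr - FDT_MAX_SIZE (1 MiB)
 *
 * e.g. with a 256 MiB RMA: rtas_addr = 0x0fff0000, fdt_addr = 0x0fef0000.
 */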
/* These two functions implement the VCPU id numbering: one to compute them
 * all and one to identify thread 0 of a VCORE. Any change to the first one
 * is likely to have an impact on the second one, so let's keep them close.
 */
static int spapr_vcpu_id(sPAPRMachineState *spapr, int cpu_index)
{
    return
        (cpu_index / smp_threads) * spapr->vsmt + cpu_index % smp_threads;
}

static bool spapr_is_thread0_in_vcore(sPAPRMachineState *spapr,
                                      PowerPCCPU *cpu)
{
    return spapr_get_vcpu_id(cpu) % spapr->vsmt == 0;
}
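
/*
 * Worked example (illustrative only): with smp_threads = 4 and
 * spapr->vsmt = 8, spapr_vcpu_id() maps cpu_index 0..3 to VCPU ids 0..3
 * and cpu_index 4..7 to VCPU ids 8..11, i.e. each virtual core occupies
 * a stride of vsmt ids even though only smp_threads of them are
 * populated. spapr_is_thread0_in_vcore() is then true exactly for VCPU
 * ids 0, 8, 16, ... (multiples of vsmt).
 */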
static ICSState *spapr_ics_create(sPAPRMachineState *spapr,
                                  const char *type_ics,
                                  int nr_irqs, Error **errp)
{
    Error *local_err = NULL;
    Object *obj;

    obj = object_new(type_ics);
    object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort);
    object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr),
                                   &error_abort);
    object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err);
    if (local_err) {
        goto error;
    }
    object_property_set_bool(obj, true, "realized", &local_err);
    if (local_err) {
        goto error;
    }

    return ICS_SIMPLE(obj);

error:
    error_propagate(errp, local_err);
    return NULL;
}
static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
{
    /* Dummy entries correspond to unused ICPState objects in older QEMUs,
     * and newer QEMUs don't even have them. In both cases, we don't want
     * to send anything on the wire.
     */
    return false;
}

static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pre_2_10_vmstate_dummy_icp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(4), /* uint32_t xirr */
        VMSTATE_UNUSED(1), /* uint8_t pending_priority */
        VMSTATE_UNUSED(1), /* uint8_t mfrr */
        VMSTATE_END_OF_LIST()
    },
};
static void pre_2_10_vmstate_register_dummy_icp(int i)
{
    vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
                     (void *)(uintptr_t) i);
}

static void pre_2_10_vmstate_unregister_dummy_icp(int i)
{
    vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
                       (void *)(uintptr_t) i);
}
static int xics_max_server_number(sPAPRMachineState *spapr)
{
    return DIV_ROUND_UP(max_cpus * spapr->vsmt, smp_threads);
}
static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);

    if (kvm_enabled()) {
        if (machine_kernel_irqchip_allowed(machine) &&
            !xics_kvm_init(spapr, errp)) {
            spapr->icp_type = TYPE_KVM_ICP;
            spapr->ics = spapr_ics_create(spapr, TYPE_ICS_KVM, nr_irqs, errp);
        }

        if (machine_kernel_irqchip_required(machine) && !spapr->ics) {
            error_prepend(errp, "kernel_irqchip requested but unavailable: ");
            return;
        }
    }

    if (!spapr->ics) {
        xics_spapr_init(spapr);
        spapr->icp_type = TYPE_ICP;
        spapr->ics = spapr_ics_create(spapr, TYPE_ICS_SIMPLE, nr_irqs, errp);
    }
}
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
                                  int smt_threads)
{
    int i, ret = 0;
    uint32_t servers_prop[smt_threads];
    uint32_t gservers_prop[smt_threads * 2];
    int index = spapr_get_vcpu_id(cpu);

    if (cpu->compat_pvr) {
        ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->compat_pvr);
        if (ret < 0) {
            return ret;
        }
    }

    /* Build interrupt servers and gservers properties */
    for (i = 0; i < smt_threads; i++) {
        servers_prop[i] = cpu_to_be32(index + i);
        /* Hack, direct the group queues back to cpu 0 */
        gservers_prop[i*2] = cpu_to_be32(index + i);
        gservers_prop[i*2 + 1] = 0;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
                      servers_prop, sizeof(servers_prop));
    if (ret < 0) {
        return ret;
    }
    ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
                      gservers_prop, sizeof(gservers_prop));

    return ret;
}
static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, PowerPCCPU *cpu)
{
    int index = spapr_get_vcpu_id(cpu);
    uint32_t associativity[] = {cpu_to_be32(0x5),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(0x0),
                                cpu_to_be32(cpu->node_id),
                                cpu_to_be32(index)};

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", associativity,
                       sizeof(associativity));
}
/* Populate the "ibm,pa-features" property */
static void spapr_populate_pa_features(sPAPRMachineState *spapr,
                                       PowerPCCPU *cpu,
                                       void *fdt, int offset,
                                       bool legacy_guest)
{
    uint8_t pa_features_206[] = { 6, 0,
        0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
    uint8_t pa_features_207[] = { 24, 0,
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
    uint8_t pa_features_300[] = { 66, 0,
        /* 0: MMU|FPU|SLB|RUN|DABR|NX, 1: fri[nzpm]|DABRX|SPRG3|SLB0|PP110 */
        /* 2: VPM|DS205|PPR|DS202|DS206, 3: LSD|URG, SSO, 5: LE|CFAR|EB|LSQ */
        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
        /* 6: DS207 */
        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
        /* 16: Vector */
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
        /* 18: Vec. Scalar, 20: Vec. XOR, 22: HTM */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
        /* 24: Ext. Dec, 26: 64 bit ftrs, 28: PM ftrs */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 24 - 29 */
        /* 30: MMR, 32: LE atomic, 34: EBB + ext EBB */
        0x80, 0x00, 0x80, 0x00, 0xC0, 0x00, /* 30 - 35 */
        /* 36: SPR SO, 38: Copy/Paste, 40: Radix MMU */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 36 - 41 */
        /* 42: PM, 44: PC RA, 46: SC vec'd */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 42 - 47 */
        /* 48: SIMD, 50: QP BFP, 52: String */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 - 53 */
        /* 54: DecFP, 56: DecI, 58: SHA */
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 54 - 59 */
        /* 60: NM atomic, 62: RNG */
        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 60 - 65 */
    };
    uint8_t *pa_features = NULL;
    size_t pa_size;

    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_06, 0, cpu->compat_pvr)) {
        pa_features = pa_features_206;
        pa_size = sizeof(pa_features_206);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_2_07, 0, cpu->compat_pvr)) {
        pa_features = pa_features_207;
        pa_size = sizeof(pa_features_207);
    }
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, cpu->compat_pvr)) {
        pa_features = pa_features_300;
        pa_size = sizeof(pa_features_300);
    }

    if (!pa_features) {
        return;
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
        /*
         * Note: we keep CI large pages off by default because a 64K capable
         * guest provisioned with large pages might otherwise try to map a qemu
         * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
         * even if that qemu runs on a 4k host.
         * We add this bit back here if we are confident this is not an issue
         */
        pa_features[3] |= 0x20;
    }
    if ((spapr_get_cap(spapr, SPAPR_CAP_HTM) != 0) && pa_size > 24) {
        pa_features[24] |= 0x80;    /* Transactional memory support */
    }
    if (legacy_guest && pa_size > 40) {
        /* Workaround for broken kernels that attempt (guest) radix
         * mode when they can't handle it, if they see the radix bit set
         * in pa-features. So hide it from them. */
        pa_features[40 + 2] &= ~0x80; /* Radix MMU */
    }

    _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
}
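
/*
 * Layout note (informal): each pa_features_* array starts with a
 * two-byte { length, 0 } prefix, so option byte N of the PAPR
 * definition lives at array index N + 2. That is why the HTM bit
 * (option byte 22) is set via pa_features[24] and the radix MMU bit
 * (option byte 40) is cleared via pa_features[40 + 2]. Bits are
 * numbered MSB-first, hence the 0x80 masks for "bit 0".
 */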
static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
{
    int ret = 0, offset, cpus_offset;
    CPUState *cs;
    char cpu_model[32];
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int index = spapr_get_vcpu_id(cpu);
        int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));

        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);

        cpus_offset = fdt_path_offset(fdt, "/cpus");
        if (cpus_offset < 0) {
            cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
            if (cpus_offset < 0) {
                return cpus_offset;
            }
        }

        offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
        if (offset < 0) {
            offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
            if (offset < 0) {
                return offset;
            }
        }

        ret = fdt_setprop(fdt, offset, "ibm,pft-size",
                          pft_size_prop, sizeof(pft_size_prop));
        if (ret < 0) {
            return ret;
        }

        if (nb_numa_nodes > 1) {
            ret = spapr_fixup_cpu_numa_dt(fdt, offset, cpu);
            if (ret < 0) {
                return ret;
            }
        }

        ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt);
        if (ret < 0) {
            return ret;
        }

        spapr_populate_pa_features(spapr, cpu, fdt, offset,
                                   spapr->cas_legacy_guest_workaround);
    }
    return ret;
}
static hwaddr spapr_node0_size(MachineState *machine)
{
    if (nb_numa_nodes) {
        int i;

        for (i = 0; i < nb_numa_nodes; ++i) {
            if (numa_info[i].node_mem) {
                return MIN(pow2floor(numa_info[i].node_mem),
                           machine->ram_size);
            }
        }
    }
    return machine->ram_size;
}
static void add_str(GString *s, const gchar *s1)
{
    g_string_append_len(s, s1, strlen(s1) + 1);
}
static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
                                      hwaddr size)
{
    uint32_t associativity[] = {
        cpu_to_be32(0x4), /* length */
        cpu_to_be32(0x0), cpu_to_be32(0x0),
        cpu_to_be32(0x0), cpu_to_be32(nodeid)
    };
    char mem_name[32];
    uint64_t mem_reg_property[2];
    int off;

    mem_reg_property[0] = cpu_to_be64(start);
    mem_reg_property[1] = cpu_to_be64(size);

    sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
    off = fdt_add_subnode(fdt, 0, mem_name);
    _FDT(off);
    _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
    _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
                      sizeof(mem_reg_property))));
    _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
                      sizeof(associativity))));
    return off;
}
static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    hwaddr mem_start, node_size;
    int i, nb_nodes = nb_numa_nodes;
    NodeInfo *nodes = numa_info;
    NodeInfo ramnode;

    /* No NUMA nodes, assume there is just one node with whole RAM */
    if (!nb_numa_nodes) {
        nb_nodes = 1;
        ramnode.node_mem = machine->ram_size;
        nodes = &ramnode;
    }

    for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
        if (!nodes[i].node_mem) {
            continue;
        }
        if (mem_start >= machine->ram_size) {
            node_size = 0;
        } else {
            node_size = nodes[i].node_mem;
            if (node_size > machine->ram_size - mem_start) {
                node_size = machine->ram_size - mem_start;
            }
        }
        if (!mem_start) {
            /* spapr_machine_init() checks for rma_size <= node0_size
             * already */
            spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
            mem_start += spapr->rma_size;
            node_size -= spapr->rma_size;
        }
        for ( ; node_size; ) {
            hwaddr sizetmp = pow2floor(node_size);

            /* mem_start != 0 here */
            if (ctzl(mem_start) < ctzl(sizetmp)) {
                sizetmp = 1ULL << ctzl(mem_start);
            }

            spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
            node_size -= sizetmp;
            mem_start += sizetmp;
        }
    }

    return 0;
}
static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
                                  sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
    int index = spapr_get_vcpu_id(cpu);
    uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
                       0xffffffff, 0xffffffff};
    uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq()
        : SPAPR_TIMEBASE_FREQ;
    uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
    uint32_t page_sizes_prop[64];
    size_t page_sizes_prop_size;
    uint32_t vcpus_per_socket = smp_threads * smp_cores;
    uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
    int compat_smt = MIN(smp_threads, ppc_compat_max_vthreads(cpu));
    sPAPRDRConnector *drc;
    int drc_index;
    uint32_t radix_AP_encodings[PPC_PAGE_SIZES_MAX_SZ];
    int i;

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU, index);
    if (drc) {
        drc_index = spapr_drc_index(drc);
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index)));
    }

    _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
    _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));

    _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
                           env->dcache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
                           env->icache_line_size)));
    _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
                           env->icache_line_size)));

    if (pcc->l1_dcache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
                               pcc->l1_dcache_size)));
    } else {
        warn_report("Unknown L1 dcache size for cpu");
    }
    if (pcc->l1_icache_size) {
        _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
                               pcc->l1_icache_size)));
    } else {
        warn_report("Unknown L1 icache size for cpu");
    }

    _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
    _FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr)));
    _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
    _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
    _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));

    if (env->spr_cb[SPR_PURR].oea_read) {
        _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
    }

    if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
                          segs, sizeof(segs))));
    }

    /* Advertise VSX (vector extensions) if available
     *   1               == VMX / Altivec available
     *   2               == VSX available
     *
     * Only CPUs for which we create core types in spapr_cpu_core.c
     * are possible, and all of those have VMX */
    if (spapr_get_cap(spapr, SPAPR_CAP_VSX) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 2)));
    } else {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", 1)));
    }

    /* Advertise DFP (Decimal Floating Point) if available
     *   0 / no property == no DFP
     *   1               == DFP available */
    if (spapr_get_cap(spapr, SPAPR_CAP_DFP) != 0) {
        _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
    }

    page_sizes_prop_size = ppc_create_page_sizes_prop(cpu, page_sizes_prop,
                                                      sizeof(page_sizes_prop));
    if (page_sizes_prop_size) {
        _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
                          page_sizes_prop, page_sizes_prop_size)));
    }

    spapr_populate_pa_features(spapr, cpu, fdt, offset, false);

    _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
                           cs->cpu_index / vcpus_per_socket)));

    _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
                      pft_size_prop, sizeof(pft_size_prop))));

    if (nb_numa_nodes > 1) {
        _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cpu));
    }

    _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu, compat_smt));

    if (pcc->radix_page_info) {
        for (i = 0; i < pcc->radix_page_info->count; i++) {
            radix_AP_encodings[i] =
                cpu_to_be32(pcc->radix_page_info->entries[i]);
        }
        _FDT((fdt_setprop(fdt, offset, "ibm,processor-radix-AP-encodings",
                          radix_AP_encodings,
                          pcc->radix_page_info->count *
                          sizeof(radix_AP_encodings[0]))));
    }
}
static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
{
    CPUState *cs;
    int cpus_offset;
    char *nodename;

    cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
    _FDT(cpus_offset);
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
    _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));

    /*
     * We walk the CPUs in reverse order to ensure that CPU DT nodes
     * created by fdt_add_subnode() end up in the right order in FDT
     * for the guest kernel to enumerate the CPUs correctly.
     */
    CPU_FOREACH_REVERSE(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        int index = spapr_get_vcpu_id(cpu);
        DeviceClass *dc = DEVICE_GET_CLASS(cs);
        int offset;

        if (!spapr_is_thread0_in_vcore(spapr, cpu)) {
            continue;
        }

        nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
        offset = fdt_add_subnode(fdt, cpus_offset, nodename);
        g_free(nodename);
        _FDT(offset);
        spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    }
}
static uint32_t spapr_pc_dimm_node(MemoryDeviceInfoList *list, ram_addr_t addr)
{
    MemoryDeviceInfoList *info;

    for (info = list; info; info = info->next) {
        MemoryDeviceInfo *value = info->value;

        if (value && value->type == MEMORY_DEVICE_INFO_KIND_DIMM) {
            PCDIMMDeviceInfo *pcdimm_info = value->u.dimm.data;

            /* match any address within the DIMM, not just its base */
            if (addr >= pcdimm_info->addr &&
                addr < (pcdimm_info->addr + pcdimm_info->size)) {
                return pcdimm_info->node;
            }
        }
    }

    return -1;
}
/*
 * Adds ibm,dynamic-reconfiguration-memory node.
 * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
 * of this device tree node.
 */
static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int ret, i, offset;
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
    uint32_t hotplug_lmb_start = spapr->hotplug_memory.base / lmb_size;
    uint32_t nr_lmbs = (spapr->hotplug_memory.base +
                       memory_region_size(&spapr->hotplug_memory.mr)) /
                       lmb_size;
    uint32_t *int_buf, *cur_index, buf_len;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
    MemoryDeviceInfoList *dimms = NULL;

    /*
     * Don't create the node if there is no hotpluggable memory
     */
    if (machine->ram_size == machine->maxram_size) {
        return 0;
    }

    /*
     * Allocate enough buffer size to fit in ibm,dynamic-memory
     * or ibm,associativity-lookup-arrays
     */
    buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
              * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);

    offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");

    ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
                      sizeof(prop_lmb_size));
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
    if (ret < 0) {
        goto out;
    }

    ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
    if (ret < 0) {
        goto out;
    }

    if (hotplug_lmb_start) {
        dimms = qmp_pc_dimm_device_list();
    }

    /* ibm,dynamic-memory */
    int_buf[0] = cpu_to_be32(nr_lmbs);
    cur_index++;
    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr = i * lmb_size;
        uint32_t *dynamic_memory = cur_index;

        if (i >= hotplug_lmb_start) {
            sPAPRDRConnector *drc;

            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, i);
            g_assert(drc);

            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(spapr_drc_index(drc));
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(spapr_pc_dimm_node(dimms, addr));
            if (memory_region_present(get_system_memory(), addr)) {
                dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
            } else {
                dynamic_memory[5] = cpu_to_be32(0);
            }
        } else {
            /*
             * LMB information for RMA, boot time RAM and gap b/n RAM and
             * hotplug memory region -- all these are marked as reserved
             * and as having no valid DRC.
             */
            dynamic_memory[0] = cpu_to_be32(addr >> 32);
            dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
            dynamic_memory[2] = cpu_to_be32(0);
            dynamic_memory[3] = cpu_to_be32(0); /* reserved */
            dynamic_memory[4] = cpu_to_be32(-1);
            dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_RESERVED |
                                            SPAPR_LMB_FLAGS_DRC_INVALID);
        }

        cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
    }
    qapi_free_MemoryDeviceInfoList(dimms);
    ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
    if (ret < 0) {
        goto out;
    }

    /* ibm,associativity-lookup-arrays */
    cur_index = int_buf;
    int_buf[0] = cpu_to_be32(nr_nodes);
    int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        uint32_t associativity[] = {
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(0x0),
            cpu_to_be32(i)
        };
        memcpy(cur_index, associativity, sizeof(associativity));
        cur_index += 4;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
                      (cur_index - int_buf) * sizeof(uint32_t));
out:
    g_free(int_buf);
    return ret;
}
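
/*
 * Informal sketch of each "ibm,dynamic-memory" list entry built above
 * (SPAPR_DR_LMB_LIST_ENTRY_SIZE uint32_t cells, matching the
 * dynamic_memory[0..5] writes; see docs/specs/ppc-spapr-hotplug.txt
 * for the authoritative description):
 *
 *   [0] addr hi    [1] addr lo    [2] DRC index
 *   [3] reserved   [4] NUMA node  [5] flags
 */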
static int spapr_dt_cas_updates(sPAPRMachineState *spapr, void *fdt,
                                sPAPROptionVector *ov5_updates)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int ret = 0, offset;

    /* Generate ibm,dynamic-reconfiguration-memory node if required */
    if (spapr_ovec_test(ov5_updates, OV5_DRCONF_MEMORY)) {
        g_assert(smc->dr_lmb_enabled);
        ret = spapr_populate_drconf_memory(spapr, fdt);
        if (ret) {
            goto out;
        }
    }

    offset = fdt_path_offset(fdt, "/chosen");
    if (offset < 0) {
        offset = fdt_add_subnode(fdt, 0, "chosen");
        if (offset < 0) {
            return offset;
        }
    }
    ret = spapr_ovec_populate_dt(fdt, offset, spapr->ov5_cas,
                                 "ibm,architecture-vec-5");

out:
    return ret;
}
static bool spapr_hotplugged_dev_before_cas(void)
{
    Object *drc_container, *obj;
    ObjectProperty *prop;
    ObjectPropertyIterator iter;

    drc_container = container_get(object_get_root(), "/dr-connector");
    object_property_iter_init(&iter, drc_container);
    while ((prop = object_property_iter_next(&iter))) {
        if (!strstart(prop->type, "link<", NULL)) {
            continue;
        }
        obj = object_property_get_link(drc_container, prop->name, NULL);
        if (spapr_drc_needed(obj)) {
            return true;
        }
    }
    return false;
}
int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
                                 target_ulong addr, target_ulong size,
                                 sPAPROptionVector *ov5_updates)
{
    void *fdt, *fdt_skel;
    sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };

    if (spapr_hotplugged_dev_before_cas()) {
        return 1;
    }

    if (size < sizeof(hdr) || size > FW_MAX_SIZE) {
        error_report("SLOF provided an unexpected CAS buffer size "
                     TARGET_FMT_lu " (min: %zu, max: %u)",
                     size, sizeof(hdr), FW_MAX_SIZE);
        exit(EXIT_FAILURE);
    }

    size -= sizeof(hdr);

    /* Create skeleton */
    fdt_skel = g_malloc0(size);
    _FDT((fdt_create(fdt_skel, size)));
    _FDT((fdt_finish_reservemap(fdt_skel)));
    _FDT((fdt_begin_node(fdt_skel, "")));
    _FDT((fdt_end_node(fdt_skel)));
    _FDT((fdt_finish(fdt_skel)));
    fdt = g_malloc0(size);
    _FDT((fdt_open_into(fdt_skel, fdt, size)));
    g_free(fdt_skel);

    /* Fixup cpu nodes */
    _FDT((spapr_fixup_cpu_dt(fdt, spapr)));

    if (spapr_dt_cas_updates(spapr, fdt, ov5_updates)) {
        return -1;
    }

    /* Pack resulting tree */
    _FDT((fdt_pack(fdt)));

    if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
        trace_spapr_cas_failed(size);
        return -1;
    }

    cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
    cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
    trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));

    g_free(fdt);

    return 0;
}
static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
{
    int rtas;
    GString *hypertas = g_string_sized_new(256);
    GString *qemu_hypertas = g_string_sized_new(256);
    uint32_t refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4) };
    uint64_t max_hotplug_addr = spapr->hotplug_memory.base +
        memory_region_size(&spapr->hotplug_memory.mr);
    uint32_t lrdr_capacity[] = {
        cpu_to_be32(max_hotplug_addr >> 32),
        cpu_to_be32(max_hotplug_addr & 0xffffffff),
        0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
        cpu_to_be32(max_cpus / smp_threads),
    };

    _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));

    /* hypertas */
    add_str(hypertas, "hcall-pft");
    add_str(hypertas, "hcall-term");
    add_str(hypertas, "hcall-dabr");
    add_str(hypertas, "hcall-interrupt");
    add_str(hypertas, "hcall-tce");
    add_str(hypertas, "hcall-vio");
    add_str(hypertas, "hcall-splpar");
    add_str(hypertas, "hcall-bulk");
    add_str(hypertas, "hcall-set-mode");
    add_str(hypertas, "hcall-sprg0");
    add_str(hypertas, "hcall-copy");
    add_str(hypertas, "hcall-debug");
    add_str(qemu_hypertas, "hcall-memop1");

    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
        add_str(hypertas, "hcall-multi-tce");
    }

    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        add_str(hypertas, "hcall-hpt-resize");
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,hypertas-functions",
                     hypertas->str, hypertas->len));
    g_string_free(hypertas, TRUE);
    _FDT(fdt_setprop(fdt, rtas, "qemu,hypertas-functions",
                     qemu_hypertas->str, qemu_hypertas->len));
    g_string_free(qemu_hypertas, TRUE);

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, sizeof(refpoints)));

    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-error-log-max",
                          RTAS_ERROR_LOG_MAX));
    _FDT(fdt_setprop_cell(fdt, rtas, "rtas-event-scan-rate",
                          RTAS_EVENT_SCAN_RATE));

    g_assert(msi_nonbroken);
    _FDT(fdt_setprop(fdt, rtas, "ibm,change-msix-capable", NULL, 0));

    /*
     * According to PAPR, the rtas ibm,os-term call does not guarantee a
     * return back to the guest cpu.
     *
     * The additional ibm,extended-os-term property indicates that the
     * rtas call will always return. Set this property.
     */
    _FDT(fdt_setprop(fdt, rtas, "ibm,extended-os-term", NULL, 0));

    _FDT(fdt_setprop(fdt, rtas, "ibm,lrdr-capacity",
                     lrdr_capacity, sizeof(lrdr_capacity)));

    spapr_dt_rtas_tokens(fdt, rtas);
}
/* Prepare ibm,arch-vec-5-platform-support, which indicates the MMU features
 * that the guest may request and thus the valid values for bytes 24..26 of
 * option vector 5: */
static void spapr_dt_ov5_platform_support(void *fdt, int chosen)
{
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    char val[2 * 4] = {
        23, 0x00, /* Xive mode, filled in below. */
        24, 0x00, /* Hash/Radix, filled in below. */
        25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
        26, 0x40, /* Radix options: GTSE == yes. */
    };

    if (!ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                          first_ppc_cpu->compat_pvr)) {
        /* If we're in a pre POWER9 compat mode then the guest should do hash */
        val[3] = 0x00; /* Hash */
    } else if (kvm_enabled()) {
        if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
            val[3] = 0x80; /* OV5_MMU_BOTH */
        } else if (kvmppc_has_cap_mmu_radix()) {
            val[3] = 0x40; /* OV5_MMU_RADIX_300 */
        } else {
            val[3] = 0x00; /* Hash */
        }
    } else {
        /* V3 MMU supports both hash and radix in tcg (with dynamic switching) */
        val[3] = 0xC0;
    }
    _FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
                     val, sizeof(val)));
}
static void spapr_dt_chosen(sPAPRMachineState *spapr, void *fdt)
{
    MachineState *machine = MACHINE(spapr);
    int chosen;
    const char *boot_device = machine->boot_order;
    char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
    size_t cb = 0;
    char *bootlist = get_boot_devices_list(&cb, true);

    _FDT(chosen = fdt_add_subnode(fdt, 0, "chosen"));

    _FDT(fdt_setprop_string(fdt, chosen, "bootargs", machine->kernel_cmdline));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-start",
                          spapr->initrd_base));
    _FDT(fdt_setprop_cell(fdt, chosen, "linux,initrd-end",
                          spapr->initrd_base + spapr->initrd_size));

    if (spapr->kernel_size) {
        uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
                              cpu_to_be64(spapr->kernel_size) };

        _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel",
                         &kprop, sizeof(kprop)));
        if (spapr->kernel_le) {
            _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel-le", NULL, 0));
        }
    }
    if (boot_menu) {
        _FDT((fdt_setprop_cell(fdt, chosen, "qemu,boot-menu", boot_menu)));
    }
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-width", graphic_width));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-height", graphic_height));
    _FDT(fdt_setprop_cell(fdt, chosen, "qemu,graphic-depth", graphic_depth));

    if (cb && bootlist) {
        int i;

        for (i = 0; i < cb; i++) {
            if (bootlist[i] == '\n') {
                bootlist[i] = ' ';
            }
        }
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-list", bootlist));
    }

    if (boot_device && strlen(boot_device)) {
        _FDT(fdt_setprop_string(fdt, chosen, "qemu,boot-device", boot_device));
    }

    if (!spapr->has_graphics && stdout_path) {
        /*
         * The "linux,stdout-path" and "stdout" properties are deprecated by
         * the linux kernel. New platforms should only use the "stdout-path"
         * property. Set the new property and continue using the older
         * property to remain compatible with the existing firmware.
         */
        _FDT(fdt_setprop_string(fdt, chosen, "linux,stdout-path", stdout_path));
        _FDT(fdt_setprop_string(fdt, chosen, "stdout-path", stdout_path));
    }

    spapr_dt_ov5_platform_support(fdt, chosen);

    g_free(stdout_path);
    g_free(bootlist);
}
static void spapr_dt_hypervisor(sPAPRMachineState *spapr, void *fdt)
{
    /* The /hypervisor node isn't in PAPR - this is a hack to allow PR
     * KVM to work under pHyp with some guest co-operation */
    int hypervisor;
    uint8_t hypercall[16];

    _FDT(hypervisor = fdt_add_subnode(fdt, 0, "hypervisor"));
    /* indicate KVM hypercall interface */
    _FDT(fdt_setprop_string(fdt, hypervisor, "compatible", "linux,kvm"));
    if (kvmppc_has_cap_fixup_hcalls()) {
        /*
         * Older KVM versions with older guest kernels were broken
         * with the magic page, don't allow the guest to map it.
         */
        if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
                                  sizeof(hypercall))) {
            _FDT(fdt_setprop(fdt, hypervisor, "hcall-instructions",
                             hypercall, sizeof(hypercall)));
        }
    }
}
static void *spapr_build_fdt(sPAPRMachineState *spapr,
                             hwaddr rtas_addr,
                             hwaddr rtas_size)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    int ret;
    void *fdt;
    sPAPRPHBState *phb;
    char *buf;

    fdt = g_malloc0(FDT_MAX_SIZE);
    _FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));

    /* Root node */
    _FDT(fdt_setprop_string(fdt, 0, "device_type", "chrp"));
    _FDT(fdt_setprop_string(fdt, 0, "model", "IBM pSeries (emulated by qemu)"));
    _FDT(fdt_setprop_string(fdt, 0, "compatible", "qemu,pseries"));

    /*
     * Add info to the guest to identify which host it is being run on
     * and what the uuid of the guest is
     */
    if (kvmppc_get_host_model(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-model", buf));
        g_free(buf);
    }
    if (kvmppc_get_host_serial(&buf)) {
        _FDT(fdt_setprop_string(fdt, 0, "host-serial", buf));
        g_free(buf);
    }

    buf = qemu_uuid_unparse_strdup(&qemu_uuid);

    _FDT(fdt_setprop_string(fdt, 0, "vm,uuid", buf));
    if (qemu_uuid_set) {
        _FDT(fdt_setprop_string(fdt, 0, "system-id", buf));
    }
    g_free(buf);

    if (qemu_get_vm_name()) {
        _FDT(fdt_setprop_string(fdt, 0, "ibm,partition-name",
                                qemu_get_vm_name()));
    }

    _FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
    _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));

    /* /interrupt controller */
    spapr_dt_xics(xics_max_server_number(spapr), fdt, PHANDLE_XICP);

    ret = spapr_populate_memory(spapr, fdt);
    if (ret < 0) {
        error_report("couldn't setup memory nodes in fdt");
        exit(1);
    }

    /* /vdevice */
    spapr_dt_vdevice(spapr->vio_bus, fdt);

    if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
        ret = spapr_rng_populate_dt(fdt);
        if (ret < 0) {
            error_report("could not set up rng device in the fdt");
            exit(1);
        }
    }

    QLIST_FOREACH(phb, &spapr->phbs, list) {
        ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
        if (ret < 0) {
            error_report("couldn't setup PCI devices in fdt");
            exit(1);
        }
    }

    /* cpus */
    spapr_populate_cpus_dt_node(fdt, spapr);

    if (smc->dr_lmb_enabled) {
        _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
    }

    if (mc->has_hotpluggable_cpus) {
        int offset = fdt_path_offset(fdt, "/cpus");
        ret = spapr_drc_populate_dt(fdt, offset, NULL,
                                    SPAPR_DR_CONNECTOR_TYPE_CPU);
        if (ret < 0) {
            error_report("Couldn't set up CPU DR device tree properties");
            exit(1);
        }
    }

    /* /event-sources */
    spapr_dt_events(spapr, fdt);

    /* /rtas */
    spapr_dt_rtas(spapr, fdt);

    /* /chosen */
    spapr_dt_chosen(spapr, fdt);

    /* /hypervisor */
    if (kvm_enabled()) {
        spapr_dt_hypervisor(spapr, fdt);
    }

    /* Build memory reserve map */
    if (spapr->kernel_size) {
        _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size)));
    }
    if (spapr->initrd_size) {
        _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, spapr->initrd_size)));
    }

    /* ibm,client-architecture-support updates */
    ret = spapr_dt_cas_updates(spapr, fdt, spapr->ov5_cas);
    if (ret < 0) {
        error_report("couldn't setup CAS properties fdt");
        exit(1);
    }

    return fdt;
}
static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
{
    return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
}
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* The TCG path should also be holding the BQL at this point */
    g_assert(qemu_mutex_iothread_locked());

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}
static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    return spapr->patb_entry;
}
#define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
#define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
#define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
#define CLEAN_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
#define DIRTY_HPTE(_hpte)  ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
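
/*
 * Usage sketch (illustrative): the macros above treat the HPT as an
 * array of 16-byte HPTEs and borrow a software bit in the first
 * doubleword for migration dirty-tracking, e.g.:
 *
 *   if (HPTE_VALID(HPTE(spapr->htab, i))) {
 *       CLEAN_HPTE(HPTE(spapr->htab, i));   // clear the dirty bit
 *   }
 */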
/*
 * Get the fd to access the kernel htab, re-opening it if necessary
 */
static int get_htab_fd(sPAPRMachineState *spapr)
{
    Error *local_err = NULL;

    if (spapr->htab_fd >= 0) {
        return spapr->htab_fd;
    }

    spapr->htab_fd = kvmppc_get_htab_fd(false, 0, &local_err);
    if (spapr->htab_fd < 0) {
        error_report_err(local_err);
    }

    return spapr->htab_fd;
}
void close_htab_fd(sPAPRMachineState *spapr)
{
    if (spapr->htab_fd >= 0) {
        close(spapr->htab_fd);
    }
    spapr->htab_fd = -1;
}
static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
}
static target_ulong spapr_encode_hpt_for_kvm_pr(PPCVirtualHypervisor *vhyp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    assert(kvm_enabled());

    if (!spapr->htab) {
        return 0;
    }

    return (target_ulong)(uintptr_t)spapr->htab | (spapr->htab_shift - 18);
}
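
/*
 * Encoding note (informal): the value returned above follows the
 * SDR1-style convention, the HPT origin ORed with (htab_shift - 18),
 * where 18 is the minimum architected HPT order. E.g. a 2^25-byte HPT
 * yields low bits of 7. Since the userspace HPT is qemu_memalign()ed
 * to its own size (see spapr_reallocate_hpt() below), the address and
 * size bits never overlap.
 */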
static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
                                               hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        /*
         * HTAB is controlled by KVM. Fetch into temporary buffer
         */
        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
        kvmppc_read_hptes(hptes, ptex, n);
        return hptes;
    }

    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
}
static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
                              const ppc_hash_pte64_t *hptes,
                              hwaddr ptex, int n)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);

    if (!spapr->htab) {
        g_free((void *)hptes);
    }

    /* Nothing to do for qemu managed HPT */
}
static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                             uint64_t pte0, uint64_t pte1)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (!spapr->htab) {
        kvmppc_write_hpte(ptex, pte0, pte1);
    } else {
        stq_p(spapr->htab + offset, pte0);
        stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
    }
}
int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
    int shift;

    /* We aim for a hash table of size 1/128 the size of RAM (rounded
     * up). The PAPR recommendation is actually 1/64 of RAM size, but
     * that's much more than is needed for Linux guests */
    shift = ctz64(pow2ceil(ramsize)) - 7;
    shift = MAX(shift, 18); /* Minimum architected size */
    shift = MIN(shift, 46); /* Maximum architected size */
    return shift;
}
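
/*
 * Worked example (illustrative): for ramsize = 4 GiB, pow2ceil() is a
 * no-op, ctz64(2^32) = 32, so shift = 32 - 7 = 25, i.e. a 32 MiB hash
 * table -- exactly 1/128 of RAM, as the comment above says.
 */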
void spapr_free_hpt(sPAPRMachineState *spapr)
{
    g_free(spapr->htab);
    spapr->htab = NULL;
    spapr->htab_shift = 0;
    close_htab_fd(spapr);
}
void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
                          Error **errp)
{
    long rc;

    /* Clean up any HPT info from a previous boot */
    spapr_free_hpt(spapr);

    rc = kvmppc_reset_htab(shift);
    if (rc < 0) {
        /* kernel-side HPT needed, but couldn't allocate one */
        error_setg_errno(errp, errno,
                         "Failed to allocate KVM HPT of order %d (try smaller maxmem?)",
                         shift);
        /* This is almost certainly fatal, but if the caller really
         * wants to carry on with shift == 0, it's welcome to try */
    } else if (rc > 0) {
        /* kernel-side HPT allocated */
        if (rc != shift) {
            error_setg(errp,
                       "Requested order %d HPT, but kernel allocated order %ld (try smaller maxmem?)",
                       shift, rc);
        }

        spapr->htab_shift = shift;
        spapr->htab = NULL;
    } else {
        /* kernel-side HPT not needed, allocate in userspace instead */
        size_t size = 1ULL << shift;
        int i;

        spapr->htab = qemu_memalign(size, size);
        if (!spapr->htab) {
            error_setg_errno(errp, errno,
                             "Could not allocate HPT of order %d", shift);
            return;
        }

        memset(spapr->htab, 0, size);
        spapr->htab_shift = shift;

        for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
            DIRTY_HPTE(HPTE(spapr->htab, i));
        }
    }
    /* We're setting up a hash table, so that means we're not radix */
    spapr->patb_entry = 0;
}
void spapr_setup_hpt_and_vrma(sPAPRMachineState *spapr)
{
    int hpt_shift;

    if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED)
        || (spapr->cas_reboot
            && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) {
        hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size);
    } else {
        uint64_t current_ram_size;

        current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size();
        hpt_shift = spapr_hpt_shift_for_ramsize(current_ram_size);
    }
    spapr_reallocate_hpt(spapr, hpt_shift, &error_fatal);

    if (spapr->vrma_adjust) {
        spapr->rma_size = kvmppc_rma_size(spapr_node0_size(MACHINE(spapr)),
                                          spapr->htab_shift);
    }
}
static int spapr_reset_drcs(Object *child, void *opaque)
{
    sPAPRDRConnector *drc =
        (sPAPRDRConnector *) object_dynamic_cast(child,
                                                 TYPE_SPAPR_DR_CONNECTOR);

    if (drc) {
        spapr_drc_reset(drc);
    }

    return 0;
}
static void spapr_machine_reset(void)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    PowerPCCPU *first_ppc_cpu;
    uint32_t rtas_limit;
    hwaddr rtas_addr, fdt_addr;
    void *fdt;
    int rc;

    spapr_caps_reset(spapr);

    first_ppc_cpu = POWERPC_CPU(first_cpu);
    if (kvm_enabled() && kvmppc_has_cap_mmu_radix() &&
        ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                         spapr->max_compat_pvr)) {
        /* If using KVM with radix mode available, VCPUs can be started
         * without a HPT because KVM will start them in radix mode.
         * Set the GR bit in PATB so that we know there is no HPT. */
        spapr->patb_entry = PATBE1_GR;
    } else {
        spapr_setup_hpt_and_vrma(spapr);
    }

    /* if this reset wasn't generated by CAS, we should reset our
     * negotiated options and start from scratch */
    if (!spapr->cas_reboot) {
        spapr_ovec_cleanup(spapr->ov5_cas);
        spapr->ov5_cas = spapr_ovec_new();

        ppc_set_compat(first_ppc_cpu, spapr->max_compat_pvr, &error_fatal);
    }

    qemu_devices_reset();

    /* DRC reset may cause a device to be unplugged. This will cause troubles
     * if this device is used by another device (eg, a running vhost backend
     * will crash QEMU if the DIMM holding the vring goes away). To avoid such
     * situations, we reset DRCs after all devices have been reset.
     */
    object_child_foreach_recursive(object_get_root(), spapr_reset_drcs, NULL);

    spapr_clear_pending_events(spapr);

    /*
     * We place the device tree and RTAS just below either the top of the RMA,
     * or just below 2GB, whichever is lower, so that it can be
     * processed with 32-bit real mode code if necessary
     */
    rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
    rtas_addr = rtas_limit - RTAS_MAX_SIZE;
    fdt_addr = rtas_addr - FDT_MAX_SIZE;

    fdt = spapr_build_fdt(spapr, rtas_addr, spapr->rtas_size);

    spapr_load_rtas(spapr, fdt, rtas_addr);

    rc = fdt_pack(fdt);

    /* Should only fail if we've built a corrupted tree */
    assert(rc == 0);

    if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
        error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
                     fdt_totalsize(fdt), FDT_MAX_SIZE);
        exit(1);
    }

    /* Load the fdt */
    qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
    cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
    g_free(fdt);

    /* Set up the entry state */
    first_ppc_cpu->env.gpr[3] = fdt_addr;
    first_ppc_cpu->env.gpr[5] = 0;
    first_cpu->halted = 0;
    first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;

    spapr->cas_reboot = false;
}
static void spapr_create_nvram(sPAPRMachineState *spapr)
{
    DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
    DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);

    if (dinfo) {
        qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
                            &error_fatal);
    }

    qdev_init_nofail(dev);

    spapr->nvram = (struct sPAPRNVRAM *)dev;
}
static void spapr_rtc_create(sPAPRMachineState *spapr)
{
    object_initialize(&spapr->rtc, sizeof(spapr->rtc), TYPE_SPAPR_RTC);
    object_property_add_child(OBJECT(spapr), "rtc", OBJECT(&spapr->rtc),
                              &error_fatal);
    object_property_set_bool(OBJECT(&spapr->rtc), true, "realized",
                             &error_fatal);
    object_property_add_alias(OBJECT(spapr), "rtc-time", OBJECT(&spapr->rtc),
                              "date", &error_fatal);
}
/* Returns whether we want to use VGA or not */
static bool spapr_vga_init(PCIBus *pci_bus, Error **errp)
{
    switch (vga_interface_type) {
    case VGA_NONE:
        return false;
    case VGA_DEVICE:
        return true;
    case VGA_STD:
    case VGA_VIRTIO:
        return pci_vga_init(pci_bus) != NULL;
    default:
        error_setg(errp,
                   "Unsupported VGA mode, only -vga std or -vga virtio is supported");
        return false;
    }
}
static int spapr_pre_load(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_load(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}
, int version_id
)
1595 sPAPRMachineState
*spapr
= (sPAPRMachineState
*)opaque
;
1598 err
= spapr_caps_post_migration(spapr
);
1603 if (!object_dynamic_cast(OBJECT(spapr
->ics
), TYPE_ICS_KVM
)) {
1606 PowerPCCPU
*cpu
= POWERPC_CPU(cs
);
1607 icp_resend(ICP(cpu
->intc
));
1611 /* In earlier versions, there was no separate qdev for the PAPR
1612 * RTC, so the RTC offset was stored directly in sPAPREnvironment.
1613 * So when migrating from those versions, poke the incoming offset
1614 * value into the RTC device */
1615 if (version_id
< 3) {
1616 err
= spapr_rtc_import_offset(&spapr
->rtc
, spapr
->rtc_offset
);
1619 if (kvm_enabled() && spapr
->patb_entry
) {
1620 PowerPCCPU
*cpu
= POWERPC_CPU(first_cpu
);
1621 bool radix
= !!(spapr
->patb_entry
& PATBE1_GR
);
1622 bool gtse
= !!(cpu
->env
.spr
[SPR_LPCR
] & LPCR_GTSE
);
1624 err
= kvmppc_configure_v3_mmu(cpu
, radix
, gtse
, spapr
->patb_entry
);
1626 error_report("Process table config unsupported by the host");
static int spapr_pre_save(void *opaque)
{
    int rc;

    rc = spapr_caps_pre_save(opaque);
    if (rc) {
        return rc;
    }

    return 0;
}
static bool version_before_3(void *opaque, int version_id)
{
    return version_id < 3;
}
static bool spapr_pending_events_needed(void *opaque)
{
    sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
    return !QTAILQ_EMPTY(&spapr->pending_events);
}
static const VMStateDescription vmstate_spapr_event_entry = {
    .name = "spapr_event_log_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(summary, sPAPREventLogEntry),
        VMSTATE_UINT32(extended_length, sPAPREventLogEntry),
        VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, sPAPREventLogEntry, 0,
                                     NULL, extended_length),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_spapr_pending_events = {
    .name = "spapr_pending_events",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_pending_events_needed,
    .fields = (VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_events, sPAPRMachineState, 1,
                         vmstate_spapr_event_entry, sPAPREventLogEntry, next),
        VMSTATE_END_OF_LIST()
    },
};
static bool spapr_ov5_cas_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    sPAPROptionVector *ov5_mask = spapr_ovec_new();
    sPAPROptionVector *ov5_legacy = spapr_ovec_new();
    sPAPROptionVector *ov5_removed = spapr_ovec_new();
    bool cas_needed;

    /* Prior to the introduction of sPAPROptionVector, we had two option
     * vectors we dealt with: OV5_FORM1_AFFINITY, and OV5_DRCONF_MEMORY.
     * Both of these options encode machine topology into the device-tree
     * in such a way that the now-booted OS should still be able to interact
     * appropriately with QEMU regardless of what options were actually
     * negotiated on the source side.
     *
     * As such, we can avoid migrating the CAS-negotiated options if these
     * are the only options available on the current machine/platform.
     * Since these are the only options available for pseries-2.7 and
     * earlier, this allows us to maintain old->new/new->old migration
     * compatibility.
     *
     * For QEMU 2.8+, there are additional CAS-negotiatable options available
     * via default pseries-2.8 machines and explicit command-line parameters.
     * Some of these options, like OV5_HP_EVT, *do* require QEMU to be aware
     * of the actual CAS-negotiated values to continue working properly. For
     * example, availability of memory unplug depends on knowing whether
     * OV5_HP_EVT was negotiated via CAS.
     *
     * Thus, for any cases where the set of available CAS-negotiatable
     * options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
     * include the CAS-negotiated options in the migration stream.
     */
    spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
    spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);

    /* spapr_ovec_diff returns true if bits were removed. we avoid using
     * the mask itself since in the future it's possible "legacy" bits may be
     * removed via machine options, which could generate a false positive
     * that breaks migration.
     */
    spapr_ovec_intersect(ov5_legacy, spapr->ov5, ov5_mask);
    cas_needed = spapr_ovec_diff(ov5_removed, spapr->ov5, ov5_legacy);

    spapr_ovec_cleanup(ov5_mask);
    spapr_ovec_cleanup(ov5_legacy);
    spapr_ovec_cleanup(ov5_removed);

    return cas_needed;
}
static const VMStateDescription vmstate_spapr_ov5_cas = {
    .name = "spapr_option_vector_ov5_cas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_ov5_cas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER_V(ov5_cas, sPAPRMachineState, 1,
                                 vmstate_spapr_ovec, sPAPROptionVector),
        VMSTATE_END_OF_LIST()
    },
};
static bool spapr_patb_entry_needed(void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    return !!spapr->patb_entry;
}
static const VMStateDescription vmstate_spapr_patb_entry = {
    .name = "spapr_patb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_patb_entry_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(patb_entry, sPAPRMachineState),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_spapr = {
    .name = "spapr",
    .version_id = 3,
    .minimum_version_id = 1,
    .pre_load = spapr_pre_load,
    .post_load = spapr_post_load,
    .pre_save = spapr_pre_save,
    .fields = (VMStateField[]) {
        /* used to be @next_irq */
        VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),

        /* RTC offset */
        VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),

        VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_ov5_cas,
        &vmstate_spapr_patb_entry,
        &vmstate_spapr_pending_events,
        &vmstate_spapr_cap_htm,
        &vmstate_spapr_cap_vsx,
        &vmstate_spapr_cap_dfp,
        &vmstate_spapr_cap_cfpc,
        &vmstate_spapr_cap_sbbc,
        &vmstate_spapr_cap_ibs,
        NULL
    }
};
static int htab_save_setup(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    /* "Iteration" header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
    } else {
        qemu_put_be32(f, spapr->htab_shift);
    }

    if (spapr->htab) {
        spapr->htab_save_index = 0;
        spapr->htab_first_pass = true;
    } else {
        if (spapr->htab_shift) {
            assert(kvm_enabled());
        }
    }

    return 0;
}
static void htab_save_chunk(QEMUFile *f, sPAPRMachineState *spapr,
                            int chunkstart, int n_valid, int n_invalid)
{
    qemu_put_be32(f, chunkstart);
    qemu_put_be16(f, n_valid);
    qemu_put_be16(f, n_invalid);
    qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
                    HASH_PTE_SIZE_64 * n_valid);
}
static void htab_save_end_marker(QEMUFile *f)
{
    qemu_put_be32(f, 0);
    qemu_put_be16(f, 0);
    qemu_put_be16(f, 0);
}
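
/*
 * Informal sketch of the on-the-wire chunk format produced by
 * htab_save_chunk()/htab_save_end_marker():
 *
 *   be32 chunkstart   index of the first HPTE in the chunk
 *   be16 n_valid      count of valid HPTEs, followed by their
 *                     n_valid * HASH_PTE_SIZE_64 bytes of data
 *   be16 n_invalid    count of invalidated HPTEs (no data)
 *
 * A 0/0/0 triple marks the end of this iteration's stream; htab_load()
 * below parses exactly this layout.
 */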
static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                 int64_t max_ns)
{
    bool has_timeout = max_ns != -1;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(spapr->htab_first_pass);

    do {
        int chunkstart;

        /* Consume invalid HPTEs */
        while ((index < htabslots)
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        /* Consume valid HPTEs */
        chunkstart = index;
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
        }

        if (index > chunkstart) {
            int n_valid = index - chunkstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, 0);

            if (has_timeout &&
                (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }
    } while ((index < htabslots) && !qemu_file_rate_limit(f));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
        spapr->htab_first_pass = false;
    }
    spapr->htab_save_index = index;
}
static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
                                int64_t max_ns)
{
    bool final = max_ns < 0;
    int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
    int examined = 0, sent = 0;
    int index = spapr->htab_save_index;
    int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    assert(!spapr->htab_first_pass);

    do {
        int chunkstart, invalidstart;

        /* Consume non-dirty HPTEs */
        while ((index < htabslots)
               && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
            index++;
            examined++;
        }

        chunkstart = index;
        /* Consume valid dirty HPTEs */
        while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        invalidstart = index;
        /* Consume invalid dirty HPTEs */
        while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
               && HPTE_DIRTY(HPTE(spapr->htab, index))
               && !HPTE_VALID(HPTE(spapr->htab, index))) {
            CLEAN_HPTE(HPTE(spapr->htab, index));
            index++;
            examined++;
        }

        if (index > chunkstart) {
            int n_valid = invalidstart - chunkstart;
            int n_invalid = index - invalidstart;

            htab_save_chunk(f, spapr, chunkstart, n_valid, n_invalid);
            sent += index - chunkstart;

            if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
                break;
            }
        }

        if (examined >= htabslots) {
            break;
        }

        if (index >= htabslots) {
            assert(index == htabslots);
            index = 0;
        }
    } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));

    if (index >= htabslots) {
        assert(index == htabslots);
        index = 0;
    }

    spapr->htab_save_index = index;

    return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}
#define MAX_ITERATION_NS    5000000 /* 5 ms */
#define MAX_KVM_BUF_SIZE    2048
static int htab_save_iterate(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;
    int rc = 0;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 1;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
        if (rc < 0) {
            return rc;
        }
    } else if (spapr->htab_first_pass) {
        htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
    } else {
        rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
    }

    htab_save_end_marker(f);

    return rc;
}
static int htab_save_complete(QEMUFile *f, void *opaque)
{
    sPAPRMachineState *spapr = opaque;
    int fd;

    /* Iteration header */
    if (!spapr->htab_shift) {
        qemu_put_be32(f, -1);
        return 0;
    } else {
        qemu_put_be32(f, 0);
    }

    if (!spapr->htab) {
        int rc;

        assert(kvm_enabled());

        fd = get_htab_fd(spapr);
        if (fd < 0) {
            return fd;
        }

        rc = kvmppc_save_htab(f, fd, MAX_KVM_BUF_SIZE, -1);
        if (rc < 0) {
            return rc;
        }
    } else {
        if (spapr->htab_first_pass) {
            htab_save_first_pass(f, spapr, -1);
        }
        htab_save_later_pass(f, spapr, -1);
    }

    /* End marker */
    htab_save_end_marker(f);

    return 0;
}
static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
    sPAPRMachineState *spapr = opaque;
    uint32_t section_hdr;
    int fd = -1;
    Error *local_err = NULL;

    if (version_id < 1 || version_id > 1) {
        error_report("htab_load() bad version");
        return -EINVAL;
    }

    section_hdr = qemu_get_be32(f);

    if (section_hdr == -1) {
        spapr_free_hpt(spapr);
        return 0;
    }

    if (section_hdr) {
        /* First section gives the htab size */
        spapr_reallocate_hpt(spapr, section_hdr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -EINVAL;
        }
        return 0;
    }

    if (!spapr->htab) {
        assert(kvm_enabled());

        fd = kvmppc_get_htab_fd(true, 0, &local_err);
        if (fd < 0) {
            error_report_err(local_err);
            return fd;
        }
    }

    while (true) {
        uint32_t index;
        uint16_t n_valid, n_invalid;

        index = qemu_get_be32(f);
        n_valid = qemu_get_be16(f);
        n_invalid = qemu_get_be16(f);

        if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
            /* End of Stream */
            break;
        }

        if ((index + n_valid + n_invalid) >
            (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
            /* Bad index in stream */
            error_report(
                "htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
                index, n_valid, n_invalid, spapr->htab_shift);
            return -EINVAL;
        }

        if (spapr->htab) {
            if (n_valid) {
                qemu_get_buffer(f, HPTE(spapr->htab, index),
                                HASH_PTE_SIZE_64 * n_valid);
            }
            if (n_invalid) {
                memset(HPTE(spapr->htab, index + n_valid), 0,
                       HASH_PTE_SIZE_64 * n_invalid);
            }
        } else {
            int rc;

            assert(fd >= 0);

            rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
            if (rc < 0) {
                return rc;
            }
        }
    }

    if (!spapr->htab) {
        assert(fd >= 0);
        close(fd);
    }

    return 0;
}
static void htab_save_cleanup(void *opaque)
{
    sPAPRMachineState *spapr = opaque;

    close_htab_fd(spapr);
}
static SaveVMHandlers savevm_htab_handlers = {
    .save_setup = htab_save_setup,
    .save_live_iterate = htab_save_iterate,
    .save_live_complete_precopy = htab_save_complete,
    .save_cleanup = htab_save_cleanup,
    .load_state = htab_load,
};
static void spapr_boot_set(void *opaque, const char *boot_device,
                           Error **errp)
{
    MachineState *machine = MACHINE(opaque);
    machine->boot_order = g_strdup(boot_device);
}
static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
    int i;

    for (i = 0; i < nr_lmbs; i++) {
        uint64_t addr;

        addr = i * lmb_size + spapr->hotplug_memory.base;
        spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
                               addr / lmb_size);
    }
}
/*
 * If RAM size, maxmem size and individual node mem sizes aren't aligned
 * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
 * since we can't support such unaligned sizes with DRCONF_MEMORY.
 */
static void spapr_validate_node_memory(MachineState *machine, Error **errp)
{
    int i;

    if (machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %llu MiB",
                   machine->ram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Maximum memory size 0x" RAM_ADDR_FMT
                   " is not aligned to %llu MiB",
                   machine->maxram_size,
                   SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    for (i = 0; i < nb_numa_nodes; i++) {
        if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
            error_setg(errp,
                       "Node %d memory size 0x%" PRIx64
                       " is not aligned to %llu MiB",
                       i, numa_info[i].node_mem,
                       SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
            return;
        }
    }
}
/* find cpu slot in machine->possible_cpus by core_id */
static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    int index = id / smp_threads;

    if (index >= ms->possible_cpus->len) {
        return NULL;
    }
    if (idx) {
        *idx = index;
    }
    return &ms->possible_cpus->cpus[index];
}
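/*
 * Example: with smp_threads = 4, an id of 8 (thread 0 of the third
 * core) resolves to possible_cpus->cpus[2]; ids that are not a multiple
 * of smp_threads land in the slot of their containing core.
 */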
static void spapr_set_vsmt_mode(sPAPRMachineState *spapr, Error **errp)
{
    Error *local_err = NULL;
    bool vsmt_user = !!spapr->vsmt;
    int kvm_smt = kvmppc_smt_threads();
    int ret;

    if (!kvm_enabled() && (smp_threads > 1)) {
        error_setg(&local_err, "TCG cannot support more than 1 thread/core "
                   "on a pseries machine");
        goto out;
    }
    if (!is_power_of_2(smp_threads)) {
        error_setg(&local_err, "Cannot support %d threads/core on a pseries "
                   "machine because it must be a power of 2", smp_threads);
        goto out;
    }

    /* Determine the VSMT mode to use: */
    if (vsmt_user) {
        if (spapr->vsmt < smp_threads) {
            error_setg(&local_err, "Cannot support VSMT mode %d"
                       " because it must be >= threads/core (%d)",
                       spapr->vsmt, smp_threads);
            goto out;
        }
        /* In this case, spapr->vsmt has been set by the command line */
    } else {
        /*
         * Default VSMT value is tricky, because we need it to be as
         * consistent as possible (for migration), but this requires
         * changing it for at least some existing cases. We pick 8 as
         * the value that we'd get with KVM on POWER8, the
         * overwhelmingly common case in production systems.
         */
        spapr->vsmt = MAX(8, smp_threads);
    }

    /* KVM: If necessary, set the SMT mode: */
    if (kvm_enabled() && (spapr->vsmt != kvm_smt)) {
        ret = kvmppc_set_smt_threads(spapr->vsmt);
        if (ret) {
            /* Looks like KVM isn't able to change VSMT mode */
            error_setg(&local_err,
                       "Failed to set KVM's VSMT mode to %d (errno %d)",
                       spapr->vsmt, ret);
            /* We can live with that if the default one is big enough
             * for the number of threads, and a submultiple of the one
             * we want. In this case we'll waste some vcpu ids, but
             * behaviour will be correct */
            if ((kvm_smt >= smp_threads) && ((spapr->vsmt % kvm_smt) == 0)) {
                warn_report_err(local_err);
                local_err = NULL;
            } else {
                if (!vsmt_user) {
                    error_append_hint(&local_err,
                                      "On PPC, a VM with %d threads/core"
                                      " on a host with %d threads/core"
                                      " requires the use of VSMT mode %d.\n",
                                      smp_threads, kvm_smt, spapr->vsmt);
                }
                kvmppc_hint_smt_possible(&local_err);
                goto out;
            }
        }
    }
    /* else TCG: nothing to do currently */
out:
    error_propagate(errp, local_err);
}
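/*
 * Example: with smp_threads = 2 and no explicit vsmt option, the
 * default computed above is MAX(8, 2) = 8, i.e. the POWER8/SMT8
 * spacing, so a KVM host already in SMT8 mode needs no
 * kvmppc_set_smt_threads() call at all.
 */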
static void spapr_init_cpus(sPAPRMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    MachineClass *mc = MACHINE_GET_CLASS(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *type = spapr_get_cpu_core_type(machine->cpu_type);
    const CPUArchIdList *possible_cpus;
    int boot_cores_nr = smp_cpus / smp_threads;
    int i;

    possible_cpus = mc->possible_cpu_arch_ids(machine);
    if (mc->has_hotpluggable_cpus) {
        if (smp_cpus % smp_threads) {
            error_report("smp_cpus (%u) must be multiple of threads (%u)",
                         smp_cpus, smp_threads);
            exit(1);
        }
        if (max_cpus % smp_threads) {
            error_report("max_cpus (%u) must be multiple of threads (%u)",
                         max_cpus, smp_threads);
            exit(1);
        }
    } else {
        if (max_cpus != smp_cpus) {
            error_report("This machine version does not support CPU hotplug");
            exit(1);
        }
        boot_cores_nr = possible_cpus->len;
    }

    /* VSMT must be set in order to be able to compute VCPU ids, ie to
     * call xics_max_server_number() or spapr_vcpu_id().
     */
    spapr_set_vsmt_mode(spapr, &error_fatal);

    if (smc->pre_2_10_has_unused_icps) {
        for (i = 0; i < xics_max_server_number(spapr); i++) {
            /* Dummy entries get deregistered when real ICPState objects
             * are registered during CPU core hotplug.
             */
            pre_2_10_vmstate_register_dummy_icp(i);
        }
    }

    for (i = 0; i < possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        if (mc->has_hotpluggable_cpus) {
            spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
                                   spapr_vcpu_id(spapr, core_id));
        }

        if (i < boot_cores_nr) {
            Object *core  = object_new(type);
            int nr_threads = smp_threads;

            /* Handle the partially filled core for older machine types */
            if ((i + 1) * smp_threads >= smp_cpus) {
                nr_threads = smp_cpus - i * smp_threads;
            }

            object_property_set_int(core, nr_threads, "nr-threads",
                                    &error_fatal);
            object_property_set_int(core, core_id, CPU_CORE_PROP_CORE_ID,
                                    &error_fatal);
            object_property_set_bool(core, true, "realized", &error_fatal);
        }
    }
}
/* pSeries LPAR / sPAPR hardware init */
static void spapr_machine_init(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
    const char *kernel_filename = machine->kernel_filename;
    const char *initrd_filename = machine->initrd_filename;
    PCIHostState *phb;
    int i;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    MemoryRegion *rma_region;
    void *rma = NULL;
    hwaddr rma_alloc_size;
    hwaddr node0_size = spapr_node0_size(machine);
    long load_limit, fw_size;
    char *filename;
    Error *resize_hpt_err = NULL;

    msi_nonbroken = true;

    QLIST_INIT(&spapr->phbs);
    QTAILQ_INIT(&spapr->pending_dimm_unplugs);

    /* Check HPT resizing availability */
    kvmppc_check_papr_resize_hpt(&resize_hpt_err);
    if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) {
        /*
         * If the user explicitly requested a mode we should either
         * supply it, or fail completely (which we do below). But if
         * it's not set explicitly, we reset our mode to something
         * that works
         */
        if (resize_hpt_err) {
            spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
            error_free(resize_hpt_err);
            resize_hpt_err = NULL;
        } else {
            spapr->resize_hpt = smc->resize_hpt_default;
        }
    }

    assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT);

    if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) {
        /*
         * User requested HPT resize, but this host can't supply it. Bail out
         */
        error_report_err(resize_hpt_err);
        exit(1);
    }
    /* Allocate RMA if necessary */
    rma_alloc_size = kvmppc_alloc_rma(&rma);

    if (rma_alloc_size == -1) {
        error_report("Unable to create RMA");
        exit(1);
    }

    if (rma_alloc_size && (rma_alloc_size < node0_size)) {
        spapr->rma_size = rma_alloc_size;
    } else {
        spapr->rma_size = node0_size;

        /* With KVM, we don't actually know whether KVM supports an
         * unbounded RMA (PR KVM) or is limited by the hash table size
         * (HV KVM using VRMA), so we always assume the latter
         *
         * In that case, we also limit the initial allocations for RTAS
         * etc... to 256M since we have no way to know what the VRMA size
         * is going to be as it depends on the size of the hash table,
         * which isn't determined yet.
         */
        if (kvm_enabled()) {
            spapr->vrma_adjust = 1;
            spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
        }

        /* Actually we don't support unbounded RMA anymore since we
         * added proper emulation of HV mode. The max we can get is
         * 16G which also happens to be what we configure for PAPR
         * mode so make sure we don't do anything bigger than that
         */
        spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
    }

    if (spapr->rma_size > node0_size) {
        error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
                     spapr->rma_size);
        exit(1);
    }

    /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
    load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
    /* Set up Interrupt Controller before we create the VCPUs */
    xics_system_init(machine, XICS_IRQS_SPAPR, &error_fatal);

    /* Set up containers for ibm,client-architecture-support negotiated options
     */
    spapr->ov5 = spapr_ovec_new();
    spapr->ov5_cas = spapr_ovec_new();

    if (smc->dr_lmb_enabled) {
        spapr_ovec_set(spapr->ov5, OV5_DRCONF_MEMORY);
        spapr_validate_node_memory(machine, &error_fatal);
    }

    spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
    if (!kvm_enabled() || kvmppc_has_cap_mmu_radix()) {
        /* KVM and TCG always allow GTSE with radix... */
        spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
    }
    /* ... but not with hash (currently). */

    /* advertise support for dedicated HP event source to guests */
    if (spapr->use_hotplug_event_source) {
        spapr_ovec_set(spapr->ov5, OV5_HP_EVT);
    }

    /* advertise support for HPT resizing */
    if (spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) {
        spapr_ovec_set(spapr->ov5, OV5_HPT_RESIZE);
    }

    /* init CPUs */
    spapr_init_cpus(spapr);

    if (kvm_enabled()) {
        /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
        kvmppc_enable_logical_ci_hcalls();
        kvmppc_enable_set_mode_hcall();

        /* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
        kvmppc_enable_clear_ref_mod_hcalls();
    }
    /* allocate RAM */
    memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
                                         machine->ram_size);
    memory_region_add_subregion(sysmem, 0, ram);

    if (rma_alloc_size && rma) {
        rma_region = g_new(MemoryRegion, 1);
        memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
                                   rma_alloc_size, rma);
        vmstate_register_ram_global(rma_region);
        memory_region_add_subregion(sysmem, 0, rma_region);
    }

    /* initialize hotplug memory address space */
    if (machine->ram_size < machine->maxram_size) {
        ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
        /*
         * Limit the number of hotpluggable memory slots to half the number
         * of slots that KVM supports, leaving the other half for PCI and
         * other devices. However, ensure that the number of slots doesn't
         * drop below 32.
         */
        int max_memslots = kvm_enabled() ? kvm_get_max_memslots() / 2 :
                           SPAPR_MAX_RAM_SLOTS;

        if (max_memslots < SPAPR_MAX_RAM_SLOTS) {
            max_memslots = SPAPR_MAX_RAM_SLOTS;
        }
        if (machine->ram_slots > max_memslots) {
            error_report("Specified number of memory slots %"
                         PRIu64" exceeds max supported %d",
                         machine->ram_slots, max_memslots);
            exit(1);
        }

        spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
                                              SPAPR_HOTPLUG_MEM_ALIGN);
        memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
                           "hotplug-memory", hotplug_mem_size);
        memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
                                    &spapr->hotplug_memory.mr);
    }

    if (smc->dr_lmb_enabled) {
        spapr_create_lmb_dr_connectors(spapr);
    }
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
    if (!filename) {
        error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
        exit(1);
    }
    spapr->rtas_size = get_image_size(filename);
    if (spapr->rtas_size < 0) {
        error_report("Could not get size of LPAR rtas '%s'", filename);
        exit(1);
    }
    spapr->rtas_blob = g_malloc(spapr->rtas_size);
    if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
        error_report("Could not load LPAR rtas '%s'", filename);
        exit(1);
    }
    if (spapr->rtas_size > RTAS_MAX_SIZE) {
        error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
                     (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
        exit(1);
    }
    g_free(filename);

    /* Set up RTAS event infrastructure */
    spapr_events_init(spapr);

    /* Set up the RTC RTAS interfaces */
    spapr_rtc_create(spapr);

    /* Set up VIO bus */
    spapr->vio_bus = spapr_vio_bus_init();

    for (i = 0; i < serial_max_hds(); i++) {
        if (serial_hd(i)) {
            spapr_vty_create(spapr->vio_bus, serial_hd(i));
        }
    }

    /* We always have at least the nvram device on VIO */
    spapr_create_nvram(spapr);
    /* Set up PCI */
    spapr_pci_rtas_init();

    phb = spapr_create_phb(spapr, 0);

    for (i = 0; i < nb_nics; i++) {
        NICInfo *nd = &nd_table[i];

        if (!nd->model) {
            nd->model = g_strdup("spapr-vlan");
        }

        if (g_str_equal(nd->model, "spapr-vlan") ||
            g_str_equal(nd->model, "ibmveth")) {
            spapr_vlan_create(spapr->vio_bus, nd);
        } else {
            pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
        }
    }

    for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
        spapr_vscsi_create(spapr->vio_bus);
    }

    /* Graphics */
    if (spapr_vga_init(phb->bus, &error_fatal)) {
        spapr->has_graphics = true;
        machine->usb |= defaults_enabled() && !machine->usb_disabled;
    }

    if (machine->usb) {
        if (smc->use_ohci_by_default) {
            pci_create_simple(phb->bus, -1, "pci-ohci");
        } else {
            pci_create_simple(phb->bus, -1, "nec-usb-xhci");
        }

        if (spapr->has_graphics) {
            USBBus *usb_bus = usb_bus_find(-1);

            usb_create_simple(usb_bus, "usb-kbd");
            usb_create_simple(usb_bus, "usb-mouse");
        }
    }

    if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
        error_report(
            "pSeries SLOF firmware requires >= %ldM guest RMA (Real Mode Area memory)",
            MIN_RMA_SLOF);
        exit(1);
    }
    if (kernel_filename) {
        uint64_t lowaddr = 0;

        spapr->kernel_size = load_elf(kernel_filename, translate_kernel_address,
                                      NULL, NULL, &lowaddr, NULL, 1,
                                      PPC_ELF_MACHINE, 0, 0);
        if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) {
            spapr->kernel_size = load_elf(kernel_filename,
                                          translate_kernel_address, NULL, NULL,
                                          &lowaddr, NULL, 0, PPC_ELF_MACHINE,
                                          0, 0);
            spapr->kernel_le = spapr->kernel_size > 0;
        }
        if (spapr->kernel_size < 0) {
            error_report("error loading %s: %s", kernel_filename,
                         load_elf_strerror(spapr->kernel_size));
            exit(1);
        }

        /* load initrd */
        if (initrd_filename) {
            /* Try to locate the initrd in the gap between the kernel
             * and the firmware. Add a bit of space just in case
             */
            spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size
                                  + 0x1ffff) & ~0xffff;
            spapr->initrd_size = load_image_targphys(initrd_filename,
                                                     spapr->initrd_base,
                                                     load_limit
                                                     - spapr->initrd_base);
            if (spapr->initrd_size < 0) {
                error_report("could not load initial ram disk '%s'",
                             initrd_filename);
                exit(1);
            }
        }
    }
    if (bios_name == NULL) {
        bios_name = FW_FILE_NAME;
    }
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (!filename) {
        error_report("Could not find LPAR firmware '%s'", bios_name);
        exit(1);
    }
    fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
    if (fw_size <= 0) {
        error_report("Could not load LPAR firmware '%s'", filename);
        exit(1);
    }
    g_free(filename);

    /* FIXME: Should register things through the MachineState's qdev
     * interface, this is a legacy from the sPAPREnvironment structure
     * which predated MachineState but had a similar function */
    vmstate_register(NULL, 0, &vmstate_spapr, spapr);
    register_savevm_live(NULL, "spapr/htab", -1, 1,
                         &savevm_htab_handlers, spapr);

    qemu_register_boot_set(spapr_boot_set, spapr);

    if (kvm_enabled()) {
        /* to stop and start vmclock */
        qemu_add_vm_change_state_handler(cpu_ppc_clock_vm_state_change,
                                         &spapr->tb);

        kvmppc_spapr_enable_inkernel_multitce();
    }
}
static int spapr_kvm_type(const char *vm_type)
{
    if (!vm_type) {
        return 0;
    }

    if (!strcmp(vm_type, "HV")) {
        return 1;
    }

    if (!strcmp(vm_type, "PR")) {
        return 2;
    }

    error_report("Unknown kvm-type specified '%s'", vm_type);
    exit(1);
}
/*
 * Implementation of an interface to adjust firmware path
 * for the bootindex property handling.
 */
static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
                                   DeviceState *dev)
{
#define CAST(type, obj, name) \
    ((type *)object_dynamic_cast(OBJECT(obj), (name)))
    SCSIDevice *d = CAST(SCSIDevice,  dev, TYPE_SCSI_DEVICE);
    sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
    VHostSCSICommon *vsc = CAST(VHostSCSICommon, dev, TYPE_VHOST_SCSI_COMMON);

    if (d) {
        void *spapr = CAST(void, bus->parent, "spapr-vscsi");
        VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
        USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);

        if (spapr) {
            /*
             * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
             * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
             * in the top 16 bits of the 64-bit LUN
             */
            unsigned id = 0x8000 | (d->id << 8) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 48);
        } else if (virtio) {
            /*
             * We use SRP luns of the form 01000000 | (target << 8) | lun
             * in the top 32 bits of the 64-bit LUN
             * Note: the quote above is from SLOF and it is wrong,
             * the actual binding is:
             * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
             */
            unsigned id = 0x1000000 | (d->id << 16) | d->lun;
            if (d->lun >= 256) {
                /* Use the LUN "flat space addressing method" */
                id |= 0x4000;
            }
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        } else if (usb) {
            /*
             * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
             * in the top 32 bits of the 64-bit LUN
             */
            unsigned usb_port = atoi(usb->port->path);
            unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
            return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
                                   (uint64_t)id << 32);
        }
    }

    /*
     * SLOF probes the USB devices, and if it recognizes that the device is a
     * storage device, it changes its name to "storage" instead of "usb-host",
     * and additionally adds a child node for the SCSI LUN, so the correct
     * boot path in SLOF is something like .../storage@1/disk@xxx" instead.
     */
    if (strcmp("usb-host", qdev_fw_name(dev)) == 0) {
        USBDevice *usbdev = CAST(USBDevice, dev, TYPE_USB_DEVICE);
        if (usb_host_dev_is_scsi_storage(usbdev)) {
            return g_strdup_printf("storage@%s/disk", usbdev->port->path);
        }
    }

    if (phb) {
        /* Replace "pci" with "pci@800000020000000" */
        return g_strdup_printf("pci@%"PRIX64, phb->buid);
    }

    if (vsc) {
        /* Same logic as virtio above */
        unsigned id = 0x1000000 | (vsc->target << 16) | vsc->lun;
        return g_strdup_printf("disk@%"PRIX64, (uint64_t)id << 32);
    }

    if (g_str_equal("pci-bridge", qdev_fw_name(dev))) {
        /* SLOF uses "pci" instead of "pci-bridge" for PCI bridges */
        PCIDevice *pcidev = CAST(PCIDevice, dev, TYPE_PCI_DEVICE);
        return g_strdup_printf("pci@%x", PCI_SLOT(pcidev->devfn));
    }

    return NULL;
}
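/*
 * Example of the vscsi encoding above: a spapr-vscsi disk with id 1 and
 * lun 0 yields id = 0x8000 | (1 << 8) = 0x8100, hence the OF path
 * component "disk@8100000000000000" once shifted into the top 16 bits.
 */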
static char *spapr_get_kvm_type(Object *obj, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    return g_strdup(spapr->kvm_type);
}

static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
    spapr->kvm_type = g_strdup(value);
}

static bool spapr_get_modern_hotplug_events(Object *obj, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    return spapr->use_hotplug_event_source;
}

static void spapr_set_modern_hotplug_events(Object *obj, bool value,
                                            Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->use_hotplug_event_source = value;
}

static bool spapr_get_msix_emulation(Object *obj, Error **errp)
{
    return true;
}
static char *spapr_get_resize_hpt(Object *obj, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    switch (spapr->resize_hpt) {
    case SPAPR_RESIZE_HPT_DEFAULT:
        return g_strdup("default");
    case SPAPR_RESIZE_HPT_DISABLED:
        return g_strdup("disabled");
    case SPAPR_RESIZE_HPT_ENABLED:
        return g_strdup("enabled");
    case SPAPR_RESIZE_HPT_REQUIRED:
        return g_strdup("required");
    }
    g_assert_not_reached();
}

static void spapr_set_resize_hpt(Object *obj, const char *value, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    if (strcmp(value, "default") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DEFAULT;
    } else if (strcmp(value, "disabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED;
    } else if (strcmp(value, "enabled") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_ENABLED;
    } else if (strcmp(value, "required") == 0) {
        spapr->resize_hpt = SPAPR_RESIZE_HPT_REQUIRED;
    } else {
        error_setg(errp, "Bad value for \"resize-hpt\" property");
    }
}

static void spapr_get_vsmt(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
}

static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    visit_type_uint32(v, name, (uint32_t *)opaque, errp);
}
static void spapr_instance_init(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    spapr->htab_fd = -1;
    spapr->use_hotplug_event_source = true;
    object_property_add_str(obj, "kvm-type",
                            spapr_get_kvm_type, spapr_set_kvm_type, NULL);
    object_property_set_description(obj, "kvm-type",
                                    "Specifies the KVM virtualization mode (HV, PR)",
                                    NULL);
    object_property_add_bool(obj, "modern-hotplug-events",
                             spapr_get_modern_hotplug_events,
                             spapr_set_modern_hotplug_events,
                             NULL);
    object_property_set_description(obj, "modern-hotplug-events",
                                    "Use dedicated hotplug event mechanism in"
                                    " place of standard EPOW events when possible"
                                    " (required for memory hot-unplug support)",
                                    NULL);

    ppc_compat_add_property(obj, "max-cpu-compat", &spapr->max_compat_pvr,
                            "Maximum permitted CPU compatibility mode",
                            &error_fatal);

    object_property_add_str(obj, "resize-hpt",
                            spapr_get_resize_hpt, spapr_set_resize_hpt, NULL);
    object_property_set_description(obj, "resize-hpt",
                                    "Resizing of the Hash Page Table (enabled, disabled, required)",
                                    NULL);
    object_property_add(obj, "vsmt", "uint32", spapr_get_vsmt,
                        spapr_set_vsmt, NULL, &spapr->vsmt, &error_abort);
    object_property_set_description(obj, "vsmt",
                                    "Virtual SMT: KVM behaves as if this were"
                                    " the host's SMT mode", &error_abort);
    object_property_add_bool(obj, "vfio-no-msix-emulation",
                             spapr_get_msix_emulation, NULL, NULL);
}
static void spapr_machine_finalizefn(Object *obj)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);

    g_free(spapr->kvm_type);
}
void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg)
{
    cpu_synchronize_state(cs);
    ppc_cpu_do_system_reset(cs);
}

static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        async_run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL);
    }
}
static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
                           uint32_t node, bool dedicated_hp_event_source,
                           Error **errp)
{
    sPAPRDRConnector *drc;
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    int i, fdt_offset, fdt_size;
    void *fdt;
    uint64_t addr = addr_start;
    bool hotplugged = spapr_drc_hotplugged(dev);
    Error *local_err = NULL;

    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        fdt = create_device_tree(&fdt_size);
        fdt_offset = spapr_populate_memory_node(fdt, node, addr,
                                                SPAPR_MEMORY_BLOCK_SIZE);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            while (addr > addr_start) {
                addr -= SPAPR_MEMORY_BLOCK_SIZE;
                drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                      addr / SPAPR_MEMORY_BLOCK_SIZE);
                spapr_drc_detach(drc);
            }
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }
        if (!hotplugged) {
            spapr_drc_reset(drc);
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }
    /* send hotplug notification to the
     * guest only in case of hotplugged memory
     */
    if (hotplugged) {
        if (dedicated_hp_event_source) {
            drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                                  addr_start / SPAPR_MEMORY_BLOCK_SIZE);
            spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                                   nr_lmbs,
                                                   spapr_drc_index(drc));
        } else {
            spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                           nr_lmbs);
        }
    }
}
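/*
 * Example: hotplugging a 1GiB DIMM walks nr_lmbs = 4 LMBs of 256MiB,
 * attaching a device tree fragment to each LMB's DRC; if an attach
 * fails midway, the DRCs attached so far are detached again before the
 * error is propagated.
 */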
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              uint32_t node, Error **errp)
{
    Error *local_err = NULL;
    sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint64_t align, size, addr;

    mr = ddc->get_memory_region(dimm, &local_err);
    if (local_err) {
        goto out;
    }
    align = memory_region_get_alignment(mr);
    size = memory_region_size(mr);

    pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
    if (local_err) {
        goto out;
    }

    addr = object_property_get_uint(OBJECT(dimm),
                                    PC_DIMM_ADDR_PROP, &local_err);
    if (local_err) {
        goto out_unplug;
    }

    spapr_add_lmbs(dev, addr, size, node,
                   spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
                   &local_err);
    if (local_err) {
        goto out_unplug;
    }

    return;

out_unplug:
    pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
    error_propagate(errp, local_err);
}
static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint64_t size;
    char *mem_dev;

    mr = ddc->get_memory_region(dimm, errp);
    if (!mr) {
        return;
    }
    size = memory_region_size(mr);

    if (size % SPAPR_MEMORY_BLOCK_SIZE) {
        error_setg(errp, "Hotplugged memory size must be a multiple of "
                   "%lld MB", SPAPR_MEMORY_BLOCK_SIZE / M_BYTE);
        return;
    }

    mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
    if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
        error_setg(errp, "Memory backend has bad page size. "
                   "Use 'memory-backend-file' with correct mem-path.");
    }

    g_free(mem_dev);
}
struct sPAPRDIMMState {
    PCDIMMDevice *dimm;
    uint32_t nr_lmbs;
    QTAILQ_ENTRY(sPAPRDIMMState) next;
};

static sPAPRDIMMState *spapr_pending_dimm_unplugs_find(sPAPRMachineState *s,
                                                       PCDIMMDevice *dimm)
{
    sPAPRDIMMState *dimm_state = NULL;

    QTAILQ_FOREACH(dimm_state, &s->pending_dimm_unplugs, next) {
        if (dimm_state->dimm == dimm) {
            break;
        }
    }
    return dimm_state;
}
static sPAPRDIMMState *spapr_pending_dimm_unplugs_add(sPAPRMachineState *spapr,
                                                      uint32_t nr_lmbs,
                                                      PCDIMMDevice *dimm)
{
    sPAPRDIMMState *ds = NULL;

    /*
     * If this request is for a DIMM whose removal had failed earlier
     * (due to guest's refusal to remove the LMBs), we would have this
     * dimm already in the pending_dimm_unplugs list. In that
     * case don't add again.
     */
    ds = spapr_pending_dimm_unplugs_find(spapr, dimm);
    if (!ds) {
        ds = g_malloc0(sizeof(sPAPRDIMMState));
        ds->nr_lmbs = nr_lmbs;
        ds->dimm = dimm;
        QTAILQ_INSERT_HEAD(&spapr->pending_dimm_unplugs, ds, next);
    }

    return ds;
}
static void spapr_pending_dimm_unplugs_remove(sPAPRMachineState *spapr,
                                              sPAPRDIMMState *dimm_state)
{
    QTAILQ_REMOVE(&spapr->pending_dimm_unplugs, dimm_state, next);
    g_free(dimm_state);
}
static sPAPRDIMMState *spapr_recover_pending_dimm_state(sPAPRMachineState *ms,
                                                        PCDIMMDevice *dimm)
{
    sPAPRDRConnector *drc;
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
    uint64_t size = memory_region_size(mr);
    uint32_t nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;
    uint32_t avail_lmbs = 0;
    uint64_t addr_start, addr;
    int i;

    addr_start = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                         &error_abort);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);
        if (drc->dev) {
            avail_lmbs++;
        }
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    return spapr_pending_dimm_unplugs_add(ms, avail_lmbs, dimm);
}
/* Callback to be called during DRC release. */
void spapr_lmb_release(DeviceState *dev)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_hotplug_handler(dev));
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr = ddc->get_memory_region(dimm, &error_abort);
    sPAPRDIMMState *ds = spapr_pending_dimm_unplugs_find(spapr, PC_DIMM(dev));

    /* This information will get lost if a migration occurs
     * during the unplug process. In this case recover it. */
    if (ds == NULL) {
        ds = spapr_recover_pending_dimm_state(spapr, PC_DIMM(dev));
        g_assert(ds);
        /* The DRC being examined by the caller at least must be counted */
        g_assert(ds->nr_lmbs);
    }

    if (--ds->nr_lmbs) {
        return;
    }

    /*
     * Now that all the LMBs have been removed by the guest, call the
     * pc-dimm unplug handler to cleanup up the pc-dimm device.
     */
    pc_dimm_memory_unplug(dev, &spapr->hotplug_memory, mr);
    object_unparent(OBJECT(dev));
    spapr_pending_dimm_unplugs_remove(spapr, ds);
}
static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(hotplug_dev);
    Error *local_err = NULL;
    PCDIMMDevice *dimm = PC_DIMM(dev);
    PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
    MemoryRegion *mr;
    uint32_t nr_lmbs;
    uint64_t size, addr_start, addr;
    int i;
    sPAPRDRConnector *drc;

    mr = ddc->get_memory_region(dimm, &local_err);
    if (local_err) {
        goto out;
    }
    size = memory_region_size(mr);
    nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE;

    addr_start = object_property_get_uint(OBJECT(dimm), PC_DIMM_ADDR_PROP,
                                          &local_err);
    if (local_err) {
        goto out;
    }

    /*
     * An existing pending dimm state for this DIMM means that there is an
     * unplug operation in progress, waiting for the spapr_lmb_release
     * callback to complete the job (BQL can't cover that far). In this case,
     * bail out to avoid detaching DRCs that were already released.
     */
    if (spapr_pending_dimm_unplugs_find(spapr, dimm)) {
        error_setg(&local_err,
                   "Memory unplug already in progress for device %s",
                   dev->id);
        goto out;
    }

    spapr_pending_dimm_unplugs_add(spapr, nr_lmbs, dimm);

    addr = addr_start;
    for (i = 0; i < nr_lmbs; i++) {
        drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                              addr / SPAPR_MEMORY_BLOCK_SIZE);
        g_assert(drc);

        spapr_drc_detach(drc);
        addr += SPAPR_MEMORY_BLOCK_SIZE;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
                          addr_start / SPAPR_MEMORY_BLOCK_SIZE);
    spapr_hotplug_req_remove_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
                                              nr_lmbs, spapr_drc_index(drc));
out:
    error_propagate(errp, local_err);
}
static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
                                           sPAPRMachineState *spapr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    DeviceClass *dc = DEVICE_GET_CLASS(cs);
    int id = spapr_get_vcpu_id(cpu);
    void *fdt;
    int offset, fdt_size;
    char *nodename;

    fdt = create_device_tree(&fdt_size);
    nodename = g_strdup_printf("%s@%x", dc->fw_name, id);
    offset = fdt_add_subnode(fdt, 0, nodename);

    spapr_populate_cpu_dt(cs, fdt, offset, spapr);
    g_free(nodename);

    *fdt_offset = offset;
    return fdt;
}
/* Callback to be called during DRC release. */
void spapr_core_release(DeviceState *dev)
{
    MachineState *ms = MACHINE(qdev_get_hotplug_handler(dev));
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
    CPUCore *cc = CPU_CORE(dev);
    CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);

    if (smc->pre_2_10_has_unused_icps) {
        sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            CPUState *cs = CPU(sc->threads[i]);

            pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
        }
    }

    assert(core_slot);
    core_slot->cpu = NULL;
    object_unparent(OBJECT(dev));
}
void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
                               Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    int index;
    sPAPRDRConnector *drc;
    CPUCore *cc = CPU_CORE(dev);

    if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }
    if (index == 0) {
        error_setg(errp, "Boot CPU core may not be unplugged");
        return;
    }

    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));
    g_assert(drc);

    spapr_drc_detach(drc);

    spapr_hotplug_req_remove_by_index(drc);
}
static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                            Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(spapr);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
    CPUCore *cc = CPU_CORE(dev);
    CPUState *cs = CPU(core->threads[0]);
    sPAPRDRConnector *drc;
    Error *local_err = NULL;
    CPUArchId *core_slot;
    int index;
    bool hotplugged = spapr_drc_hotplugged(dev);

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(errp, "Unable to find CPU core with core-id: %d",
                   cc->core_id);
        return;
    }
    drc = spapr_drc_by_id(TYPE_SPAPR_DRC_CPU,
                          spapr_vcpu_id(spapr, cc->core_id));

    g_assert(drc || !mc->has_hotpluggable_cpus);

    if (drc) {
        void *fdt;
        int fdt_offset;

        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);

        spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
        if (local_err) {
            g_free(fdt);
            error_propagate(errp, local_err);
            return;
        }

        if (hotplugged) {
            /*
             * Send hotplug notification interrupt to the guest only
             * in case of hotplugged CPUs.
             */
            spapr_hotplug_req_add_by_index(drc);
        } else {
            spapr_drc_reset(drc);
        }
    }

    core_slot->cpu = OBJECT(dev);

    if (smc->pre_2_10_has_unused_icps) {
        int i;

        for (i = 0; i < cc->nr_threads; i++) {
            cs = CPU(core->threads[i]);
            pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
        }
    }
}
static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
    Error *local_err = NULL;
    CPUCore *cc = CPU_CORE(dev);
    const char *base_core_type = spapr_get_cpu_core_type(machine->cpu_type);
    const char *type = object_get_typename(OBJECT(dev));
    CPUArchId *core_slot;
    int index;

    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
        error_setg(&local_err, "CPU hotplug not supported for this machine");
        goto out;
    }

    if (strcmp(base_core_type, type)) {
        error_setg(&local_err, "CPU core type should be %s", base_core_type);
        goto out;
    }

    if (cc->core_id % smp_threads) {
        error_setg(&local_err, "invalid core id %d", cc->core_id);
        goto out;
    }

    /*
     * In general we should have homogeneous threads-per-core, but old
     * (pre hotplug support) machine types allow the last core to have
     * reduced threads as a compatibility hack for when we allowed
     * total vcpus not a multiple of threads-per-core.
     */
    if (mc->has_hotpluggable_cpus && (cc->nr_threads != smp_threads)) {
        error_setg(&local_err, "invalid nr-threads %d, must be %d",
                   cc->nr_threads, smp_threads);
        goto out;
    }

    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
    if (!core_slot) {
        error_setg(&local_err, "core id %d out of range", cc->core_id);
        goto out;
    }

    if (core_slot->cpu) {
        error_setg(&local_err, "core %d already populated", cc->core_id);
        goto out;
    }

    numa_cpu_pre_plug(core_slot, dev, &local_err);

out:
    error_propagate(errp, local_err);
}
static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
                                      DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(hotplug_dev);
    sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        int node;

        if (!smc->dr_lmb_enabled) {
            error_setg(errp, "Memory hotplug not supported for this machine");
            return;
        }
        node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP, errp);
        if (*errp) {
            return;
        }
        if (node < 0 || node >= MAX_NODES) {
            error_setg(errp, "Invalid node %d", node);
            return;
        }

        spapr_memory_plug(hotplug_dev, dev, node, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_plug(hotplug_dev, dev, errp);
    }
}
static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
                                                DeviceState *dev, Error **errp)
{
    sPAPRMachineState *sms = SPAPR_MACHINE(OBJECT(hotplug_dev));
    MachineClass *mc = MACHINE_GET_CLASS(sms);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        if (spapr_ovec_test(sms->ov5_cas, OV5_HP_EVT)) {
            spapr_memory_unplug_request(hotplug_dev, dev, errp);
        } else {
            /* NOTE: this means there is a window after guest reset, prior to
             * CAS negotiation, where unplug requests will fail due to the
             * capability not being detected yet. This is a bit different than
             * the case with PCI unplug, where the events will be queued and
             * eventually handled by the guest after boot
             */
            error_setg(errp, "Memory hot unplug not supported for this guest");
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        if (!mc->has_hotpluggable_cpus) {
            error_setg(errp, "CPU hot unplug not supported on this machine");
            return;
        }
        spapr_core_unplug_request(hotplug_dev, dev, errp);
    }
}
static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
                                          DeviceState *dev, Error **errp)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
        spapr_memory_pre_plug(hotplug_dev, dev, errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        spapr_core_pre_plug(hotplug_dev, dev, errp);
    }
}
static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
                                                 DeviceState *dev)
{
    if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
        object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
        return HOTPLUG_HANDLER(machine);
    }
    return NULL;
}
static CpuInstanceProperties
spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index)
{
    CPUArchId *core_slot;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    /* make sure possible_cpus are initialized */
    mc->possible_cpu_arch_ids(machine);
    /* get CPU core slot containing thread that matches cpu_index */
    core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL);
    assert(core_slot);
    return core_slot->props;
}
static int64_t spapr_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    return idx / smp_cores % nb_numa_nodes;
}
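/*
 * Example: with smp_cores = 2 and two NUMA nodes, core slots 0 and 1
 * default to node 0, slots 2 and 3 to node 1, wrapping around for
 * higher slot indexes.
 */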
static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
{
    int i;
    const char *core_type;
    int spapr_max_cores = max_cpus / smp_threads;
    MachineClass *mc = MACHINE_GET_CLASS(machine);

    if (!mc->has_hotpluggable_cpus) {
        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
    }
    if (machine->possible_cpus) {
        assert(machine->possible_cpus->len == spapr_max_cores);
        return machine->possible_cpus;
    }

    core_type = spapr_get_cpu_core_type(machine->cpu_type);
    if (!core_type) {
        error_report("Unable to find sPAPR CPU Core definition");
        exit(1);
    }

    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                             sizeof(CPUArchId) * spapr_max_cores);
    machine->possible_cpus->len = spapr_max_cores;
    for (i = 0; i < machine->possible_cpus->len; i++) {
        int core_id = i * smp_threads;

        machine->possible_cpus->cpus[i].type = core_type;
        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
        machine->possible_cpus->cpus[i].arch_id = core_id;
        machine->possible_cpus->cpus[i].props.has_core_id = true;
        machine->possible_cpus->cpus[i].props.core_id = core_id;
    }
    return machine->possible_cpus;
}
static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
                                uint64_t *buid, hwaddr *pio,
                                hwaddr *mmio32, hwaddr *mmio64,
                                unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /*
     * New-style PHB window placement.
     *
     * Goals: Give each PHB a large (1TiB), naturally aligned 64-bit
     * MMIO window, in addition to a 2GiB 32-bit MMIO window and a
     * 64kiB PIO window.
     *
     * Some guest kernels can't work with MMIO windows above 1<<46
     * (64TiB), so we place up to 31 PHBs in the area 32TiB..64TiB
     *
     * 32TiB..(33TiB+1984kiB) contains the 64kiB PIO windows for each
     * PHB stacked together. (32TiB+2GiB)..(32TiB+64GiB) contains the
     * 2GiB 32-bit MMIO windows for each PHB. Then 33..64TiB has the
     * 1TiB 64-bit MMIO windows for each PHB.
     */
    const uint64_t base_buid = 0x800000020000000ULL;
#define SPAPR_MAX_PHBS ((SPAPR_PCI_LIMIT - SPAPR_PCI_BASE) / \
                        SPAPR_PCI_MEM64_WIN_SIZE - 1)
    int i;

    /* Sanity check natural alignments */
    QEMU_BUILD_BUG_ON((SPAPR_PCI_BASE % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_LIMIT % SPAPR_PCI_MEM64_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM64_WIN_SIZE % SPAPR_PCI_MEM32_WIN_SIZE) != 0);
    QEMU_BUILD_BUG_ON((SPAPR_PCI_MEM32_WIN_SIZE % SPAPR_PCI_IO_WIN_SIZE) != 0);
    /* Sanity check bounds */
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_IO_WIN_SIZE) >
                      SPAPR_PCI_MEM32_WIN_SIZE);
    QEMU_BUILD_BUG_ON((SPAPR_MAX_PHBS * SPAPR_PCI_MEM32_WIN_SIZE) >
                      SPAPR_PCI_MEM64_WIN_SIZE);

    if (index >= SPAPR_MAX_PHBS) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %llu)",
                   SPAPR_MAX_PHBS - 1);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
    *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
    *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
}
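/*
 * Example: for index 0 this places the PIO window at SPAPR_PCI_BASE,
 * the 32-bit MMIO window at SPAPR_PCI_BASE + 2GiB and the 64-bit MMIO
 * window at SPAPR_PCI_BASE + 1TiB, matching the layout described in
 * the comment above.
 */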
static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);

    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
}

static void spapr_ics_resend(XICSFabric *dev)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);

    ics_resend(spapr->ics);
}

static ICPState *spapr_icp_get(XICSFabric *xi, int vcpu_id)
{
    PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);

    return cpu ? ICP(cpu->intc) : NULL;
}
#define ICS_IRQ_FREE(ics, srcno)   \
    (!((ics)->irqs[(srcno)].flags & (XICS_FLAGS_IRQ_MASK)))

static int ics_find_free_block(ICSState *ics, int num, int alignnum)
{
    int first, i;

    for (first = 0; first < ics->nr_irqs; first += alignnum) {
        if (num > (ics->nr_irqs - first)) {
            return -1;
        }
        for (i = first; i < first + num; ++i) {
            if (!ICS_IRQ_FREE(ics, i)) {
                break;
            }
        }
        if (i == (first + num)) {
            return first;
        }
    }

    return -1;
}
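/*
 * Example: ics_find_free_block(ics, 4, 4) only probes source numbers
 * 0, 4, 8, ... and returns the first one where all four consecutive
 * IRQs are free, or -1 if no such aligned block exists.
 */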
/*
 * Allocate the IRQ number and set the IRQ type, LSI or MSI
 */
static void spapr_irq_set_lsi(sPAPRMachineState *spapr, int irq, bool lsi)
{
    ics_set_irq_type(spapr->ics, irq - spapr->ics->offset, lsi);
}

int spapr_irq_alloc(sPAPRMachineState *spapr, int irq_hint, bool lsi,
                    Error **errp)
{
    ICSState *ics = spapr->ics;
    int irq;

    assert(ics);

    if (irq_hint) {
        if (!ICS_IRQ_FREE(ics, irq_hint - ics->offset)) {
            error_setg(errp, "can't allocate IRQ %d: already in use", irq_hint);
            return -1;
        }
        irq = irq_hint;
    } else {
        irq = ics_find_free_block(ics, 1, 1);
        if (irq < 0) {
            error_setg(errp, "can't allocate IRQ: no IRQ left");
            return -1;
        }
        irq += ics->offset;
    }

    spapr_irq_set_lsi(spapr, irq, lsi);
    trace_spapr_irq_alloc(irq);

    return irq;
}
/*
 * Allocate a block of consecutive IRQs, and return the number of the first
 * IRQ in the block. If align == true, aligns the first IRQ number to num.
 */
int spapr_irq_alloc_block(sPAPRMachineState *spapr, int num, bool lsi,
                          bool align, Error **errp)
{
    ICSState *ics = spapr->ics;
    int i, first = -1;

    assert(ics);

    /*
     * MSIMessage::data is used for storing VIRQ so
     * it has to be aligned to num to support multiple
     * MSI vectors. MSI-X is not affected by this.
     * The hint is used for the first IRQ, the rest should
     * be allocated continuously.
     */
    if (align) {
        assert((num == 1) || (num == 2) || (num == 4) ||
               (num == 8) || (num == 16) || (num == 32));
        first = ics_find_free_block(ics, num, num);
    } else {
        first = ics_find_free_block(ics, num, 1);
    }
    if (first < 0) {
        error_setg(errp, "can't find a free %d-IRQ block", num);
        return -1;
    }

    first += ics->offset;
    for (i = first; i < first + num; ++i) {
        spapr_irq_set_lsi(spapr, i, lsi);
    }

    trace_spapr_irq_alloc_block(first, num, lsi, align);

    return first;
}
void spapr_irq_free(sPAPRMachineState *spapr, int irq, int num)
{
    ICSState *ics = spapr->ics;
    int srcno = irq - ics->offset;
    int i;

    if (ics_valid_irq(ics, irq)) {
        trace_spapr_irq_free(0, irq, num);
        for (i = srcno; i < srcno + num; ++i) {
            if (ICS_IRQ_FREE(ics, i)) {
                trace_spapr_irq_free_warn(0, i + ics->offset);
            }
            memset(&ics->irqs[i], 0, sizeof(ICSIRQState));
        }
    }
}
qemu_irq spapr_qirq(sPAPRMachineState *spapr, int irq)
{
    ICSState *ics = spapr->ics;

    if (ics_valid_irq(ics, irq)) {
        return ics->qirqs[irq - ics->offset];
    }

    return NULL;
}
static void spapr_pic_print_info(InterruptStatsProvider *obj,
                                 Monitor *mon)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        icp_pic_print_info(ICP(cpu->intc), mon);
    }

    ics_pic_print_info(spapr->ics, mon);
}
int spapr_get_vcpu_id(PowerPCCPU *cpu)
{
    return cpu->vcpu_id;
}

void spapr_set_vcpu_id(PowerPCCPU *cpu, int cpu_index, Error **errp)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    int vcpu_id;

    vcpu_id = spapr_vcpu_id(spapr, cpu_index);

    if (kvm_enabled() && !kvm_vcpu_id_is_valid(vcpu_id)) {
        error_setg(errp, "Can't create CPU with id %d in KVM", vcpu_id);
        error_append_hint(errp, "Adjust the number of cpus to %d "
                          "or try to raise the number of threads per core\n",
                          vcpu_id * smp_threads / spapr->vsmt);
        return;
    }

    cpu->vcpu_id = vcpu_id;
}
PowerPCCPU *spapr_find_cpu(int vcpu_id)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        if (spapr_get_vcpu_id(cpu) == vcpu_id) {
            return cpu;
        }
    }

    return NULL;
}
static void spapr_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
    PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);

    mc->desc = "pSeries Logical Partition (PAPR compliant)";

    /*
     * We set up the default / latest behaviour here. The class_init
     * functions for the specific versioned machine types can override
     * these details for backwards compatibility
     */
    mc->init = spapr_machine_init;
    mc->reset = spapr_machine_reset;
    mc->block_default_type = IF_SCSI;
    mc->max_cpus = 1024;
    mc->no_parallel = 1;
    mc->default_boot_order = "";
    mc->default_ram_size = 512 * M_BYTE;
    mc->kvm_type = spapr_kvm_type;
    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_SPAPR_PCI_HOST_BRIDGE);
    mc->pci_allow_0_address = true;
    mc->get_hotplug_handler = spapr_get_hotplug_handler;
    hc->pre_plug = spapr_machine_device_pre_plug;
    hc->plug = spapr_machine_device_plug;
    mc->cpu_index_to_instance_props = spapr_cpu_index_to_props;
    mc->get_default_cpu_node_id = spapr_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
    hc->unplug_request = spapr_machine_device_unplug_request;

    smc->dr_lmb_enabled = true;
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power8_v2.0");
    mc->has_hotpluggable_cpus = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED;
    fwc->get_dev_path = spapr_get_fw_dev_path;
    nc->nmi_monitor_handler = spapr_nmi;
    smc->phb_placement = spapr_phb_placement;
    vhc->hypercall = emulate_spapr_hypercall;
    vhc->hpt_mask = spapr_hpt_mask;
    vhc->map_hptes = spapr_map_hptes;
    vhc->unmap_hptes = spapr_unmap_hptes;
    vhc->store_hpte = spapr_store_hpte;
    vhc->get_patbe = spapr_get_patbe;
    vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr;
    xic->ics_get = spapr_ics_get;
    xic->ics_resend = spapr_ics_resend;
    xic->icp_get = spapr_icp_get;
    ispc->print_info = spapr_pic_print_info;
    /* Force NUMA node memory size to be a multiple of
     * SPAPR_MEMORY_BLOCK_SIZE (256M) since that's the granularity
     * in which LMBs are represented and hot-added
     */
    mc->numa_mem_align_shift = 28;

    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_OFF;
    smc->default_caps.caps[SPAPR_CAP_VSX] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_DFP] = SPAPR_CAP_ON;
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN;
    spapr_caps_add_properties(smc, &error_abort);
}
static const TypeInfo spapr_machine_info = {
    .name          = TYPE_SPAPR_MACHINE,
    .parent        = TYPE_MACHINE,
    .abstract      = true,
    .instance_size = sizeof(sPAPRMachineState),
    .instance_init = spapr_instance_init,
    .instance_finalize = spapr_machine_finalizefn,
    .class_size    = sizeof(sPAPRMachineClass),
    .class_init    = spapr_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { TYPE_NMI },
        { TYPE_HOTPLUG_HANDLER },
        { TYPE_PPC_VIRTUAL_HYPERVISOR },
        { TYPE_XICS_FABRIC },
        { TYPE_INTERRUPT_STATS_PROVIDER },
        { }
    },
};
#define DEFINE_SPAPR_MACHINE(suffix, verstr, latest)                 \
    static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \
                                                    void *data)      \
    {                                                                \
        MachineClass *mc = MACHINE_CLASS(oc);                        \
        spapr_machine_##suffix##_class_options(mc);                  \
        if (latest) {                                                \
            mc->alias = "pseries";                                   \
            mc->is_default = 1;                                      \
        }                                                            \
    }                                                                \
    static void spapr_machine_##suffix##_instance_init(Object *obj)  \
    {                                                                \
        MachineState *machine = MACHINE(obj);                        \
        spapr_machine_##suffix##_instance_options(machine);          \
    }                                                                \
    static const TypeInfo spapr_machine_##suffix##_info = {          \
        .name = MACHINE_TYPE_NAME("pseries-" verstr),                \
        .parent = TYPE_SPAPR_MACHINE,                                \
        .class_init = spapr_machine_##suffix##_class_init,           \
        .instance_init = spapr_machine_##suffix##_instance_init,     \
    };                                                               \
    static void spapr_machine_register_##suffix(void)                \
    {                                                                \
        type_register(&spapr_machine_##suffix##_info);               \
    }                                                                \
    type_init(spapr_machine_register_##suffix)
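/*
 * Each versioned machine type below chains its *_instance_options() and
 * *_class_options() to those of the next newer version, so every
 * version inherits all later defaults and then applies its own compat
 * overrides on top.
 */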
/*
 * pseries-2.13
 */
static void spapr_machine_2_13_instance_options(MachineState *machine)
{
}

static void spapr_machine_2_13_class_options(MachineClass *mc)
{
    /* Defaults for the latest behaviour inherited from the base class */
}

DEFINE_SPAPR_MACHINE(2_13, "2.13", true);
/*
 * pseries-2.12
 */
#define SPAPR_COMPAT_2_12                                              \
    HW_COMPAT_2_12

static void spapr_machine_2_12_instance_options(MachineState *machine)
{
    spapr_machine_2_13_instance_options(machine);
}

static void spapr_machine_2_12_class_options(MachineClass *mc)
{
    spapr_machine_2_13_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_12);
}

DEFINE_SPAPR_MACHINE(2_12, "2.12", false);
static void spapr_machine_2_12_sxxm_instance_options(MachineState *machine)
{
    spapr_machine_2_12_instance_options(machine);
}

static void spapr_machine_2_12_sxxm_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_12_class_options(mc);
    smc->default_caps.caps[SPAPR_CAP_CFPC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_WORKAROUND;
    smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_FIXED_CCD;
}

DEFINE_SPAPR_MACHINE(2_12_sxxm, "2.12-sxxm", false);
/*
 * pseries-2.11
 */
#define SPAPR_COMPAT_2_11                                              \
    HW_COMPAT_2_11

static void spapr_machine_2_11_instance_options(MachineState *machine)
{
    spapr_machine_2_12_instance_options(machine);
}

static void spapr_machine_2_11_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_12_class_options(mc);
    smc->default_caps.caps[SPAPR_CAP_HTM] = SPAPR_CAP_ON;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_11);
}

DEFINE_SPAPR_MACHINE(2_11, "2.11", false);
/*
 * pseries-2.10
 */
#define SPAPR_COMPAT_2_10                                              \
    HW_COMPAT_2_10

static void spapr_machine_2_10_instance_options(MachineState *machine)
{
    spapr_machine_2_11_instance_options(machine);
}

static void spapr_machine_2_10_class_options(MachineClass *mc)
{
    spapr_machine_2_11_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_10);
}

DEFINE_SPAPR_MACHINE(2_10, "2.10", false);
/*
 * pseries-2.9
 */
#define SPAPR_COMPAT_2_9                                               \
    HW_COMPAT_2_9                                                      \
    {                                                                  \
        .driver = TYPE_POWERPC_CPU,                                    \
        .property = "pre-2.10-migration",                              \
        .value    = "on",                                              \
    },                                                                 \

static void spapr_machine_2_9_instance_options(MachineState *machine)
{
    spapr_machine_2_10_instance_options(machine);
}

static void spapr_machine_2_9_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_10_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9);
    mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
    smc->pre_2_10_has_unused_icps = true;
    smc->resize_hpt_default = SPAPR_RESIZE_HPT_DISABLED;
}

DEFINE_SPAPR_MACHINE(2_9, "2.9", false);
/*
 * pseries-2.8
 */
#define SPAPR_COMPAT_2_8                                        \
    HW_COMPAT_2_8                                               \
    {                                                           \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                 \
        .property = "pcie-extended-configuration-space",        \
        .value    = "off",                                      \
    },

static void spapr_machine_2_8_instance_options(MachineState *machine)
{
    spapr_machine_2_9_instance_options(machine);
}

static void spapr_machine_2_8_class_options(MachineClass *mc)
{
    spapr_machine_2_9_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_8);
    mc->numa_mem_align_shift = 23;
}

DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
/*
 * pseries-2.7
 */
#define SPAPR_COMPAT_2_7                                          \
    HW_COMPAT_2_7                                                 \
    {                                                             \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                   \
        .property = "mem_win_size",                               \
        .value    = stringify(SPAPR_PCI_2_7_MMIO_WIN_SIZE),       \
    },                                                            \
    {                                                             \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                   \
        .property = "mem64_win_size",                             \
        .value    = "0",                                          \
    },                                                            \
    {                                                             \
        .driver = TYPE_POWERPC_CPU,                               \
        .property = "pre-2.8-migration",                          \
        .value    = "on",                                         \
    },                                                            \
    {                                                             \
        .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,                     \
        .property = "pre-2.8-migration",                          \
        .value    = "on",                                         \
    },

static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
                              uint64_t *buid, hwaddr *pio,
                              hwaddr *mmio32, hwaddr *mmio64,
                              unsigned n_dma, uint32_t *liobns, Error **errp)
{
    /* Legacy PHB placement for pseries-2.7 and earlier machine types */
    const uint64_t base_buid = 0x800000020000000ULL;
    const hwaddr phb_spacing = 0x1000000000ULL; /* 64 GiB */
    const hwaddr mmio_offset = 0xa0000000; /* 2 GiB + 512 MiB */
    const hwaddr pio_offset = 0x80000000; /* 2 GiB */
    const uint32_t max_index = 255;
    const hwaddr phb0_alignment = 0x10000000000ULL; /* 1 TiB */

    uint64_t ram_top = MACHINE(spapr)->ram_size;
    hwaddr phb0_base, phb_base;
    int i;

    /* Do we have hotpluggable memory? */
    if (MACHINE(spapr)->maxram_size > ram_top) {
        /* Can't just use maxram_size, because there may be an
         * alignment gap between normal and hotpluggable memory
         * regions
         */
        ram_top = spapr->hotplug_memory.base +
            memory_region_size(&spapr->hotplug_memory.mr);
    }

    phb0_base = QEMU_ALIGN_UP(ram_top, phb0_alignment);

    if (index > max_index) {
        error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
                   max_index);
        return;
    }

    *buid = base_buid + index;
    for (i = 0; i < n_dma; ++i) {
        liobns[i] = SPAPR_PCI_LIOBN(index, i);
    }

    phb_base = phb0_base + index * phb_spacing;
    *pio = phb_base + pio_offset;
    *mmio32 = phb_base + mmio_offset;
    /*
     * We don't set the 64-bit MMIO window, relying on the PHB's
     * fallback behaviour of automatically splitting a large "32-bit"
     * window into contiguous 32-bit and 64-bit windows
     */
}

static void spapr_machine_2_7_instance_options(MachineState *machine)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);

    spapr_machine_2_8_instance_options(machine);
    spapr->use_hotplug_event_source = false;
}

static void spapr_machine_2_7_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_8_class_options(mc);
    mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power7_v2.3");
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_7);
    smc->phb_placement = phb_placement_2_7;
}

DEFINE_SPAPR_MACHINE(2_7, "2.7", false);
/*
 * pseries-2.6
 */
#define SPAPR_COMPAT_2_6                                       \
    HW_COMPAT_2_6                                              \
    {                                                          \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                \
        .property = "ddw",                                     \
        .value    = stringify(off),                            \
    },

static void spapr_machine_2_6_instance_options(MachineState *machine)
{
    spapr_machine_2_7_instance_options(machine);
}

static void spapr_machine_2_6_class_options(MachineClass *mc)
{
    spapr_machine_2_7_class_options(mc);
    mc->has_hotpluggable_cpus = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
}

DEFINE_SPAPR_MACHINE(2_6, "2.6", false);
/*
 * pseries-2.5
 */
#define SPAPR_COMPAT_2_5                                       \
    HW_COMPAT_2_5                                              \
    {                                                          \
        .driver   = "spapr-vlan",                              \
        .property = "use-rx-buffer-pools",                     \
        .value    = "off",                                     \
    },

static void spapr_machine_2_5_instance_options(MachineState *machine)
{
    spapr_machine_2_6_instance_options(machine);
}

static void spapr_machine_2_5_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_6_class_options(mc);
    smc->use_ohci_by_default = true;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_5);
}

DEFINE_SPAPR_MACHINE(2_5, "2.5", false);
/*
 * pseries-2.4
 */
#define SPAPR_COMPAT_2_4                                       \
    HW_COMPAT_2_4

static void spapr_machine_2_4_instance_options(MachineState *machine)
{
    spapr_machine_2_5_instance_options(machine);
}

static void spapr_machine_2_4_class_options(MachineClass *mc)
{
    sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);

    spapr_machine_2_5_class_options(mc);
    smc->dr_lmb_enabled = false;
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_4);
}

DEFINE_SPAPR_MACHINE(2_4, "2.4", false);
/*
 * pseries-2.3
 */
#define SPAPR_COMPAT_2_3                                       \
    HW_COMPAT_2_3                                              \
    {                                                          \
        .driver   = "spapr-pci-host-bridge",                   \
        .property = "dynamic-reconfiguration",                 \
        .value    = "off",                                     \
    },

static void spapr_machine_2_3_instance_options(MachineState *machine)
{
    spapr_machine_2_4_instance_options(machine);
}

static void spapr_machine_2_3_class_options(MachineClass *mc)
{
    spapr_machine_2_4_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_3);
}
DEFINE_SPAPR_MACHINE(2_3, "2.3", false);
/*
 * pseries-2.2
 */
#define SPAPR_COMPAT_2_2                                       \
    HW_COMPAT_2_2                                              \
    {                                                          \
        .driver   = TYPE_SPAPR_PCI_HOST_BRIDGE,                \
        .property = "mem_win_size",                            \
        .value    = "0x20000000",                              \
    },

static void spapr_machine_2_2_instance_options(MachineState *machine)
{
    spapr_machine_2_3_instance_options(machine);
    machine->suppress_vmdesc = true;
}

static void spapr_machine_2_2_class_options(MachineClass *mc)
{
    spapr_machine_2_3_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_2);
}
DEFINE_SPAPR_MACHINE(2_2, "2.2", false);
/*
 * pseries-2.1
 */
#define SPAPR_COMPAT_2_1                                       \
    HW_COMPAT_2_1

static void spapr_machine_2_1_instance_options(MachineState *machine)
{
    spapr_machine_2_2_instance_options(machine);
}

static void spapr_machine_2_1_class_options(MachineClass *mc)
{
    spapr_machine_2_2_class_options(mc);
    SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_1);
}
DEFINE_SPAPR_MACHINE(2_1, "2.1", false);
static void spapr_machine_register_types(void)
{
    type_register_static(&spapr_machine_info);
}

type_init(spapr_machine_register_types)