hw/i386: Make pit a property of common x86 base machine type
hw/i386/x86.c
/*
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2019 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/datadir.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qapi-visit-common.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/visitor.h"
#include "sysemu/qtest.h"
#include "sysemu/whpx.h"
#include "sysemu/numa.h"
#include "sysemu/replay.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/xen.h"
#include "trace.h"

#include "hw/i386/x86.h"
#include "target/i386/cpu.h"
#include "hw/i386/topology.h"
#include "hw/i386/fw_cfg.h"
#include "hw/intc/i8259.h"
#include "hw/rtc/mc146818rtc.h"
#include "target/i386/sev.h"

#include "hw/acpi/cpu_hotplug.h"
#include "hw/irq.h"
#include "hw/nmi.h"
#include "hw/loader.h"
#include "multiboot.h"
#include "elf.h"
#include "standard-headers/asm-x86/bootparam.h"
#include CONFIG_DEVICES
#include "kvm/kvm_i386.h"
/* Physical Address of PVH entry point read from kernel ELF NOTE */
static size_t pvh_start_addr;

inline void init_topo_info(X86CPUTopoInfo *topo_info,
                           const X86MachineState *x86ms)
{
    MachineState *ms = MACHINE(x86ms);

    topo_info->dies_per_pkg = ms->smp.dies;
    topo_info->cores_per_die = ms->smp.cores;
    topo_info->threads_per_core = ms->smp.threads;
}
/*
 * Calculates initial APIC ID for a specific CPU index
 *
 * Currently we need to be able to calculate the APIC ID from the CPU index
 * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have
 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
 * all CPUs up to max_cpus.
 */
uint32_t x86_cpu_apic_id_from_index(X86MachineState *x86ms,
                                    unsigned int cpu_index)
{
    X86CPUTopoInfo topo_info;

    init_topo_info(&topo_info, x86ms);

    return x86_apicid_from_cpu_idx(&topo_info, cpu_index);
}
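
/*
 * Instantiate a CPU of the machine's cpu_type with the given APIC ID and
 * realize it.  The local reference is dropped in every case; errors are
 * reported through @errp.
 */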
void x86_cpu_new(X86MachineState *x86ms, int64_t apic_id, Error **errp)
{
    Object *cpu = object_new(MACHINE(x86ms)->cpu_type);

    if (!object_property_set_uint(cpu, "apic-id", apic_id, errp)) {
        goto out;
    }
    qdev_realize(DEVICE(cpu), NULL, errp);

out:
    object_unref(cpu);
}
void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version)
{
    int i;
    const CPUArchIdList *possible_cpus;
    MachineState *ms = MACHINE(x86ms);
    MachineClass *mc = MACHINE_GET_CLASS(x86ms);

    x86_cpu_set_default_version(default_cpu_version);

    /*
     * Calculates the limit to CPU APIC ID values
     *
     * Limit for the APIC ID value, so that all
     * CPU APIC IDs are < x86ms->apic_id_limit.
     *
     * This is used for FW_CFG_MAX_CPUS. See comments on fw_cfg_arch_create().
     */
    x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms,
                                                      ms->smp.max_cpus - 1) + 1;

    /*
     * Can we support APIC ID 255 or higher?
     *
     * Under Xen: yes.
     * With userspace emulated lapic: no
     * With KVM's in-kernel lapic: only if X2APIC API is enabled.
     */
    if (x86ms->apic_id_limit > 255 && !xen_enabled() &&
        (!kvm_irqchip_in_kernel() || !kvm_enable_x2apic())) {
        error_report("current -smp configuration requires kernel "
                     "irqchip and X2APIC API support.");
        exit(EXIT_FAILURE);
    }

    possible_cpus = mc->possible_cpu_arch_ids(ms);
    for (i = 0; i < ms->smp.cpus; i++) {
        x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal);
    }
}
void x86_rtc_set_cpus_count(ISADevice *rtc, uint16_t cpus_count)
{
    if (cpus_count > 0xff) {
        /*
         * If the number of CPUs can't be represented in 8 bits, the
         * BIOS must use "FW_CFG_NB_CPUS". Set RTC field to 0 just
         * to make old BIOSes fail more predictably.
         */
        rtc_set_memory(rtc, 0x5f, 0);
    } else {
        rtc_set_memory(rtc, 0x5f, cpus_count - 1);
    }
}
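
/*
 * bsearch() comparator for x86_find_cpu_slot() below; ms->possible_cpus is
 * built with monotonically increasing arch_id (APIC ID), so it can be
 * searched as a sorted array.
 */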
static int x86_apic_cmp(const void *a, const void *b)
{
    CPUArchId *apic_a = (CPUArchId *)a;
    CPUArchId *apic_b = (CPUArchId *)b;

    return apic_a->arch_id - apic_b->arch_id;
}

/*
 * Returns a pointer to the CPUArchId descriptor in ms->possible_cpus->cpus
 * that matches the CPU's apic_id; returns NULL if there is no entry
 * corresponding to the CPU's apic_id.
 */
CPUArchId *x86_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
{
    CPUArchId apic_id, *found_cpu;

    apic_id.arch_id = id;
    found_cpu = bsearch(&apic_id, ms->possible_cpus->cpus,
                        ms->possible_cpus->len, sizeof(*ms->possible_cpus->cpus),
                        x86_apic_cmp);
    if (found_cpu && idx) {
        *idx = found_cpu - ms->possible_cpus->cpus;
    }
    return found_cpu;
}
void x86_cpu_plug(HotplugHandler *hotplug_dev,
                  DeviceState *dev, Error **errp)
{
    CPUArchId *found_cpu;
    Error *local_err = NULL;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    if (x86ms->acpi_dev) {
        hotplug_handler_plug(x86ms->acpi_dev, dev, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* increment the number of CPUs */
    x86ms->boot_cpus++;
    if (x86ms->rtc) {
        x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus);
    }
    if (x86ms->fw_cfg) {
        fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus);
    }

    found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL);
    found_cpu->cpu = OBJECT(dev);
out:
    error_propagate(errp, local_err);
}
void x86_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
                               DeviceState *dev, Error **errp)
{
    int idx = -1;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    if (!x86ms->acpi_dev) {
        error_setg(errp, "CPU hot unplug not supported without ACPI");
        return;
    }

    x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
    assert(idx != -1);
    if (idx == 0) {
        error_setg(errp, "Boot CPU is unpluggable");
        return;
    }

    hotplug_handler_unplug_request(x86ms->acpi_dev, dev,
                                   errp);
}
void x86_cpu_unplug_cb(HotplugHandler *hotplug_dev,
                       DeviceState *dev, Error **errp)
{
    CPUArchId *found_cpu;
    Error *local_err = NULL;
    X86CPU *cpu = X86_CPU(dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);

    hotplug_handler_unplug(x86ms->acpi_dev, dev, &local_err);
    if (local_err) {
        goto out;
    }

    found_cpu = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, NULL);
    found_cpu->cpu = NULL;
    qdev_unrealize(dev);

    /* decrement the number of CPUs */
    x86ms->boot_cpus--;
    /* Update the number of CPUs in CMOS */
    x86_rtc_set_cpus_count(x86ms->rtc, x86ms->boot_cpus);
    fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus);
out:
    error_propagate(errp, local_err);
}
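
/*
 * pre_plug handler for x86 CPUs (cold- and hot-plugged alike): validates
 * the socket/die/core/thread properties against the machine topology,
 * derives or cross-checks the APIC ID, and picks the CPU slot and
 * cpu_index before the device is realized.
 */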
void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
                      DeviceState *dev, Error **errp)
{
    int idx;
    CPUState *cs;
    CPUArchId *cpu_slot;
    X86CPUTopoIDs topo_ids;
    X86CPU *cpu = X86_CPU(dev);
    CPUX86State *env = &cpu->env;
    MachineState *ms = MACHINE(hotplug_dev);
    X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
    unsigned int smp_cores = ms->smp.cores;
    unsigned int smp_threads = ms->smp.threads;
    X86CPUTopoInfo topo_info;

    if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
        error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
                   ms->cpu_type);
        return;
    }

    if (x86ms->acpi_dev) {
        Error *local_err = NULL;

        hotplug_handler_pre_plug(HOTPLUG_HANDLER(x86ms->acpi_dev), dev,
                                 &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    init_topo_info(&topo_info, x86ms);

    env->nr_dies = ms->smp.dies;

    /*
     * If APIC ID is not set,
     * set it based on socket/die/core/thread properties.
     */
    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        int max_socket = (ms->smp.max_cpus - 1) /
                                smp_threads / smp_cores / ms->smp.dies;

        /*
         * die-id was optional in QEMU 4.0 and older, so keep it optional
         * if there's only one die per socket.
         */
        if (cpu->die_id < 0 && ms->smp.dies == 1) {
            cpu->die_id = 0;
        }

        if (cpu->socket_id < 0) {
            error_setg(errp, "CPU socket-id is not set");
            return;
        } else if (cpu->socket_id > max_socket) {
            error_setg(errp, "Invalid CPU socket-id: %u must be in range 0:%u",
                       cpu->socket_id, max_socket);
            return;
        }
        if (cpu->die_id < 0) {
            error_setg(errp, "CPU die-id is not set");
            return;
        } else if (cpu->die_id > ms->smp.dies - 1) {
            error_setg(errp, "Invalid CPU die-id: %u must be in range 0:%u",
                       cpu->die_id, ms->smp.dies - 1);
            return;
        }
        if (cpu->core_id < 0) {
            error_setg(errp, "CPU core-id is not set");
            return;
        } else if (cpu->core_id > (smp_cores - 1)) {
            error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u",
                       cpu->core_id, smp_cores - 1);
            return;
        }
        if (cpu->thread_id < 0) {
            error_setg(errp, "CPU thread-id is not set");
            return;
        } else if (cpu->thread_id > (smp_threads - 1)) {
            error_setg(errp, "Invalid CPU thread-id: %u must be in range 0:%u",
                       cpu->thread_id, smp_threads - 1);
            return;
        }

        topo_ids.pkg_id = cpu->socket_id;
        topo_ids.die_id = cpu->die_id;
        topo_ids.core_id = cpu->core_id;
        topo_ids.smt_id = cpu->thread_id;
        cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
    }

    cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
    if (!cpu_slot) {
        MachineState *ms = MACHINE(x86ms);

        x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
        error_setg(errp,
            "Invalid CPU [socket: %u, die: %u, core: %u, thread: %u] with"
            " APIC ID %" PRIu32 ", valid index range 0:%d",
            topo_ids.pkg_id, topo_ids.die_id, topo_ids.core_id, topo_ids.smt_id,
            cpu->apic_id, ms->possible_cpus->len - 1);
        return;
    }

    if (cpu_slot->cpu) {
        error_setg(errp, "CPU[%d] with APIC ID %" PRIu32 " exists",
                   idx, cpu->apic_id);
        return;
    }

    /* if 'address' properties socket-id/core-id/thread-id are not set, set them
     * so that machine_query_hotpluggable_cpus would show correct values
     */
    /* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
     * once -smp refactoring is complete and there will be CPU private
     * CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
    x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
    if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
        error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
            " 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
            topo_ids.pkg_id);
        return;
    }
    cpu->socket_id = topo_ids.pkg_id;

    if (cpu->die_id != -1 && cpu->die_id != topo_ids.die_id) {
        error_setg(errp, "property die-id: %u doesn't match set apic-id:"
            " 0x%x (die-id: %u)", cpu->die_id, cpu->apic_id, topo_ids.die_id);
        return;
    }
    cpu->die_id = topo_ids.die_id;

    if (cpu->core_id != -1 && cpu->core_id != topo_ids.core_id) {
        error_setg(errp, "property core-id: %u doesn't match set apic-id:"
            " 0x%x (core-id: %u)", cpu->core_id, cpu->apic_id,
            topo_ids.core_id);
        return;
    }
    cpu->core_id = topo_ids.core_id;

    if (cpu->thread_id != -1 && cpu->thread_id != topo_ids.smt_id) {
        error_setg(errp, "property thread-id: %u doesn't match set apic-id:"
            " 0x%x (thread-id: %u)", cpu->thread_id, cpu->apic_id,
            topo_ids.smt_id);
        return;
    }
    cpu->thread_id = topo_ids.smt_id;

    if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) &&
        !kvm_hv_vpindex_settable()) {
        error_setg(errp, "kernel doesn't allow setting HyperV VP_INDEX");
        return;
    }

    cs = CPU(cpu);
    cs->cpu_index = idx;

    numa_cpu_pre_plug(cpu_slot, dev, errp);
}
CpuInstanceProperties
x86_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}
int64_t x86_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    X86CPUTopoIDs topo_ids;
    X86MachineState *x86ms = X86_MACHINE(ms);
    X86CPUTopoInfo topo_info;

    init_topo_info(&topo_info, x86ms);

    assert(idx < ms->possible_cpus->len);
    x86_topo_ids_from_apicid(ms->possible_cpus->cpus[idx].arch_id,
                             &topo_info, &topo_ids);
    return topo_ids.pkg_id % ms->numa_state->num_nodes;
}
const CPUArchIdList *x86_possible_cpu_arch_ids(MachineState *ms)
{
    X86MachineState *x86ms = X86_MACHINE(ms);
    unsigned int max_cpus = ms->smp.max_cpus;
    X86CPUTopoInfo topo_info;
    int i;

    if (ms->possible_cpus) {
        /*
         * make sure that max_cpus hasn't changed since the first use, i.e.
         * -smp hasn't been parsed after it
         */
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;

    init_topo_info(&topo_info, x86ms);

    for (i = 0; i < ms->possible_cpus->len; i++) {
        X86CPUTopoIDs topo_ids;

        ms->possible_cpus->cpus[i].type = ms->cpu_type;
        ms->possible_cpus->cpus[i].vcpus_count = 1;
        ms->possible_cpus->cpus[i].arch_id =
            x86_cpu_apic_id_from_index(x86ms, i);
        x86_topo_ids_from_apicid(ms->possible_cpus->cpus[i].arch_id,
                                 &topo_info, &topo_ids);
        ms->possible_cpus->cpus[i].props.has_socket_id = true;
        ms->possible_cpus->cpus[i].props.socket_id = topo_ids.pkg_id;
        if (ms->smp.dies > 1) {
            ms->possible_cpus->cpus[i].props.has_die_id = true;
            ms->possible_cpus->cpus[i].props.die_id = topo_ids.die_id;
        }
        ms->possible_cpus->cpus[i].props.has_core_id = true;
        ms->possible_cpus->cpus[i].props.core_id = topo_ids.core_id;
        ms->possible_cpus->cpus[i].props.has_thread_id = true;
        ms->possible_cpus->cpus[i].props.thread_id = topo_ids.smt_id;
    }
    return ms->possible_cpus;
}
static void x86_nmi(NMIState *n, int cpu_index, Error **errp)
{
    /* cpu index isn't used */
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
}
static long get_file_size(FILE *f)
{
    long where, size;

    /* XXX: on Unix systems, using fstat() probably makes more sense */

    where = ftell(f);
    fseek(f, 0, SEEK_END);
    size = ftell(f);
    fseek(f, where, SEEK_SET);

    return size;
}
/* TSC handling */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpus_get_elapsed_ticks();
}
/* IRQ handling */
static void pic_irq_request(void *opaque, int irq, int level)
{
    CPUState *cs = first_cpu;
    X86CPU *cpu = X86_CPU(cs);

    trace_x86_pic_interrupt(irq, level);
    if (cpu->apic_state && !kvm_irqchip_in_kernel() &&
        !whpx_apic_in_platform()) {
        CPU_FOREACH(cs) {
            cpu = X86_CPU(cs);
            if (apic_accept_pic_intr(cpu->apic_state)) {
                apic_deliver_pic_intr(cpu->apic_state, level);
            }
        }
    } else {
        if (level) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}

qemu_irq x86_allocate_cpu_irq(void)
{
    return qemu_allocate_irq(pic_irq_request, NULL, 0);
}
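
/*
 * Called when a CPU takes CPU_INTERRUPT_HARD: with a userspace local APIC
 * the pending vector is taken from the APIC first, falling back to the
 * i8259 only when the APIC accepts ExtINT delivery; otherwise the vector
 * is read directly from the PIC.
 */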
int cpu_get_pic_interrupt(CPUX86State *env)
{
    X86CPU *cpu = env_archcpu(env);
    int intno;

    if (!kvm_irqchip_in_kernel() && !whpx_apic_in_platform()) {
        intno = apic_get_interrupt(cpu->apic_state);
        if (intno >= 0) {
            return intno;
        }
        /* read the irq from the PIC */
        if (!apic_accept_pic_intr(cpu->apic_state)) {
            return -1;
        }
    }

    intno = pic_read_irq(isa_pic);
    return intno;
}
DeviceState *cpu_get_current_apic(void)
{
    if (current_cpu) {
        X86CPU *cpu = X86_CPU(current_cpu);
        return cpu->apic_state;
    } else {
        return NULL;
    }
}
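
/*
 * GSI dispatch: IRQs 0..15 are raised on the i8259 (when wired) and fall
 * through to the IOAPIC, IRQs up to IOAPIC_NUM_PINS - 1 go to the IOAPIC
 * only, and GSIs from IO_APIC_SECONDARY_IRQBASE upwards go to the optional
 * second IOAPIC.
 */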
void gsi_handler(void *opaque, int n, int level)
{
    GSIState *s = opaque;

    trace_x86_gsi_interrupt(n, level);
    switch (n) {
    case 0 ... ISA_NUM_IRQS - 1:
        if (s->i8259_irq[n]) {
            /* Under KVM, Kernel will forward to both PIC and IOAPIC */
            qemu_set_irq(s->i8259_irq[n], level);
        }
        /* fall through */
    case ISA_NUM_IRQS ... IOAPIC_NUM_PINS - 1:
        qemu_set_irq(s->ioapic_irq[n], level);
        break;
    case IO_APIC_SECONDARY_IRQBASE
        ... IO_APIC_SECONDARY_IRQBASE + IOAPIC_NUM_PINS - 1:
        qemu_set_irq(s->ioapic2_irq[n - IO_APIC_SECONDARY_IRQBASE], level);
        break;
    }
}
void ioapic_init_gsi(GSIState *gsi_state, const char *parent_name)
{
    DeviceState *dev;
    SysBusDevice *d;
    unsigned int i;

    assert(parent_name);
    if (kvm_ioapic_in_kernel()) {
        dev = qdev_new(TYPE_KVM_IOAPIC);
    } else {
        dev = qdev_new(TYPE_IOAPIC);
    }
    object_property_add_child(object_resolve_path(parent_name, NULL),
                              "ioapic", OBJECT(dev));
    d = SYS_BUS_DEVICE(dev);
    sysbus_realize_and_unref(d, &error_fatal);
    sysbus_mmio_map(d, 0, IO_APIC_DEFAULT_ADDRESS);

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        gsi_state->ioapic_irq[i] = qdev_get_gpio_in(dev, i);
    }
}
DeviceState *ioapic_init_secondary(GSIState *gsi_state)
{
    DeviceState *dev;
    SysBusDevice *d;
    unsigned int i;

    dev = qdev_new(TYPE_IOAPIC);
    d = SYS_BUS_DEVICE(dev);
    sysbus_realize_and_unref(d, &error_fatal);
    sysbus_mmio_map(d, 0, IO_APIC_SECONDARY_ADDRESS);

    for (i = 0; i < IOAPIC_NUM_PINS; i++) {
        gsi_state->ioapic2_irq[i] = qdev_get_gpio_in(dev, i);
    }
    return dev;
}
struct setup_data {
    uint64_t next;
    uint32_t type;
    uint32_t len;
    uint8_t data[];
} __attribute__((packed));
/*
 * The entry point into the kernel for PVH boot is different from
 * the native entry point. The PVH entry is defined by the x86/HVM
 * direct boot ABI and is available in an ELFNOTE in the kernel binary.
 *
 * This function is passed to load_elf() when it is called from
 * load_elfboot() which then additionally checks for an ELF Note of
 * type XEN_ELFNOTE_PHYS32_ENTRY and passes it to this function to
 * parse the PVH entry address from the ELF Note.
 *
 * Due to trickery in elf_opts.h, load_elf() is actually available as
 * load_elf32() or load_elf64() and this routine needs to be able
 * to deal with being called as 32 or 64 bit.
 *
 * The address of the PVH entry point is saved to the 'pvh_start_addr'
 * global variable. (although the entry point is 32-bit, the kernel
 * binary can be either 32-bit or 64-bit).
 */
static uint64_t read_pvh_start_addr(void *arg1, void *arg2, bool is64)
{
    size_t *elf_note_data_addr;

    /* Check if ELF Note header passed in is valid */
    if (arg1 == NULL) {
        return 0;
    }

    if (is64) {
        struct elf64_note *nhdr64 = (struct elf64_note *)arg1;
        uint64_t nhdr_size64 = sizeof(struct elf64_note);
        uint64_t phdr_align = *(uint64_t *)arg2;
        uint64_t nhdr_namesz = nhdr64->n_namesz;

        elf_note_data_addr =
            ((void *)nhdr64) + nhdr_size64 +
            QEMU_ALIGN_UP(nhdr_namesz, phdr_align);

        pvh_start_addr = *elf_note_data_addr;
    } else {
        struct elf32_note *nhdr32 = (struct elf32_note *)arg1;
        uint32_t nhdr_size32 = sizeof(struct elf32_note);
        uint32_t phdr_align = *(uint32_t *)arg2;
        uint32_t nhdr_namesz = nhdr32->n_namesz;

        elf_note_data_addr =
            ((void *)nhdr32) + nhdr_size32 +
            QEMU_ALIGN_UP(nhdr_namesz, phdr_align);

        pvh_start_addr = *(uint32_t *)elf_note_data_addr;
    }

    return pvh_start_addr;
}
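
/*
 * Try to load the kernel as an ELF binary bootable through the PVH entry
 * point.  Returns false if the image is not ELF at all; malformed or
 * unsupported ELF files terminate QEMU with an error.
 */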
static bool load_elfboot(const char *kernel_filename,
                         int kernel_file_size,
                         uint8_t *header,
                         size_t pvh_xen_start_addr,
                         FWCfgState *fw_cfg)
{
    uint32_t flags = 0;
    uint32_t mh_load_addr = 0;
    uint32_t elf_kernel_size = 0;
    uint64_t elf_entry;
    uint64_t elf_low, elf_high;
    int kernel_size;

    if (ldl_p(header) != 0x464c457f) {
        return false; /* no elfboot */
    }

    bool elf_is64 = header[EI_CLASS] == ELFCLASS64;
    flags = elf_is64 ?
        ((Elf64_Ehdr *)header)->e_flags : ((Elf32_Ehdr *)header)->e_flags;

    if (flags & 0x00010004) { /* LOAD_ELF_HEADER_HAS_ADDR */
        error_report("elfboot unsupported flags = %x", flags);
        exit(1);
    }

    uint64_t elf_note_type = XEN_ELFNOTE_PHYS32_ENTRY;
    kernel_size = load_elf(kernel_filename, read_pvh_start_addr,
                           NULL, &elf_note_type, &elf_entry,
                           &elf_low, &elf_high, NULL, 0, I386_ELF_MACHINE,
                           0, 0);

    if (kernel_size < 0) {
        error_report("Error while loading elf kernel");
        exit(1);
    }
    mh_load_addr = elf_low;
    elf_kernel_size = elf_high - elf_low;

    if (pvh_start_addr == 0) {
        error_report("Error loading uncompressed kernel without PVH ELF Note");
        exit(1);
    }
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ENTRY, pvh_start_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, mh_load_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, elf_kernel_size);

    return true;
}
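
/*
 * Load a kernel image for direct boot.  Images carrying the Linux "HdrS"
 * boot signature follow the Linux boot protocol below; otherwise multiboot
 * is tried first and then, if enabled, PVH ELF boot.  Failing all of that,
 * the image is treated as an ancient kernel (protocol 0).
 */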
void x86_load_linux(X86MachineState *x86ms,
                    FWCfgState *fw_cfg,
                    int acpi_data_size,
                    bool pvh_enabled)
{
    bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled;
    uint16_t protocol;
    int setup_size, kernel_size, cmdline_size;
    int dtb_size, setup_data_offset;
    uint32_t initrd_max;
    uint8_t header[8192], *setup, *kernel;
    hwaddr real_addr, prot_addr, cmdline_addr, initrd_addr = 0;
    FILE *f;
    char *vmode;
    MachineState *machine = MACHINE(x86ms);
    struct setup_data *setup_data;
    const char *kernel_filename = machine->kernel_filename;
    const char *initrd_filename = machine->initrd_filename;
    const char *dtb_filename = machine->dtb;
    const char *kernel_cmdline = machine->kernel_cmdline;
    SevKernelLoaderContext sev_load_ctx = {};

    /* Align to 16 bytes as a paranoia measure */
    cmdline_size = (strlen(kernel_cmdline) + 16) & ~15;

    /* load the kernel header */
    f = fopen(kernel_filename, "rb");
    if (!f) {
        fprintf(stderr, "qemu: could not open kernel file '%s': %s\n",
                kernel_filename, strerror(errno));
        exit(1);
    }

    kernel_size = get_file_size(f);
    if (!kernel_size ||
        fread(header, 1, MIN(ARRAY_SIZE(header), kernel_size), f) !=
        MIN(ARRAY_SIZE(header), kernel_size)) {
        fprintf(stderr, "qemu: could not load kernel '%s': %s\n",
                kernel_filename, strerror(errno));
        exit(1);
    }

    /* kernel protocol version */
    if (ldl_p(header + 0x202) == 0x53726448) {
        protocol = lduw_p(header + 0x206);
    } else {
        /*
         * This could be a multiboot kernel. If it is, let's stop treating it
         * like a Linux kernel.
         * Note: some multiboot images could be in the ELF format (the same as
         * PVH), so we try multiboot first since we check the multiboot magic
         * header before loading it.
         */
        if (load_multiboot(x86ms, fw_cfg, f, kernel_filename, initrd_filename,
                           kernel_cmdline, kernel_size, header)) {
            return;
        }
        /*
         * Check if the file is an uncompressed kernel file (ELF) and load it,
         * saving the PVH entry point used by the x86/HVM direct boot ABI.
         * If load_elfboot() is successful, populate the fw_cfg info.
         */
        if (pvh_enabled &&
            load_elfboot(kernel_filename, kernel_size,
                         header, pvh_start_addr, fw_cfg)) {
            fclose(f);

            fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE,
                           strlen(kernel_cmdline) + 1);
            fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);

            fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, sizeof(header));
            fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA,
                             header, sizeof(header));

            /* load initrd */
            if (initrd_filename) {
                GMappedFile *mapped_file;
                gsize initrd_size;
                gchar *initrd_data;
                GError *gerr = NULL;

                mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
                if (!mapped_file) {
                    fprintf(stderr, "qemu: error reading initrd %s: %s\n",
                            initrd_filename, gerr->message);
                    exit(1);
                }
                x86ms->initrd_mapped_file = mapped_file;

                initrd_data = g_mapped_file_get_contents(mapped_file);
                initrd_size = g_mapped_file_get_length(mapped_file);
                initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
                if (initrd_size >= initrd_max) {
                    fprintf(stderr, "qemu: initrd is too large, cannot support."
                            "(max: %"PRIu32", need %"PRId64")\n",
                            initrd_max, (uint64_t)initrd_size);
                    exit(1);
                }

                initrd_addr = (initrd_max - initrd_size) & ~4095;

                fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr);
                fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
                fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data,
                                 initrd_size);
            }

            option_rom[nb_option_roms].bootindex = 0;
            option_rom[nb_option_roms].name = "pvh.bin";
            nb_option_roms++;

            return;
        }
        protocol = 0;
    }
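
    /*
     * Choose load addresses from the boot protocol revision: real_addr is
     * where the real-mode setup code is placed, prot_addr is where the
     * protected-mode kernel goes, and cmdline_addr holds the command line.
     */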
    if (protocol < 0x200 || !(header[0x211] & 0x01)) {
        /* Low kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x10000;
    } else if (protocol < 0x202) {
        /* High but ancient kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x100000;
    } else {
        /* High and recent kernel */
        real_addr    = 0x10000;
        cmdline_addr = 0x20000;
        prot_addr    = 0x100000;
    }

    /* highest address for loading the initrd */
    if (protocol >= 0x20c &&
        lduw_p(header + 0x236) & XLF_CAN_BE_LOADED_ABOVE_4G) {
        /*
         * Linux has supported initrd up to 4 GB for a very long time (2007,
         * long before XLF_CAN_BE_LOADED_ABOVE_4G which was added in 2013),
         * though it only sets initrd_max to 2 GB to "work around bootloader
         * bugs". Luckily, QEMU firmware (which does something like a
         * bootloader) has supported this.
         *
         * It's believed that if XLF_CAN_BE_LOADED_ABOVE_4G is set, initrd can
         * be loaded into any address.
         *
         * In addition, initrd_max is uint32_t simply because QEMU doesn't
         * support the 64-bit boot protocol (specifically the ext_ramdisk_image
         * field).
         *
         * Therefore, just limit initrd_max to UINT32_MAX here as well.
         */
        initrd_max = UINT32_MAX;
    } else if (protocol >= 0x203) {
        initrd_max = ldl_p(header + 0x22c);
    } else {
        initrd_max = 0x37ffffff;
    }

    if (initrd_max >= x86ms->below_4g_mem_size - acpi_data_size) {
        initrd_max = x86ms->below_4g_mem_size - acpi_data_size - 1;
    }

    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1);
    fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline);
    sev_load_ctx.cmdline_data = (char *)kernel_cmdline;
    sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1;

    if (protocol >= 0x202) {
        stl_p(header + 0x228, cmdline_addr);
    } else {
        stw_p(header + 0x20, 0xA33F);
        stw_p(header + 0x22, cmdline_addr - real_addr);
    }

    /* handle vga= parameter */
    vmode = strstr(kernel_cmdline, "vga=");
    if (vmode) {
        unsigned int video_mode;
        const char *end;
        int ret;
        /* skip "vga=" */
        vmode += 4;
        if (!strncmp(vmode, "normal", 6)) {
            video_mode = 0xffff;
        } else if (!strncmp(vmode, "ext", 3)) {
            video_mode = 0xfffe;
        } else if (!strncmp(vmode, "ask", 3)) {
            video_mode = 0xfffd;
        } else {
            ret = qemu_strtoui(vmode, &end, 0, &video_mode);
            if (ret != 0 || (*end && *end != ' ')) {
                fprintf(stderr, "qemu: invalid 'vga=' kernel parameter.\n");
                exit(1);
            }
        }
        stw_p(header + 0x1fa, video_mode);
    }

    /* loader type */
    /*
     * High nybble = B reserved for QEMU; low nybble is revision number.
     * If this code is substantially changed, you may want to consider
     * incrementing the revision.
     */
    if (protocol >= 0x200) {
        header[0x210] = 0xB0;
    }
    /* heap */
    if (protocol >= 0x201) {
        header[0x211] |= 0x80; /* CAN_USE_HEAP */
        stw_p(header + 0x224, cmdline_addr - real_addr - 0x200);
    }

    /* load initrd */
    if (initrd_filename) {
        GMappedFile *mapped_file;
        gsize initrd_size;
        gchar *initrd_data;
        GError *gerr = NULL;

        if (protocol < 0x200) {
            fprintf(stderr, "qemu: linux kernel too old to load a ram disk\n");
            exit(1);
        }

        mapped_file = g_mapped_file_new(initrd_filename, false, &gerr);
        if (!mapped_file) {
            fprintf(stderr, "qemu: error reading initrd %s: %s\n",
                    initrd_filename, gerr->message);
            exit(1);
        }
        x86ms->initrd_mapped_file = mapped_file;

        initrd_data = g_mapped_file_get_contents(mapped_file);
        initrd_size = g_mapped_file_get_length(mapped_file);
        if (initrd_size >= initrd_max) {
            fprintf(stderr, "qemu: initrd is too large, cannot support."
                    "(max: %"PRIu32", need %"PRId64")\n",
                    initrd_max, (uint64_t)initrd_size);
            exit(1);
        }

        initrd_addr = (initrd_max - initrd_size) & ~4095;

        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr);
        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
        fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size);
        sev_load_ctx.initrd_data = initrd_data;
        sev_load_ctx.initrd_size = initrd_size;

        stl_p(header + 0x218, initrd_addr);
        stl_p(header + 0x21c, initrd_size);
    }
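
    /*
     * Per the Linux/x86 boot protocol, header[0x1f1] is setup_sects; a value
     * of 0 means 4, and the real-mode setup occupies (setup_sects + 1)
     * 512-byte sectors including the boot sector.
     */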
    /* load kernel and setup */
    setup_size = header[0x1f1];
    if (setup_size == 0) {
        setup_size = 4;
    }
    setup_size = (setup_size + 1) * 512;
    if (setup_size > kernel_size) {
        fprintf(stderr, "qemu: invalid kernel header\n");
        exit(1);
    }
    kernel_size -= setup_size;

    setup  = g_malloc(setup_size);
    kernel = g_malloc(kernel_size);
    fseek(f, 0, SEEK_SET);
    if (fread(setup, 1, setup_size, f) != setup_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    if (fread(kernel, 1, kernel_size, f) != kernel_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    fclose(f);

    /* append dtb to kernel */
    if (dtb_filename) {
        if (protocol < 0x209) {
            fprintf(stderr, "qemu: Linux kernel too old to load a dtb\n");
            exit(1);
        }

        dtb_size = get_image_size(dtb_filename);
        if (dtb_size <= 0) {
            fprintf(stderr, "qemu: error reading dtb %s: %s\n",
                    dtb_filename, strerror(errno));
            exit(1);
        }

        setup_data_offset = QEMU_ALIGN_UP(kernel_size, 16);
        kernel_size = setup_data_offset + sizeof(struct setup_data) + dtb_size;
        kernel = g_realloc(kernel, kernel_size);

        stq_p(header + 0x250, prot_addr + setup_data_offset);

        setup_data = (struct setup_data *)(kernel + setup_data_offset);
        setup_data->next = 0;
        setup_data->type = cpu_to_le32(SETUP_DTB);
        setup_data->len = cpu_to_le32(dtb_size);

        load_image_size(dtb_filename, setup_data->data, dtb_size);
    }

    /*
     * If we're starting an encrypted VM, it will be OVMF based, which uses the
     * efi stub for booting and doesn't require any values to be placed in the
     * kernel header. We therefore don't update the header so the hash of the
     * kernel on the other side of the fw_cfg interface matches the hash of the
     * file the user passed in.
     */
    if (!sev_enabled()) {
        memcpy(setup, header, MIN(sizeof(header), setup_size));
    }

    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
    sev_load_ctx.kernel_data = (char *)kernel;
    sev_load_ctx.kernel_size = kernel_size;

    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size);
    sev_load_ctx.setup_data = (char *)setup;
    sev_load_ctx.setup_size = setup_size;

    if (sev_enabled()) {
        sev_add_kernel_loader_hashes(&sev_load_ctx, &error_fatal);
    }

    option_rom[nb_option_roms].bootindex = 0;
    option_rom[nb_option_roms].name = "linuxboot.bin";
    if (linuxboot_dma_enabled && fw_cfg_dma_enabled(fw_cfg)) {
        option_rom[nb_option_roms].name = "linuxboot_dma.bin";
    }
    nb_option_roms++;
}
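
/*
 * Load the firmware image: it is mapped as ROM right below 4 GiB and its
 * last 128KB (at most) are aliased into the legacy range just under 1 MiB.
 * For SEV guests the image is loaded straight into the RAM region instead
 * of being registered as a ROM.
 */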
void x86_bios_rom_init(MachineState *ms, const char *default_firmware,
                       MemoryRegion *rom_memory, bool isapc_ram_fw)
{
    const char *bios_name;
    char *filename;
    MemoryRegion *bios, *isa_bios;
    int bios_size, isa_bios_size;
    int ret;

    /* BIOS load */
    bios_name = ms->firmware ?: default_firmware;
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (filename) {
        bios_size = get_image_size(filename);
    } else {
        bios_size = -1;
    }
    if (bios_size <= 0 ||
        (bios_size % 65536) != 0) {
        goto bios_error;
    }
    bios = g_malloc(sizeof(*bios));
    memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal);
    if (sev_enabled()) {
        /*
         * The concept of a "reset" simply doesn't exist for
         * confidential computing guests, we have to destroy and
         * re-launch them instead. So there is no need to register
         * the firmware as rom to properly re-initialize on reset.
         * Just go for a straight file load instead.
         */
        void *ptr = memory_region_get_ram_ptr(bios);
        load_image_size(filename, ptr, bios_size);
        x86_firmware_configure(ptr, bios_size);
    } else {
        if (!isapc_ram_fw) {
            memory_region_set_readonly(bios, true);
        }
        ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1);
        if (ret != 0) {
            goto bios_error;
        }
    }
    g_free(filename);

    /* map the last 128KB of the BIOS in ISA space */
    isa_bios_size = MIN(bios_size, 128 * KiB);
    isa_bios = g_malloc(sizeof(*isa_bios));
    memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
                             bios_size - isa_bios_size, isa_bios_size);
    memory_region_add_subregion_overlap(rom_memory,
                                        0x100000 - isa_bios_size,
                                        isa_bios,
                                        1);
    if (!isapc_ram_fw) {
        memory_region_set_readonly(isa_bios, true);
    }

    /* map all the bios at the top of memory */
    memory_region_add_subregion(rom_memory,
                                (uint32_t)(-bios_size),
                                bios);
    return;

bios_error:
    fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name);
    exit(1);
}
bool x86_machine_is_smm_enabled(const X86MachineState *x86ms)
{
    bool smm_available = false;

    if (x86ms->smm == ON_OFF_AUTO_OFF) {
        return false;
    }

    if (tcg_enabled() || qtest_enabled()) {
        smm_available = true;
    } else if (kvm_enabled()) {
        smm_available = kvm_has_smm();
    }

    if (smm_available) {
        return true;
    }

    if (x86ms->smm == ON_OFF_AUTO_ON) {
        error_report("System Management Mode not supported by this hypervisor.");
        exit(1);
    }
    return false;
}
static void x86_machine_get_smm(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto smm = x86ms->smm;

    visit_type_OnOffAuto(v, name, &smm, errp);
}

static void x86_machine_set_smm(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->smm, errp);
}
bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms)
{
    if (x86ms->acpi == ON_OFF_AUTO_OFF) {
        return false;
    }
    return true;
}

static void x86_machine_get_acpi(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto acpi = x86ms->acpi;

    visit_type_OnOffAuto(v, name, &acpi, errp);
}

static void x86_machine_set_acpi(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->acpi, errp);
}
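
/*
 * Accessors for the "pit" machine property (OnOffAuto) introduced on the
 * common x86 base machine type.  The value is only stored here; the
 * concrete machines are expected to consult x86ms->pit when deciding
 * whether to create an i8254 PIT (e.g. "-machine pit=off" on boards that
 * honour it).
 */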
static void x86_machine_get_pit(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    OnOffAuto pit = x86ms->pit;

    visit_type_OnOffAuto(v, name, &pit, errp);
}

static void x86_machine_set_pit(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_OnOffAuto(v, name, &x86ms->pit, errp);
}
static char *x86_machine_get_oem_id(Object *obj, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    return g_strdup(x86ms->oem_id);
}

static void x86_machine_set_oem_id(Object *obj, const char *value, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 6) {
        error_setg(errp,
                   "User specified "X86_MACHINE_OEM_ID" value is bigger than "
                   "6 bytes in size");
        return;
    }

    strncpy(x86ms->oem_id, value, 6);
}
static char *x86_machine_get_oem_table_id(Object *obj, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    return g_strdup(x86ms->oem_table_id);
}

static void x86_machine_set_oem_table_id(Object *obj, const char *value,
                                         Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    size_t len = strlen(value);

    if (len > 8) {
        error_setg(errp,
                   "User specified "X86_MACHINE_OEM_TABLE_ID
                   " value is bigger than "
                   "8 bytes in size");
        return;
    }
    strncpy(x86ms->oem_table_id, value, 8);
}
static void x86_machine_get_bus_lock_ratelimit(Object *obj, Visitor *v,
                                const char *name, void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    uint64_t bus_lock_ratelimit = x86ms->bus_lock_ratelimit;

    visit_type_uint64(v, name, &bus_lock_ratelimit, errp);
}

static void x86_machine_set_bus_lock_ratelimit(Object *obj, Visitor *v,
                               const char *name, void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    visit_type_uint64(v, name, &x86ms->bus_lock_ratelimit, errp);
}
static void machine_get_sgx_epc(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    SgxEPCList *list = x86ms->sgx_epc_list;

    visit_type_SgxEPCList(v, name, &list, errp);
}

static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name,
                                void *opaque, Error **errp)
{
    X86MachineState *x86ms = X86_MACHINE(obj);
    SgxEPCList *list;

    list = x86ms->sgx_epc_list;
    visit_type_SgxEPCList(v, name, &x86ms->sgx_epc_list, errp);

    qapi_free_SgxEPCList(list);
}
static void x86_machine_initfn(Object *obj)
{
    X86MachineState *x86ms = X86_MACHINE(obj);

    x86ms->smm = ON_OFF_AUTO_AUTO;
    x86ms->acpi = ON_OFF_AUTO_AUTO;
    x86ms->pit = ON_OFF_AUTO_AUTO;
    x86ms->pci_irq_mask = ACPI_BUILD_PCI_IRQS;
    x86ms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6);
    x86ms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8);
    x86ms->bus_lock_ratelimit = 0;
}
static void x86_machine_class_init(ObjectClass *oc, void *data)
{
    MachineClass *mc = MACHINE_CLASS(oc);
    X86MachineClass *x86mc = X86_MACHINE_CLASS(oc);
    NMIClass *nc = NMI_CLASS(oc);

    mc->cpu_index_to_instance_props = x86_cpu_index_to_props;
    mc->get_default_cpu_node_id = x86_get_default_cpu_node_id;
    mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids;
    x86mc->save_tsc_khz = true;
    x86mc->fwcfg_dma_enabled = true;
    nc->nmi_monitor_handler = x86_nmi;

    object_class_property_add(oc, X86_MACHINE_SMM, "OnOffAuto",
                              x86_machine_get_smm, x86_machine_set_smm,
                              NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_SMM,
        "Enable SMM");

    object_class_property_add(oc, X86_MACHINE_ACPI, "OnOffAuto",
                              x86_machine_get_acpi, x86_machine_set_acpi,
                              NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_ACPI,
        "Enable ACPI");

    object_class_property_add(oc, X86_MACHINE_PIT, "OnOffAuto",
                              x86_machine_get_pit,
                              x86_machine_set_pit,
                              NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_PIT,
        "Enable i8254 PIT");

    object_class_property_add_str(oc, X86_MACHINE_OEM_ID,
                                  x86_machine_get_oem_id,
                                  x86_machine_set_oem_id);
    object_class_property_set_description(oc, X86_MACHINE_OEM_ID,
                                          "Override the default value of field OEMID "
                                          "in ACPI table header. "
                                          "The string may be up to 6 bytes in size");

    object_class_property_add_str(oc, X86_MACHINE_OEM_TABLE_ID,
                                  x86_machine_get_oem_table_id,
                                  x86_machine_set_oem_table_id);
    object_class_property_set_description(oc, X86_MACHINE_OEM_TABLE_ID,
                                          "Override the default value of field OEM Table ID "
                                          "in ACPI table header. "
                                          "The string may be up to 8 bytes in size");

    object_class_property_add(oc, X86_MACHINE_BUS_LOCK_RATELIMIT, "uint64_t",
                              x86_machine_get_bus_lock_ratelimit,
                              x86_machine_set_bus_lock_ratelimit, NULL, NULL);
    object_class_property_set_description(oc, X86_MACHINE_BUS_LOCK_RATELIMIT,
        "Set the ratelimit for the bus locks acquired in VMs");

    object_class_property_add(oc, "sgx-epc", "SgxEPC",
                              machine_get_sgx_epc, machine_set_sgx_epc,
                              NULL, NULL);
    object_class_property_set_description(oc, "sgx-epc",
                                          "SGX EPC device");
}
static const TypeInfo x86_machine_info = {
    .name = TYPE_X86_MACHINE,
    .parent = TYPE_MACHINE,
    .abstract = true,
    .instance_size = sizeof(X86MachineState),
    .instance_init = x86_machine_initfn,
    .class_size = sizeof(X86MachineClass),
    .class_init = x86_machine_class_init,
    .interfaces = (InterfaceInfo[]) {
         { TYPE_NMI },
         { }
    },
};

static void x86_machine_register_types(void)
{
    type_register_static(&x86_machine_info);
}

type_init(x86_machine_register_types)