/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "sysemu/char.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>
//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;
/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because
 * xen/hvm/ioreq.h needs to be included before this block and
 * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}

static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
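
/*
 * Overview (descriptive note, not upstream text): the shared_iopage
 * holds one ioreq_t slot per vcpu, and each slot carries the event
 * channel port (vp_eport) that Xen uses to notify QEMU when that vcpu
 * has posted a request.  QEMU binds a local port to each vp_eport at
 * init time and polls them all through a single xenevtchn file
 * descriptor (see xen_hvm_init() and xen_main_loop_prepare() below).
 */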

#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;
typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    DeviceListener device_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;
/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}
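
/*
 * Worked example (illustrative, not upstream text): devfn packs the
 * slot in bits 7:3, so a device in slot 2 raising INTB (irq_num 1)
 * yields 1 + (2 << 2) = 9, i.e. the standard slot*4 + pin swizzle
 * onto the PIIX3 PCI link lines.
 */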

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If vector is 0, the msi is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}
static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}
/* Memory Ops */

static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                   PC_MACHINE_MAX_RAM_BELOW_4G,
                                                   &error_abort);
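
    /*
     * Illustrative note (assuming the usual HVM_BELOW_4G_RAM_END of
     * 0xf0000000): with 4 GiB of guest RAM the split below yields
     * below_4g_mem_size = 3.75 GiB, above_4g_mem_size = 0.25 GiB, and
     * block_len = 4 GiB + 0.25 GiB so the PCI hole stays unpopulated.
     */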
    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        pcms->above_4g_mem_size = ram_size - user_lowmem;
        pcms->below_4g_mem_size = user_lowmem;
    } else {
        pcms->above_4g_mem_size = 0;
        pcms->below_4g_mem_size = ram_size;
    }
    if (!pcms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory continuously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + pcms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;
    vmstate_register_ram_global(&ram_memory);

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space, it will be registered later by the VGA
     * emulated device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             pcms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (pcms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 pcms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}
static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}
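
/*
 * Descriptive note: each mapping added below is also mirrored into
 * xenstore under /local/domain/0/device-model/<domid>/physmap/
 * <phys_offset>/{start_addr,size,name}, which is what xen_read_physmap()
 * reads back at startup so mappings survive a device-model restart.
 */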
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];
    const char *mr_name;

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any regions that are not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr_name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) {
            return -1;
        }
    }

    return 0;
}
static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        xen_pfn_t idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_xc, xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_xc, xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}
static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_xc, xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_xc, xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}
static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_map_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_unmap_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[DIV_ROUND_UP(npages, width)];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}
static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        /* Disable dirty bit tracking */
        xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}
static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};
/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}
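
/*
 * Descriptive note: an ioreq slot moves through a simple state machine,
 * STATE_IOREQ_READY (posted by Xen) -> STATE_IOREQ_INPROCESS (claimed
 * above) -> STATE_IORESP_READY (set in cpu_handle_ioreq() once the
 * access has been emulated), after which Xen is notified through the
 * vcpu's event channel and resumes the guest.
 */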

/*
 * Poll the event channel for a port notification and return the
 * pending ioreq for the corresponding vcpu, or NULL if there is
 * nothing to do.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}
static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04x %lx", addr, size);
    }
}
/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}

static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
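
/*
 * Illustrative note: req->df mirrors the x86 direction flag for string
 * I/O, so e.g. a "rep insw" issued with DF set walks guest memory
 * downwards; item i of such a request lands at req->data - 2 * i,
 * which is what the unsigned offset arithmetic above computes.
 */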

static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}
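
/*
 * Descriptive note: IOREQ_TYPE_VMWARE_PORT requests (the VMware
 * "backdoor" I/O port) carry a full set of general-purpose registers
 * in the shared vmport page.  regs_to_cpu() loads them into the
 * current vcpu's CPUX86State before the port access is emulated, and
 * regs_from_cpu() copies the possibly-modified values back afterwards.
 */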
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
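        /*
         * Descriptive note on the case below: a PCI_CONFIG ioreq encodes
         * the bus/device/function in the upper half of req->addr and the
         * register offset in the lower bits.  The CONFIG_ADDRESS dword
         * written to 0xcf8 follows the standard layout: bit 31 enable,
         * bits 23:16 bus, 15:11 device, 10:8 function, 7:2 register.
         */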
        case IOREQ_TYPE_PCI_CONFIG: {
            uint32_t sbdf = req->addr >> 32;
            uint32_t val;

            /* Fake a write to port 0xCF8 so that
             * the config space access will target the
             * correct device model.
             */
            val = (1u << 31) |
                  ((req->addr & 0x0f00) << 16) |
                  ((sbdf & 0xffff) << 8) |
                  (req->addr & 0xfc);
            do_outp(0xcf8, 4, val);

            /* Now issue the config space access via
             * port 0xCFC
             */
            req->addr = 0xcfc | (req->addr & 0x03);
            cpu_ioreq_pio(req);
            break;
        }
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}
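
/*
 * Descriptive note: the buffered iopage is a single-producer (Xen),
 * single-consumer (QEMU) ring of buf_ioreq_t slots.  The loop below
 * snapshots read_pointer, issues a read barrier, snapshots
 * write_pointer, then re-checks read_pointer and retries if it moved
 * underneath us; 64-bit requests occupy two consecutive slots (the
 * "qw" case), so the read pointer is advanced by qw + 1.
 */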
static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(state, &req);

        atomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(state, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}
static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}
static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_xc, xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}
static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}
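
/*
 * Descriptive note on the init sequence below: open the event-channel
 * and xenstore handles, create an ioreq server, map its shared,
 * vmport and buffered-io pages, bind one event channel per vcpu plus
 * one for buffered requests, set up guest RAM and the map cache, and
 * finally register the memory/io/device listeners and Xen backends.
 */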
void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    int i, rc;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    rc = xen_create_ioreq_server(xen_xc, xen_domid, &state->ioservid);
    if (rc < 0) {
        perror("xen: ioreq server create");
        goto err;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    rc = xen_get_ioreq_server_info(xen_xc, xen_domid, state->ioservid,
                                   &ioreq_pfn, &bufioreq_pfn,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                              PROT_READ|PROT_WRITE,
                                              1, &ioreq_pfn, NULL);
    if (state->shared_page == NULL) {
        error_report("map shared IO page returned error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                   PROT_READ|PROT_WRITE,
                                                   1, &bufioreq_pfn, NULL);
    if (state->buffered_io_page == NULL) {
        error_report("map buffered IO page returned error %d", errno);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_xc, xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    bufioreq_evtchn);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(pcms, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    device_listener_register(&state->device_listener);

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);
    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}
void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}
void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}
void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}
void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}