/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "sysemu/char.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because
 * xen/hvm/ioreq.h needs to be included before this block and
 * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif

#define BUFFER_IO_MAX_DELAY  100

/* Leave some slack so that hvmloader does not complain about lack of
 * memory at boot time ("Could not allocate order=0 extent").
 * Once hvmloader is modified to cope with that situation without
 * printing warning messages, QEMU_SPARE_PAGES can be removed.
 */
#define QEMU_SPARE_PAGES 16

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    DeviceListener device_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;

/* Xen specific function for piix pci */
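
/*
 * The PIRQ returned below packs (slot, pin) into a single number:
 * pirq = pin + slot * 4.  xen_piix3_set_irq() undoes this with
 * irq_num >> 2 (slot) and irq_num & 3 (pin).  For example, INTA#
 * (pin 0) of the device in slot 2 yields pirq 8.
 */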
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}
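
/*
 * PIIX3 routes PCI interrupt lines to ISA IRQs through config registers
 * 0x60-0x63 (PIRQRC[A:D]).  A value with bit 7 set means "routing
 * disabled"; the low nibble selects the ISA IRQ.  Any guest update of
 * these registers is mirrored into Xen so the hypervisor can deliver
 * the interrupts itself.
 */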
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */
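
/*
 * Lay out guest RAM in the address space.  The resulting map is roughly:
 *
 *   0x00000000 - 0x0009ffff : xen.ram.640k (alias)
 *   0x000c0000 - below_4g   : xen.ram.lo   (alias; 0xa0000-0xbffff is
 *                                           left for the emulated VGA)
 *   0x100000000 and up      : xen.ram.hi   (alias, only if RAM exceeds
 *                                           the below-4G limit)
 *
 * All three alias into the single "xen.ram" block allocated below.
 */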
static void xen_ram_init(ram_addr_t *below_4g_mem_size,
                         ram_addr_t *above_4g_mem_size,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                   PC_MACHINE_MAX_RAM_BELOW_4G,
                                                   &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        *above_4g_mem_size = ram_size - user_lowmem;
        *below_4g_mem_size = user_lowmem;
    } else {
        *above_4g_mem_size = 0;
        *below_4g_mem_size = ram_size;
    }
    if (!*above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory continuously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + *above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_abort);
    *ram_memory_p = &ram_memory;
    vmstate_register_ram_global(&ram_memory);

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA I/O memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             *below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (*above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 *above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
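
/*
 * Populate guest physical frames for a new RAM region.  If the domain's
 * current reservation is too small, raise max_pages first; the
 * << (XC_PAGE_SHIFT - 10) converts a page count into the KiB units that
 * xc_domain_setmaxmem() expects.
 */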
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;
    xc_domaininfo_t info;
    unsigned long free_pages;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if ((xc_domain_getinfolist(xen_xc, xen_domid, 1, &info) != 1) ||
        (info.domain != xen_domid)) {
        hw_error("xc_domain_getinfolist failed");
    }
    free_pages = info.max_pages - info.tot_pages;
    if (free_pages > QEMU_SPARE_PAGES) {
        free_pages -= QEMU_SPARE_PAGES;
    } else {
        free_pages = 0;
    }
    if ((free_pages < nr_pfn) &&
        (xc_domain_setmaxmem(xen_xc, xen_domid,
                             ((info.max_pages + nr_pfn - free_pages)
                              << (XC_PAGE_SHIFT - 10))) < 0)) {
        hw_error("xc_domain_setmaxmem failed");
    }
    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}
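
/*
 * Reverse lookup: translate a RAM block offset back to the guest
 * physical address it was remapped to.  Registered with the mapcache in
 * xen_hvm_init() so that mapcache accesses hit the relocated pages.
 */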
static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}
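
/*
 * Relocate a RAM region (in practice only the linear framebuffer) to its
 * guest-visible address with XENMAPSPACE_gmfn, pin it write-back
 * cacheable, and record start_addr/size/name under
 * /local/domain/0/device-model/<domid>/physmap in xenstore so that
 * xen_read_physmap() can rebuild the mapping after a save/restore.
 */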
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];
    const char *mr_name;

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr_name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        xen_pfn_t idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif

static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_xc, xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_xc, xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);

    memory_region_ref(section->mr);

    xen_map_io_section(xen_xc, xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);

    xen_unmap_io_section(xen_xc, xen_domid, state->ioservid, section);

    memory_region_unref(section->mr);
}

static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_map_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_unmap_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}
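
/*
 * Ask Xen for the dirty-VRAM bitmap of the framebuffer region and feed
 * it into memory_region_set_dirty().  One bit per guest page; the inner
 * ctzl() loop visits each set bit without scanning the zero bits.
 */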
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use poll to get the port notification.  Returns the pending ioreq for
 * the vCPU whose port fired, or NULL if there is nothing to do (or the
 * event was for the buffered-io port).
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}

static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
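
/*
 * A worked example of the item addressing above: for a request with
 * req->size == 2 and df clear, item i == 3 touches guest memory at
 * addr + 6; with df set (decrementing string ops), it touches addr - 6.
 */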

static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}
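
/*
 * IOREQ_TYPE_COPY requests are memory moves.  When data_is_ptr is
 * clear, req->data itself holds the value read or written at req->addr;
 * otherwise req->data points at a guest buffer and each item is copied
 * between the two regions via the 64-bit bounce variable below.
 */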
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}
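
/*
 * VMware-port (vmport) ioreqs carry the guest's general-purpose
 * registers in a separate shared page.  The helpers below copy them into
 * the vCPU's CPUX86State so the emulated backdoor port handler sees
 * them, and copy any modifications back once cpu_ioreq_pio() has run.
 */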
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG: {
            uint32_t sbdf = req->addr >> 32;
            uint32_t val;

            /* Fake a write to port 0xCF8 so that the config space
             * access will target the correct device model: bit 31 is
             * the enable bit, bits 23:8 take bus/devfn from sbdf,
             * bits 7:2 the register offset (with offset bits 11:8 in
             * val bits 27:24 for extended config space).
             */
            val = (1u << 31) |
                  ((req->addr & 0x0f00) << 16) |
                  ((sbdf & 0xffff) << 8) |
                  (req->addr & 0xfc);
            do_outp(0xcf8, 4, val);

            /* Now issue the config space access via
             * port 0xCFC.
             */
            req->addr = 0xcfc | (req->addr & 0x03);
            cpu_ioreq_pio(req);
            break;
        }
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
static int handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(state, &req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}
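
/*
 * Main ioreq dispatch, driven by the event-channel fd.  The response
 * protocol mirrors the request one: write back any result, xen_wmb(),
 * set STATE_IORESP_READY, then notify the vCPU's event channel so Xen
 * can resume the guest.
 */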
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(state, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size
                    ", size: %" FMT_ioreq_size
                    ", type: %"FMT_ioreq_size"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_xc, xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}
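
/*
 * One-time setup of the HVM device model: open the event channel and
 * xenstore handles, create and enable the ioreq server, map the shared,
 * vmport and buffered-io pages, bind one event channel per vCPU (plus
 * one for buffered io), then register the memory/io/device listeners
 * and the Xen backend drivers.
 */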
/* return 0 means OK, or -1 means critical issue -- will exit(1) */
int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
                 MemoryRegion **ram_memory)
{
    int i, rc;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -1;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -1;
    }

    rc = xen_create_ioreq_server(xen_xc, xen_domid, &state->ioservid);
    if (rc < 0) {
        perror("xen: ioreq server create");
        return -1;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    rc = xen_get_ioreq_server_info(xen_xc, xen_domid, state->ioservid,
                                   &ioreq_pfn, &bufioreq_pfn,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        hw_error("failed to get ioreq server info: error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                 PROT_READ|PROT_WRITE, ioreq_pfn);
        if (state->shared_vmport_page == NULL) {
            hw_error("map shared vmport IO page returned error %d handle="
                     XC_INTERFACE_FMT, errno, xen_xc);
        }
    } else if (rc != -ENOSYS) {
        hw_error("get vmport regs pfn returned error %d, rc=%d", errno, rc);
    }

    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid,
                                                   XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE,
                                                   bufioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_xc, xen_domid, state->ioservid, true);
    if (rc < 0) {
        hw_error("failed to enable ioreq server info: error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "shared evtchn %d bind error %d\n", i, errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "buffered evtchn bind error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(below_4g_mem_size, above_4g_mem_size, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    device_listener_register(&state->device_listener);

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        return -1;
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}

void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}