/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "sysemu/char.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because xen/hvm/ioreq.h
 * needs to be included before this block and hw/xen/xen_common.h needs to
 * be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT 3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif
#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    DeviceListener device_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;
/* Xen-specific functions for piix pci */
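
/*
 * Illustrative note (not from the original source): the mapping below
 * computes pirq = pin + slot * 4, so e.g. devfn 0x10 (slot 2, function 0)
 * with INTA (irq_num 0) yields pirq 8.
 */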
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                   PC_MACHINE_MAX_RAM_BELOW_4G,
                                                   &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        pcms->above_4g_mem_size = ram_size - user_lowmem;
        pcms->below_4g_mem_size = user_lowmem;
    } else {
        pcms->above_4g_mem_size = 0;
        pcms->below_4g_mem_size = ram_size;
    }
    if (!pcms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously; it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + pcms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;
    vmstate_register_ram_global(&ram_memory);

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the VGA
     * emulated device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             pcms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (pcms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 pcms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
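
/*
 * Note added for clarity: unlike the common QEMU path, RAM blocks under Xen
 * are not backed by host memory allocated here; xen_ram_alloc() instead asks
 * the hypervisor to populate the corresponding guest pseudo-physical pages.
 */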
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    /* FIXME caller ram_block_add() wants error_setg() on failure */
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}
static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}
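
/*
 * Note added for clarity: xen_add_to_physmap() relocates a RAM region (in
 * practice the VGA framebuffer) inside the guest physical address space via
 * XENMAPSPACE_gmfn, and records the mapping under
 * /local/domain/0/device-model/<domid>/physmap in xenstore so that
 * xen_read_physmap() can reconstruct it after migration.
 */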
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];
    const char *mr_name;

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr_name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) {
            return -1;
        }
    }

    return 0;
}
static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        xen_pfn_t idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif
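
/*
 * Note added for clarity: the memory listener below keeps the ioreq server's
 * view of the guest address space in sync with QEMU's.  Sections are mapped
 * or unmapped with the ioreq server, RAM additions go through the physmap
 * machinery above, and ROM regions are marked read-only in the hypervisor.
 */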
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_xc, xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_xc, xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}
static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);

    memory_region_ref(section->mr);

    xen_map_io_section(xen_xc, xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);

    xen_unmap_io_section(xen_xc, xen_domid, state->ioservid, section);

    memory_region_unref(section->mr);
}

static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_map_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_unmap_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}
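
/*
 * Note added for clarity: Xen tracks dirty VRAM pages for one region at a
 * time (log_for_dirtybit).  xen_sync_dirty_bitmap() pulls the hypervisor's
 * dirty bitmap for that region and forwards each set bit to the framebuffer
 * memory region, one TARGET_PAGE_SIZE page at a time.
 */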
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}
static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        /* Disable dirty bit tracking */
        xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};
/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use poll to get the port notification.  Returns the pending ioreq taken
 * from shared memory, or NULL if there is nothing to handle.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}
static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}
/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}

static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
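
/*
 * Note added for clarity: a PIO ioreq comes in four shapes, distinguished by
 * req->dir (IOREQ_READ/IOREQ_WRITE) and req->data_is_ptr.  When data_is_ptr
 * is clear, req->data holds the value itself; when set, req->data is a guest
 * physical address through which req->count items of req->size bytes are
 * transferred, stepping forward or backward according to req->df (which
 * mirrors the x86 direction flag for string I/O).
 */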
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE)
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG: {
            uint32_t sbdf = req->addr >> 32;
            uint32_t val;

            /* Fake a write to port 0xCF8 so that
             * the config space access will target the
             * correct device model.
             */
            val = (1u << 31) |
                  ((req->addr & 0x0f00) << 16) |
                  ((sbdf & 0xffff) << 8) |
                  (req->addr & 0xfc);
            do_outp(0xcf8, 4, val);

            /* Now issue the config space access via
             * port 0xCFC
             */
            req->addr = 0xcfc | (req->addr & 0x03);
            cpu_ioreq_pio(req);
            break;
        }
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}
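
/*
 * Note added for clarity: the buffered ioreq page is a ring written by Xen.
 * The two xen_rmb() barriers order the reads of the read and write pointers
 * against the slot contents, and re-reading read_pointer restarts the
 * iteration if it changed underneath us.  64-bit requests occupy two
 * consecutive slots, hence the "qw" handling below.
 */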
static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(state, &req);

        atomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}
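
/*
 * Note added for clarity: a non-zero return from handle_buffered_iopage()
 * re-arms the timer below, so buffered requests keep being polled every
 * BUFFER_IO_MAX_DELAY ms until the ring drains, at which point the event
 * channel is unmasked again.
 */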
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(state, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size
                    ", size: %" FMT_ioreq_size
                    ", type: %"FMT_ioreq_size"\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}
static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_xc, xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}
static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL)
        return;

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}
static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

/* return 0 means OK, or -1 means critical issue -- will exit(1) */
int xen_hvm_init(PCMachineState *pcms,
                 MemoryRegion **ram_memory)
{
    /*
     * FIXME Returns -1 without cleaning up on some errors (harmless
     * as long as the caller exit()s on error), dies with hw_error()
     * on others.  hw_error() isn't appropriate here.  Should probably
     * simply exit() on all errors.
     */
    int i, rc;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -1;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -1;
    }

    rc = xen_create_ioreq_server(xen_xc, xen_domid, &state->ioservid);
    if (rc < 0) {
        perror("xen: ioreq server create");
        return -1;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    rc = xen_get_ioreq_server_info(xen_xc, xen_domid, state->ioservid,
                                   &ioreq_pfn, &bufioreq_pfn,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        hw_error("failed to get ioreq server info: error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                 PROT_READ|PROT_WRITE, ioreq_pfn);
        if (state->shared_vmport_page == NULL) {
            hw_error("map shared vmport IO page returned error %d handle="
                     XC_INTERFACE_FMT, errno, xen_xc);
        }
    } else if (rc != -ENOSYS) {
        hw_error("get vmport regs pfn returned error %d, rc=%d", errno, rc);
    }

    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid,
                                                   XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE,
                                                   bufioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_xc, xen_domid, state->ioservid, true);
    if (rc < 0) {
        hw_error("failed to enable ioreq server info: error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "shared evtchn %d bind error %d\n", i, errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "buffered evtchn bind error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(pcms, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    device_listener_register(&state->device_listener);

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        return -1;
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}
void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}
void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}
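
/*
 * Note added for clarity: while a live migration is in progress
 * (xen_in_migration), pages QEMU writes to on the guest's behalf must also
 * be flagged dirty in the hypervisor's log-dirty bitmap, otherwise the
 * destination would receive stale copies of them.
 */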
void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}
void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}