/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "sysemu/char.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions of Xen */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define BUFFER_IO_MAX_DELAY  100
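
/*
 * A XenPhysmap entry records one guest-physical range (start_addr, size)
 * that has been remapped away from its original location in the QEMU RAM
 * block (phys_offset).  xen_add_to_physmap() mirrors each entry into
 * xenstore under /local/domain/0/device-model/<domid>/physmap so that a
 * restarted device model can rebuild the list via xen_read_physmap().
 */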
typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;

/* Xen-specific functions for the PIIX PCI bridge */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}
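
/*
 * Worked example of the mapping above: a device in slot 3 has devfn 0x18,
 * so devfn >> 3 == 3 and (3 << 2) == 12; its INTA..INTD pins (irq_num
 * 0..3) therefore land on pirq 12..15.
 */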

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}
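
/*
 * Worked example of the scan above: a byte write of 0x80 to 0x60 (the
 * PIIX3 PIRQA route control register) has bit 7 set, meaning "routing
 * disabled", so the link route is programmed to 0; a write of 0x0b
 * instead routes PIRQA to ISA IRQ 11 (0x0b & 0xf).
 */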

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

static void xen_ram_init(ram_addr_t *below_4g_mem_size,
                         ram_addr_t *above_4g_mem_size,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                   PC_MACHINE_MAX_RAM_BELOW_4G,
                                                   &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        *above_4g_mem_size = ram_size - user_lowmem;
        *below_4g_mem_size = user_lowmem;
    } else {
        *above_4g_mem_size = 0;
        *below_4g_mem_size = ram_size;
    }
    if (!*above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + *above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len);
    *ram_memory_p = &ram_memory;
    vmstate_register_ram_global(&ram_memory);

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space, it will be registered later by the VGA
     * emulated device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             *below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (*above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 *above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
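
/*
 * Worked example of the split above, assuming the usual Xen headers where
 * HVM_BELOW_4G_RAM_END is 0xf0000000: a 6 GiB guest ends up with
 * below_4g_mem_size = 3.75 GiB, above_4g_mem_size = 2.25 GiB and
 * block_len = 4 GiB + 2.25 GiB, leaving the 256 MiB MMIO hole below
 * 4 GiB unpopulated.
 */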

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any regions that are not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = (char *)mr->name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr->name, strlen(mr->name))) {
            return -1;
        }
    }

    return 0;
}
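
/*
 * The xs_write() calls above produce a layout like the following
 * (hypothetical values for a VRAM region with phys_offset 0xff000000
 * remapped to guest address 0xf0000000):
 *
 *   /local/domain/0/device-model/<domid>/physmap/ff000000/start_addr = "f0000000"
 *   /local/domain/0/device-model/<domid>/physmap/ff000000/size       = "800000"
 *   /local/domain/0/device-model/<domid>/physmap/ff000000/name       = "vga.vram"
 *
 * xen_read_physmap() parses exactly this layout at startup.
 */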

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        xen_pfn_t idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif

static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }
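
    /*
     * The check below keeps only two cases: adding a region that has
     * dirty logging enabled (the framebuffer being mapped) and removing
     * one that does not; the main xen.ram region itself is never
     * remapped.
     */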
    if (!(section->mr != &ram_memory
          && ((log_dirty && add) || (!log_dirty && !add)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
        if (rc != -ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(-rc));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}
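
/*
 * Example of the decode loop above, assuming 64-bit longs: bit 5 set in
 * bitmap[2] (i == 2, j == 5) means guest page 2 * 64 + 5 == 133 of the
 * tracked range is dirty, so the page at byte offset 133 *
 * TARGET_PAGE_SIZE of the framebuffer region is marked dirty.
 */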

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

/* get the ioreq packet from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use poll to get the port notification.
 * Returns the ioreq to be serviced, or NULL if none is pending.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
    case 1:
        return cpu_inb(addr);
    case 2:
        return cpu_inw(addr);
    case 4:
        return cpu_inl(addr);
    default:
        hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
    case 1:
        return cpu_outb(addr, val);
    case 2:
        return cpu_outw(addr, val);
    case 4:
        return cpu_outl(addr, val);
    default:
        hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}

static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
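
/*
 * req->df mirrors the x86 direction flag: a "rep" string operation with
 * DF set walks memory downwards, which is why rw_phys_req_item()
 * subtracts the per-item offset in that case.
 */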

static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }
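
    /*
     * Example of the masking above: a 1-byte port write whose data field
     * arrived as 0x1234 is truncated to 0x34, matching what the device
     * would actually see on the bus.
     */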

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        break;
    case IOREQ_TYPE_INVALIDATE:
        xen_invalidate_map_cache();
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}

static int handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
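
        /*
         * An 8-byte buffered access does not fit in a single buf_ioreq_t,
         * so it occupies two consecutive slots: low 32 bits in the first,
         * high 32 bits in the second.  That is also why read_pointer
         * advances by 2 for qword requests below.
         */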
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *xstate = opaque;

    if (running) {
        xen_main_loop_prepare(xstate);
    }
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size,
                 MemoryRegion **ram_memory)
{
    int i, rc;
    unsigned long ioreq_pfn;
    unsigned long bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        g_free(state);
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        g_free(state);
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &bufioreq_evtchn);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    (uint32_t)bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(below_4g_mem_size, above_4g_mem_size, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        exit(1);
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}

void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}