/* xen-all.c */
/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci.h"
#include "hw/pc.h"
#include "hw/xen_common.h"
#include "hw/xen_backend.h"

#include "range.h"
#include "xen-mapcache.h"
#include "trace.h"
#include "exec-memory.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;

/* Compatibility with older version */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
#  define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
#  define FMT_ioreq_size "u"
#endif

#define BUFFER_IO_MAX_DELAY  100
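
/*
 * A XenPhysmap records a RAM region that the device model has relocated
 * inside the guest physical address space (in practice the VRAM).  The
 * list is mirrored into xenstore so that a restarted or migrated device
 * model can recover the mapping (see xen_read_physmap below).
 */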
typedef struct XenPhysmap {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    char *name;
    target_phys_addr_t phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    target_phys_addr_t free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
} XenIOState;

/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}
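
/*
 * PIIX3 keeps its PCI interrupt routing in config registers 0x60-0x63
 * (PIRQA-PIRQD).  Bit 7 set means the link is disabled; the low nibble
 * selects the ISA IRQ.  Any guest update is mirrored into Xen so the
 * hypervisor routes the links the same way the emulated chipset does.
 */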
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}
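
/*
 * Lay out the guest RAM as a set of aliases into a single "xen.ram"
 * region: the 640k of base memory, the RAM between the VGA hole and
 * 4G (extended by the MMIO hole when RAM reaches HVM_BELOW_4G_RAM_END),
 * and any remainder above 4G.
 */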
/* Memory Ops */

static void xen_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
    ram_addr_t block_len;

    block_len = ram_size;
    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        /* Xen does not allocate the memory contiguously; it keeps a hole
         * of HVM_BELOW_4G_MMIO_LENGTH at HVM_BELOW_4G_MMIO_START.
         */
        block_len += HVM_BELOW_4G_MMIO_LENGTH;
    }
    memory_region_init_ram(&ram_memory, "xen.ram", block_len);
    vmstate_register_ram_global(&ram_memory);

    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
        below_4g_mem_size = HVM_BELOW_4G_RAM_END;
    } else {
        below_4g_mem_size = ram_size;
    }

    memory_region_init_alias(&ram_640k, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, "xen.ram.lo",
                             &ram_memory, 0xc0000, below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
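
/*
 * Under Xen the guest's RAM is owned by the hypervisor rather than by
 * QEMU: instead of touching host memory, ask Xen to populate the guest
 * physical frames backing this region.
 */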
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   target_phys_addr_t start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static target_phys_addr_t xen_phys_offset_to_gaddr(target_phys_addr_t start_addr,
                                                   ram_addr_t size, void *opaque)
{
    target_phys_addr_t addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}
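
/*
 * Relocate a memory region inside the guest physical address space by
 * remapping its frames with the XENMAPSPACE_gmfn hypercall, then record
 * the mapping both in the local physmap list and in xenstore so it can
 * be recovered later.  The #else branch below stubs both operations out
 * on toolstacks too old to support gmfn remapping.
 */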
#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              target_phys_addr_t start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              target_phys_addr_t offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    target_phys_addr_t pfn, start_gpfn;
    target_phys_addr_t phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %llx - %llx\n", start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = (char *)mr->name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr->name, strlen(mr->name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   target_phys_addr_t start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    target_phys_addr_t phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %llx - %llx, from %llx\n",
            phys_offset, phys_offset + size, start_addr);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        unsigned long idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

#else
static int xen_add_to_physmap(XenIOState *state,
                              target_phys_addr_t start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              target_phys_addr_t offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   target_phys_addr_t start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif
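
/*
 * MemoryListener callback: propagate a section add/remove to Xen.  RAM
 * sections with dirty logging enabled get physmapped (in practice the
 * framebuffer); ROM sections are marked read-only in the hypervisor.
 */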
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (!(section->mr != &ram_memory
          && ( (log_dirty && add) || (!log_dirty && !add)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_begin(MemoryListener *listener)
{
}

static void xen_commit(MemoryListener *listener)
{
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
}

static void xen_region_nop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}
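
/*
 * Pull the VRAM dirty bitmap out of Xen and feed it into the memory
 * API page by page.  Xen can only track one dirty region at a time,
 * hence the log_for_dirtybit bookkeeping.
 */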
static void xen_sync_dirty_bitmap(XenIOState *state,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size)
{
    target_phys_addr_t npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
        if (rc != -ENODATA) {
            fprintf(stderr, "xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(-rc));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ffsl(map) - 1;
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_global_start(MemoryListener *listener)
{
}

static void xen_log_global_stop(MemoryListener *listener)
{
}

static void xen_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data, int fd)
{
}

static void xen_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data, int fd)
{
}
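
/*
 * This listener is registered on the system memory address space in
 * xen_hvm_init().  Most of the callbacks above are no-ops because Xen
 * only cares about RAM layout changes and VRAM dirty logging.
 */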
static MemoryListener xen_memory_listener = {
    .begin = xen_begin,
    .commit = xen_commit,
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .region_nop = xen_region_nop,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .eventfd_add = xen_eventfd_add,
    .eventfd_del = xen_eventfd_del,
    .priority = 10,
};

/* VCPU Operations, MMIO, IO ring ... */

static void xen_reset_vcpu(void *opaque)
{
    CPUArchState *env = opaque;

    env->halted = 1;
}

void xen_vcpu_init(void)
{
    CPUArchState *first_cpu;

    if ((first_cpu = qemu_get_cpu(0))) {
        qemu_register_reset(xen_reset_vcpu, first_cpu);
        xen_reset_vcpu(first_cpu);
    }
}

/* get the ioreq packet from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use poll to get the port notification.
 * Returns the pending ioreq packet, or NULL on read error or if
 * nothing is pending.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}
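
/* Dispatch a port read/write of the given size to QEMU's ioport layer. */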
static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}
static void cpu_ioreq_pio(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                                          (uint8_t *) &tmp, req->size);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                cpu_physical_memory_read(req->data + (sign * i * req->size),
                                         (uint8_t *) &tmp, req->size);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                                         (uint8_t *) &req->data, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                                          (uint8_t *) &req->data, req->size);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                                         (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                                          (uint8_t *) &tmp, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->data + (sign * i * req->size),
                                         (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                                          (uint8_t *) &tmp, req->size);
            }
        }
    }
}

static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
static void handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return;
    }

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    handle_buffered_iopage(state);
    qemu_mod_timer(state->buffered_io_timer,
                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
}
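
/*
 * fd handler for the event channel: fetch the pending synchronous
 * ioreq (if any), service it, and signal completion back to the vcpu
 * that issued it.  Shutdown and reset requests are checked before the
 * response is sent, while the guest is still paused.
 */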
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain();
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain();
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static int store_dev_info(int domid, CharDriverState *cs, const char *string)
{
    struct xs_handle *xs = NULL;
    char *path = NULL;
    char *newpath = NULL;
    char *pts = NULL;
    int ret = -1;

    /* Only continue if we're talking to a pty. */
    if (strncmp(cs->filename, "pty:", 4)) {
        return 0;
    }
    pts = cs->filename + 4;

    /* We now have everything we need to set the xenstore entry. */
    xs = xs_open(0);
    if (xs == NULL) {
        fprintf(stderr, "Could not contact XenStore\n");
        goto out;
    }

    path = xs_get_domain_path(xs, domid);
    if (path == NULL) {
        fprintf(stderr, "xs_get_domain_path() error\n");
        goto out;
    }
    newpath = realloc(path, (strlen(path) + strlen(string) +
                strlen("/tty") + 1));
    if (newpath == NULL) {
        fprintf(stderr, "realloc error\n");
        goto out;
    }
    path = newpath;

    strcat(path, string);
    strcat(path, "/tty");
    if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
        fprintf(stderr, "xs_write for '%s' fail", string);
        goto out;
    }
    ret = 0;

out:
    free(path);
    xs_close(xs);

    return ret;
}

void xenstore_store_pv_console_info(int i, CharDriverState *chr)
{
    if (i == 0) {
        store_dev_info(xen_domid, chr, "/console");
    } else {
        char buf[32];
        snprintf(buf, sizeof(buf), "/device/console/%d", i);
        store_dev_info(xen_domid, chr, buf);
    }
}

static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
{
    char path[50];

    if (xs == NULL) {
        fprintf(stderr, "xenstore connection not initialized\n");
        exit(1);
    }

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);
    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

/* Initialise Xen */

static void xen_change_state_handler(void *opaque, int running,
                                     RunState state)
{
    if (running) {
        /* record state running */
        xenstore_record_dm_state(xenstore, "running");
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *xstate = opaque;
    if (running) {
        xen_main_loop_prepare(xstate);
    }
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }
    qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);

    return 0;
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}
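
/*
 * Set up the HVM device model: open the event channel and xenstore
 * connections, map the shared and buffered ioreq pages, bind one event
 * channel per vcpu, initialise RAM and the memory listener, and
 * register the paravirtual backends.
 */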
int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = g_malloc0(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, get_system_memory());
    state->log_for_dirtybit = NULL;

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__);
        exit(1);
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}
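
/*
 * Ask Xen to power off the domain.  A fresh xc handle is opened here
 * rather than reusing the global xen_xc, presumably so the shutdown
 * still works when called from error paths.
 */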
void destroy_hvm_domain(void)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "? xc_domain_shutdown failed to issue poweroff, "
                    "sts %d, %s\n", sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d poweroff\n", xen_domid);
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}