/* hw/xen/xen-hvm-common.c */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "trace.h"

#include "hw/pci/pci_host.h"
#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen-bus.h"
#include "hw/boards.h"
#include "hw/xen/arch_hvm.h"

MemoryRegion ram_memory;
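
/*
 * Allocate guest RAM by populating the guest physmap: build the list of
 * guest page frame numbers covering the range and ask Xen to back them
 * with memory. No host memory is allocated here; the pages belong to
 * the Xen domain.
 */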
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_new(xen_pfn_t, nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0,
                                         pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}
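
/*
 * Propagate a memory-region change to the ioreq server: map or unmap
 * the section so that Xen forwards accesses to it to QEMU, then give
 * the architecture-specific hook a chance to react as well.
 */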
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    arch_xen_set_memory(state, section, add);
}

void xen_region_add(MemoryListener *listener,
                    MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

void xen_region_del(MemoryListener *listener,
                    MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

void xen_io_add(MemoryListener *listener,
                MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

void xen_io_del(MemoryListener *listener,
                MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}
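
/*
 * Device listener callbacks: track PCI devices as they are realized so
 * that forwarded config-space accesses (IOREQ_TYPE_PCI_CONFIG) can be
 * matched back to a device by SBDF, and register each device with the
 * ioreq server.
 */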
void xen_device_realize(DeviceListener *listener,
                        DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

void xen_device_unrealize(DeviceListener *listener,
                          DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}

MemoryListener xen_io_listener = {
    .name = "xen-io",
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};

DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

/* Fetch the pending ioreq for the given vcpu from the shared ioreq page. */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for a port notification and return the
 * corresponding ioreq, or NULL if the notification was for the
 * buffered-ioreq port or nothing was pending.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = qemu_xen_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        qemu_xen_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}
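
/*
 * Port I/O helpers: dispatch on the access size to the cpu_inb/inw/inl
 * and cpu_outb/outw/outl primitives; any other size is a fatal error.
 */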
static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
    case 1:
        return cpu_inb(addr);
    case 2:
        return cpu_inw(addr);
    case 4:
        return cpu_inl(addr);
    default:
        hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
    case 1:
        return cpu_outb(addr, val);
    case 2:
        return cpu_outw(addr, val);
    case 4:
        return cpu_outl(addr, val);
    default:
        hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /*
     * Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest
     */
    hwaddr offset = (hwaddr)req->size * i;

    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}

static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
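
/*
 * Handle a port I/O request. req->data either holds the value itself
 * (data_is_ptr clear) or points at an array in guest memory of
 * req->count elements of req->size bytes each (data_is_ptr set).
 */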
void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}
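
/*
 * Handle an MMIO request (IOREQ_TYPE_COPY): move req->count items of
 * req->size bytes between guest memory at req->addr and req->data,
 * which is either the immediate value or a guest-memory pointer.
 */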
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}
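
/*
 * Handle a forwarded PCI config-space access. For IOREQ_TYPE_PCI_CONFIG
 * requests the upper 32 bits of req->addr carry the SBDF of the target
 * device and the low bits the register offset; the device is looked up
 * in the list maintained by the device listener.
 */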
static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}
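
/*
 * Central ioreq dispatcher: truncate write data to the request size,
 * then hand the request to the handler for its type, deferring to the
 * architecture-specific handler for anything not known here.
 */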
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
        (req->size < sizeof(target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        break;
    case IOREQ_TYPE_INVALIDATE:
        xen_invalidate_map_cache();
        break;
    case IOREQ_TYPE_PCI_CONFIG:
        cpu_ioreq_config(state, req);
        break;
    default:
        arch_handle_ioreq(state, req);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}
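
/*
 * Drain the buffered ioreq ring. Buffered requests are posted writes,
 * so they are completed without notifying the guest; quad-word
 * requests span two consecutive slots. Returns true if at least one
 * request was handled.
 */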
static bool handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    bool handled_ioreq = false;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return false;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /*
         * Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        qatomic_add(&buf_page->read_pointer, qw + 1);
        handled_ioreq = true;
    }

    return handled_ioreq;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}
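
/*
 * Event channel callback: process one synchronous ioreq (and any
 * buffered ones), publish the result, and notify the guest. The
 * request is handled from a local copy; only the data field is
 * written back to the shared page.
 */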
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        qemu_xen_evtchn_notify(state->xce_handle,
                               state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                            handle_buffered_io, state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

void xen_hvm_change_state_handler(void *opaque, bool running,
                                  RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xen_destroy_ioreq_server(xen_domid, state->ioservid);
    if (state->fres != NULL) {
        xenforeignmemory_unmap_resource(xen_fmem, state->fres);
    }

    qemu_xen_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}
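
/*
 * Map the ioreq server's shared and buffered-io pages into QEMU,
 * preferably in one go via the resource-mapping API; on older Xen
 * (EOPNOTSUPP) fall back to querying the page frame numbers and
 * foreign-mapping each page individually.
 */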
static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                                XENMEM_resource_ioreq_server,
                                                state->ioservid, 0, 2,
                                                &addr,
                                                PROT_READ | PROT_WRITE, 0);
    if (state->fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + XC_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        DPRINTF("shared page at pfn %lx\n", ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}
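
/*
 * Ask Xen to power off or reboot the domain, first through the device
 * model API and, where that is unavailable, through the older xenctrl
 * interface.
 */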
void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            error_report("xendevicemodel_shutdown failed with error %d", errno);
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}
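
/*
 * Second-stage ioreq registration: map the ioreq server pages, enable
 * the server, bind one event channel per vcpu plus one for buffered
 * requests, initialise the map cache and hook up the memory, I/O and
 * device listeners. Any failure here is fatal.
 */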
static void xen_do_ioreq_register(XenIOState *state,
                                  unsigned int max_cpus,
                                  const MemoryListener *xen_memory_listener)
{
    int i, rc;

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                              xen_vcpu_eport(state->shared_page,
                                                             i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                          state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = *xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}
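
/*
 * Entry point for machine setup: open the event channel and xenstore
 * handles, create the ioreq server and, on success, finish registration
 * via xen_do_ioreq_register(). Xen backends are initialised either way.
 */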
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
                        const MemoryListener *xen_memory_listener)
{
    int rc;

    setup_xen_backend_ops();

    state->xce_handle = qemu_xen_evtchn_open();
    if (state->xce_handle == NULL) {
        error_report("xen: event channel open failed with error %d", errno);
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        error_report("xen: xenstore open failed with error %d", errno);
        goto err;
    }

    rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
    if (!rc) {
        xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
    } else {
        warn_report("xen: failed to create ioreq server");
    }

    xen_bus_init();

    xen_be_init();

    return;

err:
    error_report("xen hardware virtual machine backend registration failed");
    exit(1);
}