/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "sysemu/char.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older version */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because
 * xen/hvm/ioreq.h needs to be included before this block and
 * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT 3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

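/* Helpers to locate a given vcpu's ioreq slot and event channel port
 * within the shared ioreq page. */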
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}

static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}

#define BUFFER_IO_MAX_DELAY  100

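/* A XenPhysmap records one guest-physical range that has been remapped
 * (e.g. the VRAM), together with its backing RAM block offset; the list
 * is mirrored to xenstore so it can be recovered after migration. */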
typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    DeviceListener device_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;

/* Xen specific function for piix pci */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

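/* Snoop guest writes to the PIIX3 PCI IRQ routing registers (0x60-0x63)
 * and mirror any route updates into Xen. */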
void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If the vector is 0, the MSI is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

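/* Lay out guest RAM: Xen expects a PCI hole below 4GiB, so RAM is split
 * into the 640k region, a low alias below 4GiB and, if needed, a high
 * alias starting at 4GiB. */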
static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_int(qdev_get_machine(),
                                                   PC_MACHINE_MAX_RAM_BELOW_4G,
                                                   &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        pcms->above_4g_mem_size = ram_size - user_lowmem;
        pcms->below_4g_mem_size = user_lowmem;
    } else {
        pcms->above_4g_mem_size = 0;
        pcms->below_4g_mem_size = ram_size;
    }
    if (!pcms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory continuously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + pcms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;
    vmstate_register_ram_global(&ram_memory);

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the VGA
     * emulated device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the Options ROM, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             pcms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (pcms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 pcms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

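/* Called for every RAM block QEMU creates; under Xen the pages are
 * populated into the guest physmap via the hypercall below rather than
 * allocated locally. */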
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

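/* Reverse lookup used by the mapcache: translate a RAM block offset back
 * to the guest-physical address it is currently mapped at. */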
static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}

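/* Relocate a memory region inside the guest physmap (gmfn remapping) and
 * record the mapping in xenstore so that it survives migration. */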
static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];
    const char *mr_name;

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
             xen_domid, (uint64_t)phys_offset);
    if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) {
        return -1;
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        xen_pfn_t idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

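/* MemoryListener callback: propagate QEMU memory map changes to the Xen
 * ioreq server and maintain the physmap for the framebuffer region. */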
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_xc, xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_xc, xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

*listener
,
503 MemoryRegionSection
*section
)
505 xen_set_memory(listener
, section
, false);
506 memory_region_unref(section
->mr
);
static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_xc, xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_xc, xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}

static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_map_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_unmap_pcidev(xen_xc, xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[DIV_ROUND_UP(npages, width)];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        /* Disable dirty bit tracking */
        xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

/* Get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/* Use poll to get the port notification.  Returns the pending ioreq of
 * the signalled vcpu, or NULL if there is nothing to handle. */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}

static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}

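/* Dispatch a port I/O request: either directly to/from req->data or,
 * when data_is_ptr is set, item by item through guest memory. */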
static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

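/* Handle an MMIO (copy) request; as with PIO above, data is either inline
 * in the request or indirected through guest memory. */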
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

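/* Marshal VMware-port register state between the shared vmport page and
 * the current vCPU, around the actual port access. */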
static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE)
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG: {
            uint32_t sbdf = req->addr >> 32;
            uint32_t val;

            /* Fake a write to port 0xCF8 so that
             * the config space access will target the
             * correct device model.
             */
            val = (1u << 31) |
                  ((req->addr & 0x0f00) << 16) |
                  ((sbdf & 0xffff) << 8) |
                  (req->addr & 0xfc);
            do_outp(0xcf8, 4, val);

            /* Now issue the config space access via
             * the regular ports.
             */
            req->addr = 0xcfc | (req->addr & 0x03);
            cpu_ioreq_pio(req);
            break;
        }
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}

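/* Drain the buffered ioreq ring.  A slot is only consumed once both
 * halves of a 64-bit (qw) request have been read; the read barriers pair
 * with the producer side in Xen. */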
static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(state, &req);

        atomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}

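/* Timer callback: keep polling the buffered ioreq page while requests are
 * pending, otherwise stop the timer and re-arm the event channel. */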
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(state, req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_xc, xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    int i, rc;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    rc = xen_create_ioreq_server(xen_xc, xen_domid, &state->ioservid);
    if (rc < 0) {
        perror("xen: ioreq server create");
        goto err;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    rc = xen_get_ioreq_server_info(xen_xc, xen_domid, state->ioservid,
                                   &ioreq_pfn, &bufioreq_pfn,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                              PROT_READ|PROT_WRITE,
                                              1, &ioreq_pfn, NULL);
    if (state->shared_page == NULL) {
        error_report("map shared IO page returned error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                   PROT_READ|PROT_WRITE,
                                                   1, &bufioreq_pfn, NULL);
    if (state->buffered_io_page == NULL) {
        error_report("map buffered IO page returned error %d", errno);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_xc, xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    bufioreq_evtchn);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(pcms, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    device_listener_register(&state->device_listener);

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);
    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}