/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <sys/mman.h>

#include "hw/pci.h"
#include "hw/pc.h"
#include "hw/xen_common.h"
#include "hw/xen_backend.h"

#include "xen-mapcache.h"
#include "trace.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Compatibility with older version */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif

#define BUFFER_IO_MAX_DELAY  100

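/*
 * Per-domain device-model state: the shared and buffered ioreq pages
 * mapped from the hypervisor, one local event-channel port per vcpu for
 * ioreq notifications, a timer that periodically drains the buffered
 * ioreq page, and a xenstore handle used to report the device-model state.
 */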
typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;

    Notifier exit;
} XenIOState;

/* Xen specific function for piix pci */

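/*
 * The PIRQ returned here packs (slot << 2) | pin, so xen_piix3_set_irq()
 * below can recover the device as irq_num >> 2 and the INTx pin as
 * irq_num & 3 when forwarding the level to the hypervisor.
 */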
int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            /* bit 7 set means the link is disabled */
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

void xen_cmos_set_s3_resume(void *opaque, int irq, int level)
{
    pc_cmos_set_s3_resume(opaque, irq, level);
    if (level) {
        xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
    }
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

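/* Memory Ops */

/*
 * Register guest RAM with QEMU as a single "xen.ram" block.  RAM beyond
 * 0xe0000000 (the PCI hole) is registered separately above the 4GB
 * boundary when the target supports physical addresses wider than 32 bits.
 */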
static void xen_ram_init(ram_addr_t ram_size)
{
    RAMBlock *new_block;
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;

    new_block = qemu_mallocz(sizeof (*new_block));
    pstrcpy(new_block->idstr, sizeof (new_block->idstr), "xen.ram");
    new_block->host = NULL;
    new_block->offset = 0;
    new_block->length = ram_size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       new_block->length >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, new_block->length >> TARGET_PAGE_BITS);

    if (ram_size >= 0xe0000000) {
        above_4g_mem_size = ram_size - 0xe0000000;
        below_4g_mem_size = 0xe0000000;
    } else {
        below_4g_mem_size = ram_size;
    }

    cpu_register_physical_memory(0, below_4g_mem_size, new_block->offset);
#if TARGET_PHYS_ADDR_BITS > 32
    if (above_4g_mem_size > 0) {
        cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size,
                                     new_block->offset + below_4g_mem_size);
    }
#endif
}

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = qemu_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at %lx", ram_addr);
    }

    qemu_free(pfn_list);
}

/* VCPU Operations, MMIO, IO ring ... */

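/*
 * Under Xen the guest vcpus are run by the hypervisor, so QEMU's CPUState
 * is only used as a placeholder; on machine reset vcpu 0 is simply parked
 * in the halted state.
 */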
static void xen_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    env->halted = 1;
}

void xen_vcpu_init(void)
{
    CPUState *first_cpu;

    if ((first_cpu = qemu_get_cpu(0))) {
        qemu_register_reset(xen_reset_vcpu, first_cpu);
        xen_reset_vcpu(first_cpu);
    }
}

/* get the ioreq packet from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for a port notification and return the
 * corresponding ioreq packet, or NULL if nothing is pending.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

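/*
 * Port I/O helpers: dispatch a 1, 2 or 4 byte access to QEMU's ioport
 * layer; any other size is a fatal error.
 */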
static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

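/*
 * Service a (possibly repeated) port I/O request.  When data_is_ptr is
 * clear, req->data holds the value itself; otherwise it is a guest
 * physical address and each repetition is copied to or from guest memory,
 * walking up or down according to the direction flag (req->df).
 */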
static void cpu_ioreq_pio(ioreq_t *req)
{
    int i, sign;
    uint32_t tmp;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

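/*
 * Service a memory move (MMIO/copy) request: same data_is_ptr and
 * direction handling as PIO, but the target is the guest physical address
 * in req->addr rather than an I/O port.
 */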
static void cpu_ioreq_move(ioreq_t *req)
{
    int i, sign;
    uint64_t tmp;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                        (uint8_t *) &req->data, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                        (uint8_t *) &req->data, req->size);
            }
        }
    } else {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    }
}

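/*
 * Dispatch a single ioreq: sub-word writes are first masked down to the
 * request size, then the request is routed by type (port I/O, memory
 * copy, time-offset updates are ignored, mapcache invalidation).
 */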
static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            qemu_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}

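/*
 * Drain the buffered ioreq page.  Each slot is expanded into a full
 * ioreq_t and handled synchronously; an 8-byte request occupies two
 * consecutive slots, the second one carrying the upper 32 bits of data.
 */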
static void handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return;
    }

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }
}

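/*
 * Timer callback: drain the buffered ioreq page and re-arm the timer
 * BUFFER_IO_MAX_DELAY ms into the future.
 */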
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    handle_buffered_iopage(state);
    qemu_mod_timer(state->buffered_io_timer,
                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
}

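/*
 * Event-channel fd handler: fetch the pending ioreq for the notifying
 * vcpu, service it, honour any pending shutdown or reset request, then
 * mark the response ready and notify the guest through the same port.
 */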
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain();
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (vm_running) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain();
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset();
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

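/*
 * Record the device-model state (e.g. "running") in xenstore under
 * /local/domain/0/device-model/<domid>/state, where the toolstack
 * expects to find it.
 */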
static void xenstore_record_dm_state(XenIOState *s, const char *state)
{
    char path[50];

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(s->xenstore, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);
    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }

    /* record state running */
    xenstore_record_dm_state(state, "running");
}

static void xen_vm_change_state_handler(void *opaque, int running, int reason)
{
    XenIOState *state = opaque;
    if (running) {
        xen_main_loop_prepare(state);
    }
}

static void xen_exit_notifier(Notifier *n)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }

    return 0;
}

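/*
 * One-time HVM setup: open the event channel and xenstore connections,
 * map the shared and buffered ioreq pages using the pfns advertised by
 * HVM params, bind one interdomain event channel per vcpu, and initialise
 * the mapcache and guest RAM.
 */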
int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    XenIOState *state;

    state = qemu_mallocz(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = qemu_mallocz(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    /* Init RAM management */
    qemu_map_cache_init();
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);

    return 0;
}

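/*
 * Ask the hypervisor to power the domain off; called on fatal ioreq
 * errors and when a shutdown has been requested.
 */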
void destroy_hvm_domain(void)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "? xc_domain_shutdown failed to issue poweroff, "
                    "sts %d, %s\n", sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d poweroff\n", xen_domid);
        }
        xc_interface_close(xc_handle);
    }
}