hw/misc/ivshmem.c
/*
 * Inter-VM Shared Memory PCI device.
 *
 * Author:
 *      Cam Macdonell <cam@cs.ualberta.ca>
 *
 * Based On: cirrus_vga.c
 *          Copyright (c) 2004 Fabrice Bellard
 *          Copyright (c) 2004 Makoto Suzuki (suzu)
 *
 *      and rtl8139.c
 *          Copyright (c) 2006 Igor Kovalenko
 *
 * This code is licensed under the GNU GPL v2.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/fifo8.h"
#include "sysemu/char.h"
#include "sysemu/hostmem.h"
#include "qapi/visitor.h"
#include "exec/ram_addr.h"

#include "hw/misc/ivshmem.h"

#include <sys/mman.h>

#define PCI_VENDOR_ID_IVSHMEM   PCI_VENDOR_ID_REDHAT_QUMRANET
#define PCI_DEVICE_ID_IVSHMEM   0x1110

#define IVSHMEM_MAX_PEERS G_MAXUINT16
#define IVSHMEM_IOEVENTFD   0
#define IVSHMEM_MSI     1

#define IVSHMEM_PEER    0
#define IVSHMEM_MASTER  1

#define IVSHMEM_REG_BAR_SIZE 0x100

//#define DEBUG_IVSHMEM
#ifdef DEBUG_IVSHMEM
#define IVSHMEM_DPRINTF(fmt, ...)        \
    do {printf("IVSHMEM: " fmt, ## __VA_ARGS__); } while (0)
#else
#define IVSHMEM_DPRINTF(fmt, ...)
#endif

#define TYPE_IVSHMEM "ivshmem"
#define IVSHMEM(obj) \
    OBJECT_CHECK(IVShmemState, (obj), TYPE_IVSHMEM)

typedef struct Peer {
    int nb_eventfds;
    EventNotifier *eventfds;
} Peer;

typedef struct MSIVector {
    PCIDevice *pdev;
    int virq;
} MSIVector;

typedef struct IVShmemState {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    HostMemoryBackend *hostmem;
    uint32_t intrmask;
    uint32_t intrstatus;

    CharDriverState **eventfd_chr;
    CharDriverState *server_chr;
    Fifo8 incoming_fifo;
    MemoryRegion ivshmem_mmio;

    /* We might need to register the BAR before we actually have the memory.
     * So prepare a container MemoryRegion for the BAR immediately and
     * add a subregion when we have the memory.
     */
    MemoryRegion bar;
    MemoryRegion ivshmem;
    uint64_t ivshmem_size; /* size of shared memory region */
    uint32_t ivshmem_64bit;

    Peer *peers;
    int nb_peers;               /* how many peers we have space for */

    int vm_id;
    uint32_t vectors;
    uint32_t features;
    MSIVector *msi_vectors;

    Error *migration_blocker;

    char *shmobj;
    char *sizearg;
    char *role;
    int role_val;   /* scalar to avoid multiple string comparisons */
} IVShmemState;

/* registers for the Inter-VM shared memory device */
enum ivshmem_registers {
    INTRMASK = 0,
    INTRSTATUS = 4,
    IVPOSITION = 8,
    DOORBELL = 12,
};

static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
                                           unsigned int feature)
{
    return (ivs->features & (1 << feature));
}

/* accessing registers - based on rtl8139 */
static void ivshmem_update_irq(IVShmemState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int isr;

    isr = (s->intrstatus & s->intrmask) & 0xffffffff;

    /* don't print ISR resets */
    if (isr) {
        IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
                        isr ? 1 : 0, s->intrstatus, s->intrmask);
    }

    pci_set_irq(d, (isr != 0));
}

static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);

    s->intrmask = val;

    ivshmem_update_irq(s);
}

static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
{
    uint32_t ret = s->intrmask;

    IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);

    return ret;
}

static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);

    s->intrstatus = val;

    ivshmem_update_irq(s);
}

static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
{
    uint32_t ret = s->intrstatus;

    /* reading ISR clears all interrupts */
    s->intrstatus = 0;

    ivshmem_update_irq(s);

    return ret;
}

static void ivshmem_io_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    IVShmemState *s = opaque;

    uint16_t dest = val >> 16;
    uint16_t vector = val & 0xff;

    addr &= 0xfc;

    IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
    switch (addr)
    {
        case INTRMASK:
            ivshmem_IntrMask_write(s, val);
            break;

        case INTRSTATUS:
            ivshmem_IntrStatus_write(s, val);
            break;

        case DOORBELL:
            /* check that dest VM ID is reasonable */
            if (dest >= s->nb_peers) {
                IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
                break;
            }

            /* check doorbell range */
            if (vector < s->peers[dest].nb_eventfds) {
                IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
                event_notifier_set(&s->peers[dest].eventfds[vector]);
            } else {
                IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n",
                                vector, dest);
            }
            break;
        default:
            IVSHMEM_DPRINTF("Unhandled write " TARGET_FMT_plx "\n", addr);
    }
}

static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    IVShmemState *s = opaque;
    uint32_t ret;

    switch (addr)
    {
        case INTRMASK:
            ret = ivshmem_IntrMask_read(s);
            break;

        case INTRSTATUS:
            ret = ivshmem_IntrStatus_read(s);
            break;

        case IVPOSITION:
            /* return my VM ID if the memory is mapped */
            if (memory_region_is_mapped(&s->ivshmem)) {
                ret = s->vm_id;
            } else {
                ret = -1;
            }
            break;

        default:
            IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
            ret = 0;
    }

    return ret;
}

static const MemoryRegionOps ivshmem_mmio_ops = {
    .read = ivshmem_io_read,
    .write = ivshmem_io_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

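/*
 * Chardev callbacks for the connection to the ivshmem server: we can always
 * accept up to sizeof(int64_t) bytes, since every server message is a
 * little-endian 64-bit peer position.
 */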
static int ivshmem_can_receive(void *opaque)
{
    return sizeof(int64_t);
}

static void ivshmem_event(void *opaque, int event)
{
    IVSHMEM_DPRINTF("ivshmem_event %d\n", event);
}

static void ivshmem_vector_notify(void *opaque)
{
    MSIVector *entry = opaque;
    PCIDevice *pdev = entry->pdev;
    IVShmemState *s = IVSHMEM(pdev);
    int vector = entry - s->msi_vectors;
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];

    if (!event_notifier_test_and_clear(n)) {
        return;
    }

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_notify(pdev, vector);
    } else {
        ivshmem_IntrStatus_write(s, 1);
    }
}

static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
                                 MSIMessage msg)
{
    IVShmemState *s = IVSHMEM(dev);
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    MSIVector *v = &s->msi_vectors[vector];
    int ret;

    IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);

    ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev);
    if (ret < 0) {
        return ret;
    }

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
}

static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
{
    IVShmemState *s = IVSHMEM(dev);
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    int ret;

    IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n,
                                                s->msi_vectors[vector].virq);
    if (ret != 0) {
        error_report("remove_irqfd_notifier_gsi failed");
    }
}

static void ivshmem_vector_poll(PCIDevice *dev,
                                unsigned int vector_start,
                                unsigned int vector_end)
{
    IVShmemState *s = IVSHMEM(dev);
    unsigned int vector;

    IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);

    vector_end = MIN(vector_end, s->vectors);

    for (vector = vector_start; vector < vector_end; vector++) {
        EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];

        if (!msix_is_masked(dev, vector)) {
            continue;
        }

        if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static void watch_vector_notifier(IVShmemState *s, EventNotifier *n,
                                  int vector)
{
    int eventfd = event_notifier_get_fd(n);

    /* if MSI is supported we need multiple interrupts */
    s->msi_vectors[vector].pdev = PCI_DEVICE(s);

    qemu_set_fd_handler(eventfd, ivshmem_vector_notify,
                        NULL, &s->msi_vectors[vector]);
}

static int check_shm_size(IVShmemState *s, int fd, Error **errp)
{
    /* check that the guest isn't going to try and map more memory than the
     * object has allocated; return -1 to indicate error */

    struct stat buf;

    if (fstat(fd, &buf) < 0) {
        error_setg(errp, "exiting: fstat on fd %d failed: %s",
                   fd, strerror(errno));
        return -1;
    }

    if (s->ivshmem_size > buf.st_size) {
        error_setg(errp, "Requested memory size greater"
                   " than shared object size (%" PRIu64 " > %" PRIu64")",
                   s->ivshmem_size, (uint64_t)buf.st_size);
        return -1;
    } else {
        return 0;
    }
}

/* create the shared memory BAR when we are not using the server, so we can
 * create the BAR and map the memory immediately */
static int create_shared_memory_BAR(IVShmemState *s, int fd, uint8_t attr,
                                    Error **errp)
{
    void *ptr;

    ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) {
        error_setg_errno(errp, errno, "Failed to mmap shared memory");
        return -1;
    }

    memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s), "ivshmem.bar2",
                               s->ivshmem_size, ptr);
    qemu_set_ram_fd(s->ivshmem.ram_addr, fd);
    vmstate_register_ram(&s->ivshmem, DEVICE(s));
    memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

    /* region for shared memory */
    pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);

    return 0;
}

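/*
 * Wire a peer's (position, vector) eventfd to the DOORBELL register as an
 * ioeventfd, so a guest doorbell write with matching data kicks the peer
 * directly.
 */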
static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_add_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_del_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void close_peer_eventfds(IVShmemState *s, int posn)
{
    int i, n;

    if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        return;
    }
    if (posn < 0 || posn >= s->nb_peers) {
        error_report("invalid peer %d", posn);
        return;
    }

    n = s->peers[posn].nb_eventfds;

    memory_region_transaction_begin();
    for (i = 0; i < n; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();
    for (i = 0; i < n; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}

/* this function increases the dynamic storage needed to store data about
 * other peers */
static int resize_peers(IVShmemState *s, int new_min_size)
{
    int j, old_size;

    /* limit number of max peers */
    if (new_min_size <= 0 || new_min_size > IVSHMEM_MAX_PEERS) {
        return -1;
    }
    if (new_min_size <= s->nb_peers) {
        return 0;
    }

    old_size = s->nb_peers;
    s->nb_peers = new_min_size;

    IVSHMEM_DPRINTF("bumping storage to %d peers\n", s->nb_peers);

    s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));

    for (j = old_size; j < s->nb_peers; j++) {
        s->peers[j].eventfds = g_new0(EventNotifier, s->vectors);
        s->peers[j].nb_eventfds = 0;
    }

    return 0;
}

static bool fifo_update_and_get(IVShmemState *s, const uint8_t *buf, int size,
                                void *data, size_t len)
{
    const uint8_t *p;
    uint32_t num;

    assert(len <= sizeof(int64_t)); /* limitation of the fifo */
    if (fifo8_is_empty(&s->incoming_fifo) && size == len) {
        memcpy(data, buf, size);
        return true;
    }

    IVSHMEM_DPRINTF("short read of %d bytes\n", size);

    num = MIN(size, sizeof(int64_t) - fifo8_num_used(&s->incoming_fifo));
    fifo8_push_all(&s->incoming_fifo, buf, num);

    if (fifo8_num_used(&s->incoming_fifo) < len) {
        assert(num == 0);
        return false;
    }

    size -= num;
    buf += num;
    p = fifo8_pop_buf(&s->incoming_fifo, len, &num);
    assert(num == len);

    memcpy(data, p, len);

    if (size > 0) {
        fifo8_push_all(&s->incoming_fifo, buf, size);
    }

    return true;
}

static bool fifo_update_and_get_i64(IVShmemState *s,
                                    const uint8_t *buf, int size, int64_t *i64)
{
    if (fifo_update_and_get(s, buf, size, i64, sizeof(*i64))) {
        *i64 = GINT64_FROM_LE(*i64);
        return true;
    }

    return false;
}

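/* Allocate a KVM irqchip route for an MSI-X vector and remember its virq. */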
static int ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    MSIMessage msg = msix_get_message(pdev, vector);
    int ret;

    IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);

    if (s->msi_vectors[vector].pdev != NULL) {
        return 0;
    }

    ret = kvm_irqchip_add_msi_route(kvm_state, msg, pdev);
    if (ret < 0) {
        error_report("ivshmem: kvm_irqchip_add_msi_route failed");
        return -1;
    }

    s->msi_vectors[vector].virq = ret;
    s->msi_vectors[vector].pdev = pdev;

    return 0;
}

static void setup_interrupt(IVShmemState *s, int vector)
{
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
        ivshmem_has_feature(s, IVSHMEM_MSI);
    PCIDevice *pdev = PCI_DEVICE(s);

    IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);

    if (!with_irqfd) {
        IVSHMEM_DPRINTF("with eventfd");
        watch_vector_notifier(s, n, vector);
    } else if (msix_enabled(pdev)) {
        IVSHMEM_DPRINTF("with irqfd");
        if (ivshmem_add_kvm_msi_virq(s, vector) < 0) {
            return;
        }

        if (!msix_is_masked(pdev, vector)) {
            kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
                                               s->msi_vectors[vector].virq);
        }
    } else {
        /* it will be delayed until msix is enabled, in write_config */
        IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled");
    }
}

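/*
 * Handle a message from the ivshmem server.  Each message carries a peer
 * position and may carry a file descriptor: position -1 with an fd is the
 * shared memory region, an fd of -1 either announces our own ID or reports
 * a peer that has gone away, and anything else is an eventfd for one of the
 * peer's interrupt vectors.
 */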
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int incoming_fd;
    int new_eventfd;
    int64_t incoming_posn;
    Error *err = NULL;
    Peer *peer;

    if (!fifo_update_and_get_i64(s, buf, size, &incoming_posn)) {
        return;
    }

    if (incoming_posn < -1) {
        IVSHMEM_DPRINTF("invalid incoming_posn %" PRId64 "\n", incoming_posn);
        return;
    }

    /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
    incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n",
                    incoming_posn, incoming_fd);

    /* make sure we have enough space for this peer */
    if (incoming_posn >= s->nb_peers) {
        if (resize_peers(s, incoming_posn + 1) < 0) {
            error_report("failed to resize peers array");
            if (incoming_fd != -1) {
                close(incoming_fd);
            }
            return;
        }
    }

    peer = &s->peers[incoming_posn];

    if (incoming_fd == -1) {
        /* if posn is positive and unseen before then this is our posn */
        if (incoming_posn >= 0 && s->vm_id == -1) {
            /* receive our posn */
            s->vm_id = incoming_posn;
        } else {
            /* otherwise an fd == -1 means an existing peer has gone away */
            IVSHMEM_DPRINTF("posn %" PRId64 " has gone away\n", incoming_posn);
            close_peer_eventfds(s, incoming_posn);
        }
        return;
    }

    /* if the position is -1, then it's shared memory region fd */
    if (incoming_posn == -1) {
        void *map_ptr;

        if (memory_region_is_mapped(&s->ivshmem)) {
            error_report("shm already initialized");
            close(incoming_fd);
            return;
        }

        if (check_shm_size(s, incoming_fd, &err) == -1) {
            error_report_err(err);
            close(incoming_fd);
            return;
        }

        /* mmap the region and map into the BAR2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       incoming_fd, 0);
        if (map_ptr == MAP_FAILED) {
            error_report("Failed to mmap shared memory %s", strerror(errno));
            close(incoming_fd);
            return;
        }
        memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                   "ivshmem.bar2", s->ivshmem_size, map_ptr);
        qemu_set_ram_fd(s->ivshmem.ram_addr, incoming_fd);
        vmstate_register_ram(&s->ivshmem, DEVICE(s));

        IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
                        map_ptr, s->ivshmem_size);

        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

        return;
    }

    /* each peer has an associated array of eventfds, and we keep
     * track of how many eventfds received so far */
    /* get a new eventfd: */
    if (peer->nb_eventfds >= s->vectors) {
        error_report("Too many eventfds received, device has %d vectors",
                     s->vectors);
        close(incoming_fd);
        return;
    }

    new_eventfd = peer->nb_eventfds++;

    /* this is an eventfd for a particular peer VM */
    IVSHMEM_DPRINTF("eventfds[%" PRId64 "][%d] = %d\n", incoming_posn,
                    new_eventfd, incoming_fd);
    event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
    fcntl_setfl(incoming_fd, O_NONBLOCK); /* msix/irqfd poll non block */

    if (incoming_posn == s->vm_id) {
        setup_interrupt(s, new_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, new_eventfd);
    }
}

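/*
 * The first message from the server is the protocol version; only switch to
 * the real message handler once it checks out.
 */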
static void ivshmem_check_version(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int tmp;
    int64_t version;

    if (!fifo_update_and_get_i64(s, buf, size, &version)) {
        return;
    }

    tmp = qemu_chr_fe_get_msgfd(s->server_chr);
    if (tmp != -1 || version != IVSHMEM_PROTOCOL_VERSION) {
        fprintf(stderr, "incompatible version, you are connecting to an "
                "ivshmem-server using a different protocol, please check "
                "your setup\n");
        qemu_chr_delete(s->server_chr);
        s->server_chr = NULL;
        return;
    }

    IVSHMEM_DPRINTF("version check ok, switch to real chardev handler\n");
    qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
                          ivshmem_event, s);
}

/* Select the MSI-X vectors used by device.
 * ivshmem maps events to vectors statically, so
 * we just enable all vectors on init and after reset. */
static void ivshmem_use_msix(IVShmemState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int i;

    IVSHMEM_DPRINTF("%s, msix present: %d\n", __func__, msix_present(d));
    if (!msix_present(d)) {
        return;
    }

    for (i = 0; i < s->vectors; i++) {
        msix_vector_use(d, i);
    }
}

static void ivshmem_reset(DeviceState *d)
{
    IVShmemState *s = IVSHMEM(d);

    s->intrstatus = 0;
    s->intrmask = 0;
    ivshmem_use_msix(s);
}

static int ivshmem_setup_interrupts(IVShmemState *s)
{
    /* allocate QEMU callback data for receiving interrupts */
    s->msi_vectors = g_malloc0(s->vectors * sizeof(MSIVector));

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
            return -1;
        }

        IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
        ivshmem_use_msix(s);
    }

    return 0;
}

static void ivshmem_enable_irqfd(IVShmemState *s)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    int i;

    for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
        ivshmem_add_kvm_msi_virq(s, i);
    }

    if (msix_set_vector_notifiers(pdev,
                                  ivshmem_vector_unmask,
                                  ivshmem_vector_mask,
                                  ivshmem_vector_poll)) {
        error_report("ivshmem: msix_set_vector_notifiers failed");
    }
}

static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector)
{
    IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector);

    if (s->msi_vectors[vector].pdev == NULL) {
        return;
    }

    /* it was cleaned when masked in the frontend. */
    kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq);

    s->msi_vectors[vector].pdev = NULL;
}

static void ivshmem_disable_irqfd(IVShmemState *s)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    int i;

    for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
        ivshmem_remove_kvm_msi_virq(s, i);
    }

    msix_unset_vector_notifiers(pdev);
}

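/*
 * Config-space writes may toggle MSI-X enable; switch interrupt delivery
 * between KVM irqfd and QEMU-side handlers accordingly.
 */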
static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
                                 uint32_t val, int len)
{
    IVShmemState *s = IVSHMEM(pdev);
    int is_enabled, was_enabled = msix_enabled(pdev);

    pci_default_write_config(pdev, address, val, len);
    is_enabled = msix_enabled(pdev);

    if (kvm_msi_via_irqfd_enabled() && s->vm_id != -1) {
        if (!was_enabled && is_enabled) {
            ivshmem_enable_irqfd(s);
        } else if (was_enabled && !is_enabled) {
            ivshmem_disable_irqfd(s);
        }
    }
}

static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
{
    IVShmemState *s = IVSHMEM(dev);
    uint8_t *pci_conf;
    uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
        PCI_BASE_ADDRESS_MEM_PREFETCH;

    if (!!s->server_chr + !!s->shmobj + !!s->hostmem != 1) {
        error_setg(errp,
                   "You must specify either 'shm', 'chardev' or 'x-memdev'");
        return;
    }

    if (s->hostmem) {
        MemoryRegion *mr;

        if (s->sizearg) {
            g_warning("size argument ignored with hostmem");
        }

        mr = host_memory_backend_get_memory(s->hostmem, errp);
        s->ivshmem_size = memory_region_size(mr);
    } else if (s->sizearg == NULL) {
        s->ivshmem_size = 4 << 20; /* 4 MB default */
    } else {
        char *end;
        int64_t size = qemu_strtosz(s->sizearg, &end);
        if (size < 0 || *end != '\0' || !is_power_of_2(size)) {
            error_setg(errp, "Invalid size %s", s->sizearg);
            return;
        }
        s->ivshmem_size = size;
    }

    fifo8_create(&s->incoming_fifo, sizeof(int64_t));

    /* IRQFD requires MSI */
    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
        !ivshmem_has_feature(s, IVSHMEM_MSI)) {
        error_setg(errp, "ioeventfd/irqfd requires MSI");
        return;
    }

    /* check that role is reasonable */
    if (s->role) {
        if (strncmp(s->role, "peer", 5) == 0) {
            s->role_val = IVSHMEM_PEER;
        } else if (strncmp(s->role, "master", 7) == 0) {
            s->role_val = IVSHMEM_MASTER;
        } else {
            error_setg(errp, "'role' must be 'peer' or 'master'");
            return;
        }
    } else {
        s->role_val = IVSHMEM_MASTER; /* default */
    }

    if (s->role_val == IVSHMEM_PEER) {
        error_setg(&s->migration_blocker,
                   "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
        migrate_add_blocker(s->migration_blocker);
    }

    pci_conf = dev->config;
    pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;

    pci_config_set_interrupt_pin(pci_conf, 1);

    memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s,
                          "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);

    /* region for registers */
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &s->ivshmem_mmio);

    memory_region_init(&s->bar, OBJECT(s), "ivshmem-bar2-container",
                       s->ivshmem_size);
    if (s->ivshmem_64bit) {
        attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
    }

    if (s->hostmem != NULL) {
        MemoryRegion *mr;

        IVSHMEM_DPRINTF("using hostmem\n");

        mr = host_memory_backend_get_memory(MEMORY_BACKEND(s->hostmem), errp);
        vmstate_register_ram(mr, DEVICE(s));
        memory_region_add_subregion(&s->bar, 0, mr);
        pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);
    } else if (s->server_chr != NULL) {
        /* FIXME do not rely on what chr drivers put into filename */
        if (strncmp(s->server_chr->filename, "unix:", 5)) {
            error_setg(errp, "chardev is not a unix client socket");
            return;
        }

        /* if we get a UNIX socket as the parameter we will talk
         * to the ivshmem server to receive the memory region */

        IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
                        s->server_chr->filename);

        if (ivshmem_setup_interrupts(s) < 0) {
            error_setg(errp, "failed to initialize interrupts");
            return;
        }

        /* we allocate enough space for 16 peers and grow as needed */
        resize_peers(s, 16);
        s->vm_id = -1;

        pci_register_bar(dev, 2, attr, &s->bar);

        s->eventfd_chr = g_malloc0(s->vectors * sizeof(CharDriverState *));

        qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive,
                              ivshmem_check_version, ivshmem_event, s);
    } else {
        /* just map the file immediately, we're not using a server */
        int fd;

        IVSHMEM_DPRINTF("using shm_open (shm object = %s)\n", s->shmobj);

        /* try opening with O_EXCL and if it succeeds zero the memory
         * by truncating to 0 */
        if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL,
                           S_IRWXU|S_IRWXG|S_IRWXO)) > 0) {
            /* truncate file to length PCI device's memory */
            if (ftruncate(fd, s->ivshmem_size) != 0) {
                error_report("could not truncate shared file");
            }

        } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR,
                                  S_IRWXU|S_IRWXG|S_IRWXO)) < 0) {
            error_setg(errp, "could not open shared file");
            return;
        }

        if (check_shm_size(s, fd, errp) == -1) {
            return;
        }

        create_shared_memory_BAR(s, fd, attr, errp);
    }
}

static void pci_ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    int i;

    fifo8_destroy(&s->incoming_fifo);

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(&s->ivshmem)) {
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(&s->ivshmem);
            int fd;

            if (munmap(addr, s->ivshmem_size) == -1) {
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }

            if ((fd = qemu_get_ram_fd(s->ivshmem.ram_addr)) != -1) {
                close(fd);
            }
        }

        vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
        memory_region_del_subregion(&s->bar, &s->ivshmem);
    }

    if (s->eventfd_chr) {
        for (i = 0; i < s->vectors; i++) {
            if (s->eventfd_chr[i]) {
                qemu_chr_free(s->eventfd_chr[i]);
            }
        }
        g_free(s->eventfd_chr);
    }

    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}

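/*
 * Migration: carry MSI-X state when the device uses MSI, otherwise the raw
 * interrupt mask/status registers.
 */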
static bool test_msix(void *opaque, int version_id)
{
    IVShmemState *s = opaque;

    return ivshmem_has_feature(s, IVSHMEM_MSI);
}

static bool test_no_msix(void *opaque, int version_id)
{
    return !test_msix(opaque, version_id);
}

static int ivshmem_pre_load(void *opaque)
{
    IVShmemState *s = opaque;

    if (s->role_val == IVSHMEM_PEER) {
        error_report("'peer' devices are not migratable");
        return -EINVAL;
    }

    return 0;
}

static int ivshmem_post_load(void *opaque, int version_id)
{
    IVShmemState *s = opaque;

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        ivshmem_use_msix(s);
    }

    return 0;
}

static int ivshmem_load_old(QEMUFile *f, void *opaque, int version_id)
{
    IVShmemState *s = opaque;
    PCIDevice *pdev = PCI_DEVICE(s);
    int ret;

    IVSHMEM_DPRINTF("ivshmem_load_old\n");

    if (version_id != 0) {
        return -EINVAL;
    }

    if (s->role_val == IVSHMEM_PEER) {
        error_report("'peer' devices are not migratable");
        return -EINVAL;
    }

    ret = pci_device_load(pdev, f);
    if (ret) {
        return ret;
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_load(pdev, f);
        ivshmem_use_msix(s);
    } else {
        s->intrstatus = qemu_get_be32(f);
        s->intrmask = qemu_get_be32(f);
    }

    return 0;
}

static const VMStateDescription ivshmem_vmsd = {
    .name = "ivshmem",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = ivshmem_pre_load,
    .post_load = ivshmem_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),

        VMSTATE_MSIX_TEST(parent_obj, IVShmemState, test_msix),
        VMSTATE_UINT32_TEST(intrstatus, IVShmemState, test_no_msix),
        VMSTATE_UINT32_TEST(intrmask, IVShmemState, test_no_msix),

        VMSTATE_END_OF_LIST()
    },
    .load_state_old = ivshmem_load_old,
    .minimum_version_id_old = 0
};

static Property ivshmem_properties[] = {
    DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
    DEFINE_PROP_STRING("size", IVShmemState, sizearg),
    DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
    DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD,
                    false),
    DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
    DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
    DEFINE_PROP_STRING("role", IVShmemState, role),
    DEFINE_PROP_UINT32("use64", IVShmemState, ivshmem_64bit, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void ivshmem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_ivshmem_realize;
    k->exit = pci_ivshmem_exit;
    k->config_write = ivshmem_write_config;
    k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
    k->device_id = PCI_DEVICE_ID_IVSHMEM;
    k->class_id = PCI_CLASS_MEMORY_RAM;
    dc->reset = ivshmem_reset;
    dc->props = ivshmem_properties;
    dc->vmsd = &ivshmem_vmsd;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Inter-VM shared memory";
}

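/* Refuse to attach a memory backend that is already mapped elsewhere. */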
static void ivshmem_check_memdev_is_busy(Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), errp);
    if (memory_region_is_mapped(mr)) {
        char *path = object_get_canonical_path_component(val);
        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
    }
}

static void ivshmem_init(Object *obj)
{
    IVShmemState *s = IVSHMEM(obj);

    object_property_add_link(obj, "x-memdev", TYPE_MEMORY_BACKEND,
                             (Object **)&s->hostmem,
                             ivshmem_check_memdev_is_busy,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
}

static const TypeInfo ivshmem_info = {
    .name          = TYPE_IVSHMEM,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(IVShmemState),
    .instance_init = ivshmem_init,
    .class_init    = ivshmem_class_init,
};

static void ivshmem_register_types(void)
{
    type_register_static(&ivshmem_info);
}

type_init(ivshmem_register_types)