hw/xen/xen_pt_msi.c

/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

#include "qemu/osdep.h"

#include "hw/xen/xen-legacy-backend.h"
#include "xen_pt.h"
#include "hw/i386/apic-msidef.h"

#define XEN_PT_AUTO_ASSIGN -1

/* shift count for gflags */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID     0
#define XEN_PT_GFLAGS_SHIFT_RH          8
#define XEN_PT_GFLAGS_SHIFT_DM          9
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE  12
#define XEN_PT_GFLAGSSHIFT_TRG_MODE    15
#define XEN_PT_GFLAGSSHIFT_UNMASKED    16
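
/*
 * Shorthand for the per-entry latch[] array: latch(LOWER_ADDR),
 * latch(UPPER_ADDR), latch(DATA) and latch(VECTOR_CTRL) index the latched
 * copy of the corresponding 32-bit MSI-X table field.
 */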
#define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)]

/*
 * Helpers
 */

static inline uint8_t msi_vector(uint32_t data)
{
    return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
}

static inline uint8_t msi_dest_id(uint32_t addr)
{
    return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
}

static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
{
    return addr_hi & 0xffffff00;
}
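
/*
 * Pack the fields Xen needs from the guest's MSI address/data pair into a
 * single gflags word: destination ID, redirection hint, destination mode,
 * delivery mode and trigger mode, using the XEN_PT_GFLAGS* shifts above.
 */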
static uint32_t msi_gflags(uint32_t data, uint64_t addr)
{
    uint32_t result = 0;
    int rh, dm, dest_id, deliv_mode, trig_mode;

    rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
    dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    dest_id = msi_dest_id(addr);
    deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

    result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
        | (dm << XEN_PT_GFLAGS_SHIFT_DM)
        | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
        | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);

    return result;
}

static inline uint64_t msi_addr64(XenPTMSI *msi)
{
    return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
}
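
/*
 * Read-modify-write the MSI or MSI-X control word of the physical device,
 * setting or clearing the given enable flag.
 */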
static int msi_msix_enable(XenPCIPassthroughState *s,
                           uint32_t address,
                           uint16_t flag,
                           bool enable)
{
    uint16_t val = 0;
    int rc;

    if (!address) {
        return -1;
    }

    rc = xen_host_pci_get_word(&s->real_device, address, &val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to read MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
        return rc;
    }
    if (enable) {
        val |= flag;
    } else {
        val &= ~flag;
    }
    rc = xen_host_pci_set_word(&s->real_device, address, val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to write MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
    }
    return rc;
}
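
/*
 * Map a pirq for the vector described by addr/data.  If the guest encoded a
 * pirq directly in the address (xen_is_pirq_msi()), reuse it; otherwise ask
 * Xen to allocate one via xc_physdev_map_pirq_msi() when the vector is not
 * yet mapped.
 */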
static int msi_msix_setup(XenPCIPassthroughState *s,
                          uint64_t addr,
                          uint32_t data,
                          int *ppirq,
                          bool is_msix,
                          int msix_entry,
                          bool is_not_mapped)
{
    uint8_t gvec = msi_vector(data);
    int rc = 0;

    assert((!is_msix && msix_entry == 0) || is_msix);

    if (xen_is_pirq_msi(data)) {
        *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
        if (!*ppirq) {
            /* this probably identifies a misconfiguration of the guest,
             * try the emulated path */
            *ppirq = XEN_PT_UNASSIGNED_PIRQ;
        } else {
            XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
                       " (vec: 0x%x, entry: 0x%x)\n",
                       *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
        }
    }

    if (is_not_mapped) {
        uint64_t table_base = 0;

        if (is_msix) {
            table_base = s->msix->table_base;
        }

        rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
                                     ppirq, PCI_DEVFN(s->real_device.dev,
                                                      s->real_device.func),
                                     s->real_device.bus,
                                     msix_entry, table_base);
        if (rc) {
            XEN_PT_ERR(&s->dev,
                       "Mapping of MSI%s (err: %i, vec: 0x%x, entry 0x%x)\n",
                       is_msix ? "-X" : "", errno, gvec, msix_entry);
            return rc;
        }
    }

    return 0;
}
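
/*
 * Bind/update the guest vector with Xen: rebuild gvec/gflags from the latest
 * addr/data, reflect the mask state via XEN_PT_GFLAGSSHIFT_UNMASKED and call
 * xc_domain_update_msi_irq().  On failure the pirq is unmapped and *old_pirq
 * is reset to XEN_PT_UNASSIGNED_PIRQ.
 */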
static int msi_msix_update(XenPCIPassthroughState *s,
                           uint64_t addr,
                           uint32_t data,
                           int pirq,
                           bool is_msix,
                           int msix_entry,
                           int *old_pirq,
                           bool masked)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;
    uint64_t table_addr = 0;

    XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec 0x%x gflags 0x%x"
               " (entry: 0x%x)\n",
               is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);

    if (is_msix) {
        table_addr = s->msix->mmio_base_addr;
    }

    gflags |= masked ? 0 : (1u << XEN_PT_GFLAGSSHIFT_UNMASKED);

    rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
                                  pirq, gflags, table_addr);

    if (rc) {
        XEN_PT_ERR(d, "Updating of MSI%s failed. (err: %d)\n",
                   is_msix ? "-X" : "", errno);

        if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
            XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %d)\n",
                       is_msix ? "-X" : "", *old_pirq, errno);
        }
        *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
    }
    return rc;
}
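
/*
 * Tear down a vector: unbind it from the guest if it was bound, then unmap
 * the pirq.  A pirq of XEN_PT_UNASSIGNED_PIRQ means there is nothing to do.
 */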
static int msi_msix_disable(XenPCIPassthroughState *s,
                            uint64_t addr,
                            uint32_t data,
                            int pirq,
                            bool is_msix,
                            bool is_binded)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;

    if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
        return 0;
    }

    if (is_binded) {
        XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec 0x%x\n",
                   is_msix ? "-X" : "", pirq, gvec);
        rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
        if (rc) {
            XEN_PT_ERR(d, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: 0x%x)\n",
                       is_msix ? "-X" : "", errno, pirq, gvec);
            return rc;
        }
    }

    XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
    rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
    if (rc) {
        XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %i)\n",
                   is_msix ? "-X" : "", pirq, errno);
        return rc;
    }

    return 0;
}

/*
 * MSI virtualization functions
 */

static int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
{
    XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");

    if (!s->msi) {
        return -1;
    }

    return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
                           enable);
}

/* setup physical msi, but don't enable it */
int xen_pt_msi_setup(XenPCIPassthroughState *s)
{
    int pirq = XEN_PT_UNASSIGNED_PIRQ;
    int rc = 0;
    XenPTMSI *msi = s->msi;

    if (msi->initialized) {
        XEN_PT_ERR(&s->dev,
                   "Setup physical MSI when it has been properly initialized.\n");
        return -1;
    }

    rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
    if (rc) {
        return rc;
    }

    if (pirq < 0) {
        XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
        return -1;
    }

    msi->pirq = pirq;
    XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);

    return 0;
}

int xen_pt_msi_update(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    /* Current MSI emulation in QEMU only supports 1 vector */
    return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
                           false, 0, &msi->pirq, msi->mask & 1);
}

void xen_pt_msi_disable(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    if (!msi) {
        return;
    }

    (void)xen_pt_msi_set_enable(s, false);

    msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
                     msi->initialized);

    /* clear msi info */
    msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
    msi->initialized = false;
    msi->mapped = false;
    msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
}

/*
 * MSI-X virtualization functions
 */

static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
{
    XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");

    if (!s->msix) {
        return -1;
    }

    return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
                           enabled);
}
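
/*
 * Propagate the latched state of a single MSI-X table entry to Xen.  Entries
 * that have not been written by the guest since the last update are skipped.
 */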
static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr,
                                  uint32_t vec_ctrl)
{
    XenPTMSIXEntry *entry = NULL;
    int pirq;
    int rc;

    if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
        return -EINVAL;
    }

    entry = &s->msix->msix_entry[entry_nr];

    if (!entry->updated) {
        return 0;
    }

    pirq = entry->pirq;

    /*
     * Update the entry addr and data to the latest values only when the
     * entry is masked or they are all masked, as required by the spec.
     * Addr and data changes while the MSI-X entry is unmasked get deferred
     * until the next masked -> unmasked transition.
     */
    if (pirq == XEN_PT_UNASSIGNED_PIRQ || s->msix->maskall ||
        (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        entry->addr = entry->latch(LOWER_ADDR) |
                      ((uint64_t)entry->latch(UPPER_ADDR) << 32);
        entry->data = entry->latch(DATA);
    }

    rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                        entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
    if (rc) {
        return rc;
    }
    if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
        entry->pirq = pirq;
    }

    rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
                         entry_nr, &entry->pirq,
                         vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);

    if (!rc) {
        entry->updated = false;
    }

    return rc;
}

int xen_pt_msix_update(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;
    int i;

    for (i = 0; i < msix->total_entries; i++) {
        xen_pt_msix_update_one(s, i, msix->msix_entry[i].latch(VECTOR_CTRL));
    }

    return 0;
}

void xen_pt_msix_disable(XenPCIPassthroughState *s)
{
    int i = 0;

    msix_set_enable(s, false);

    for (i = 0; i < s->msix->total_entries; i++) {
        XenPTMSIXEntry *entry = &s->msix->msix_entry[i];

        msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);

        /* clear MSI-X info */
        entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
        entry->updated = false;
    }
}
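
/*
 * Re-establish MSI-X bindings for the BAR that holds the table: every mapped
 * entry is unbound, flagged as updated and then rebound via
 * xen_pt_msix_update().
 */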
int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
{
    XenPTMSIXEntry *entry;
    int i, ret;

    if (!(s->msix && s->msix->bar_index == bar_index)) {
        return 0;
    }

    for (i = 0; i < s->msix->total_entries; i++) {
        entry = &s->msix->msix_entry[i];
        if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
                                          PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
            if (ret) {
                XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed (err: %d)\n",
                           entry->pirq, errno);
            }
            entry->updated = true;
        }
    }
    return xen_pt_msix_update(s);
}

static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
{
    assert(!(offset % sizeof(*e->latch)));
    return e->latch[offset / sizeof(*e->latch)];
}

static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
{
    assert(!(offset % sizeof(*e->latch)));
    e->latch[offset / sizeof(*e->latch)] = val;
}
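
/*
 * MMIO write handler for the virtual MSI-X table.  Address/data writes only
 * latch the new value and mark the entry as updated; writes to the vector
 * control word that unmask a pending update push it to Xen, reading the mask
 * bit back from the physical table in case Xen intercepted it.
 */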
static void pci_msix_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    XenPTMSIXEntry *entry;
    unsigned int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr >= msix->total_entries) {
        return;
    }
    entry = &msix->msix_entry[entry_nr];
    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
        if (get_entry_value(entry, offset) == val
            && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            return;
        }

        entry->updated = true;
    } else if (msix->enabled && entry->updated &&
               !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        const volatile uint32_t *vec_ctrl;

        /*
         * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
         * up-to-date. Read from hardware directly.
         */
        vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
            + PCI_MSIX_ENTRY_VECTOR_CTRL;
        xen_pt_msix_update_one(s, entry_nr, *vec_ctrl);
    }

    set_entry_value(entry, offset, val);
}
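
/*
 * MMIO read handler: table reads are served from the latched entries, while
 * reads beyond the table (the Pending Bit Array) go to the physical mapping.
 */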
static uint64_t pci_msix_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0) {
        XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
        return 0;
    }

    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
        return get_entry_value(&msix->msix_entry[entry_nr], offset);
    } else {
        /* Pending Bit Array (PBA) */
        return *(uint32_t *)(msix->phys_iomem_base + addr);
    }
}

static bool pci_msix_accepts(void *opaque, hwaddr addr,
                             unsigned size, bool is_write,
                             MemTxAttrs attrs)
{
    return !(addr & (size - 1));
}

static const MemoryRegionOps pci_msix_ops = {
    .read = pci_msix_read,
    .write = pci_msix_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
        .accepts = pci_msix_accepts
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false
    }
};
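
/*
 * Discover the MSI-X capability of the physical device, allocate the
 * per-entry state, map the physical table read-only through /dev/mem and
 * overlay the emulated table MMIO region on the corresponding BAR.
 */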
int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
{
    uint8_t id = 0;
    uint16_t control = 0;
    uint32_t table_off = 0;
    int i, total_entries, bar_index;
    XenHostPCIDevice *hd = &s->real_device;
    PCIDevice *d = &s->dev;
    int fd = -1;
    XenPTMSIX *msix = NULL;
    int rc = 0;

    rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
    if (rc) {
        return rc;
    }

    if (id != PCI_CAP_ID_MSIX) {
        XEN_PT_ERR(d, "Invalid id 0x%x base 0x%x\n", id, base);
        return -1;
    }

    rc = xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
    if (rc) {
        XEN_PT_ERR(d, "Failed to read PCI_MSIX_FLAGS field\n");
        return rc;
    }
    total_entries = control & PCI_MSIX_FLAGS_QSIZE;
    total_entries += 1;

    s->msix = g_malloc0(sizeof (XenPTMSIX)
                        + total_entries * sizeof (XenPTMSIXEntry));
    msix = s->msix;

    msix->total_entries = total_entries;
    for (i = 0; i < total_entries; i++) {
        msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
    }

    memory_region_init_io(&msix->mmio, OBJECT(s), &pci_msix_ops,
                          s, "xen-pci-pt-msix",
                          (total_entries * PCI_MSIX_ENTRY_SIZE
                           + XC_PAGE_SIZE - 1)
                          & XC_PAGE_MASK);

    rc = xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
    if (rc) {
        XEN_PT_ERR(d, "Failed to read PCI_MSIX_TABLE field\n");
        goto error_out;
    }
    bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
    table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->table_base = s->real_device.io_regions[bar_index].base_addr;
    XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);

    fd = open("/dev/mem", O_RDWR);
    if (fd == -1) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
        goto error_out;
    }
    XEN_PT_LOG(d, "table_off = 0x%x, total_entries = %d\n",
               table_off, total_entries);
    msix->table_offset_adjust = table_off & 0x0fff;
    msix->phys_iomem_base =
        mmap(NULL,
             total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
             PROT_READ,
             MAP_SHARED | MAP_LOCKED,
             fd,
             msix->table_base + table_off - msix->table_offset_adjust);
    close(fd);
    if (msix->phys_iomem_base == MAP_FAILED) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
        goto error_out;
    }
    msix->phys_iomem_base = (char *)msix->phys_iomem_base
        + msix->table_offset_adjust;

    XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
               msix->phys_iomem_base);

    memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
                                        &msix->mmio,
                                        2); /* Priority: pci default + 1 */

    return 0;

error_out:
    g_free(s->msix);
    s->msix = NULL;
    return rc;
}

void xen_pt_msix_unmap(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    /* unmap the MSI-X memory mapped register area */
    if (msix->phys_iomem_base) {
        XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
                   msix->phys_iomem_base);
        munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE
               + msix->table_offset_adjust);
    }

    memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
}

void xen_pt_msix_delete(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    object_unparent(OBJECT(&msix->mmio));

    g_free(s->msix);
    s->msix = NULL;
}