throttle: add throttle_max_is_missing_limit() test
[qemu/ar7.git] / hw / xen / xen_pt_msi.c
blob263e0514a23b66b0f6f41111db272934db257b85
/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */
12 #include <sys/mman.h>
14 #include "hw/xen/xen_backend.h"
15 #include "xen_pt.h"
16 #include "hw/i386/apic-msidef.h"
/* Sentinel telling Xen to pick the pirq number itself. */
#define XEN_PT_AUTO_ASSIGN -1

/* Bit positions used when packing MSI attributes into the gflags word. */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID    0   /* destination APIC id */
#define XEN_PT_GFLAGS_SHIFT_RH         8   /* redirection hint */
#define XEN_PT_GFLAGS_SHIFT_DM         9   /* destination mode */
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE 12   /* delivery mode */
#define XEN_PT_GFLAGSSHIFT_TRG_MODE   15   /* trigger mode */
/*
 * Helpers
 */
33 static inline uint8_t msi_vector(uint32_t data)
35 return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
38 static inline uint8_t msi_dest_id(uint32_t addr)
40 return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
/*
 * Extract the extended destination id bits carried in the high MSI
 * address word; the low byte is masked off.
 */
static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
{
    return addr_hi & ~(uint32_t)0xff;
}
48 static uint32_t msi_gflags(uint32_t data, uint64_t addr)
50 uint32_t result = 0;
51 int rh, dm, dest_id, deliv_mode, trig_mode;
53 rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
54 dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
55 dest_id = msi_dest_id(addr);
56 deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
57 trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
59 result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
60 | (dm << XEN_PT_GFLAGS_SHIFT_DM)
61 | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
62 | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);
64 return result;
67 static inline uint64_t msi_addr64(XenPTMSI *msi)
69 return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
72 static int msi_msix_enable(XenPCIPassthroughState *s,
73 uint32_t address,
74 uint16_t flag,
75 bool enable)
77 uint16_t val = 0;
79 if (!address) {
80 return -1;
83 xen_host_pci_get_word(&s->real_device, address, &val);
84 if (enable) {
85 val |= flag;
86 } else {
87 val &= ~flag;
89 xen_host_pci_set_word(&s->real_device, address, val);
90 return 0;
93 static int msi_msix_setup(XenPCIPassthroughState *s,
94 uint64_t addr,
95 uint32_t data,
96 int *ppirq,
97 bool is_msix,
98 int msix_entry,
99 bool is_not_mapped)
101 uint8_t gvec = msi_vector(data);
102 int rc = 0;
104 assert((!is_msix && msix_entry == 0) || is_msix);
106 if (gvec == 0) {
107 /* if gvec is 0, the guest is asking for a particular pirq that
108 * is passed as dest_id */
109 *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
110 if (!*ppirq) {
111 /* this probably identifies an misconfiguration of the guest,
112 * try the emulated path */
113 *ppirq = XEN_PT_UNASSIGNED_PIRQ;
114 } else {
115 XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
116 " (vec: %#x, entry: %#x)\n",
117 *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
121 if (is_not_mapped) {
122 uint64_t table_base = 0;
124 if (is_msix) {
125 table_base = s->msix->table_base;
128 rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
129 ppirq, PCI_DEVFN(s->real_device.dev,
130 s->real_device.func),
131 s->real_device.bus,
132 msix_entry, table_base);
133 if (rc) {
134 XEN_PT_ERR(&s->dev,
135 "Mapping of MSI%s (err: %i, vec: %#x, entry %#x)\n",
136 is_msix ? "-X" : "", errno, gvec, msix_entry);
137 return rc;
141 return 0;
143 static int msi_msix_update(XenPCIPassthroughState *s,
144 uint64_t addr,
145 uint32_t data,
146 int pirq,
147 bool is_msix,
148 int msix_entry,
149 int *old_pirq)
151 PCIDevice *d = &s->dev;
152 uint8_t gvec = msi_vector(data);
153 uint32_t gflags = msi_gflags(data, addr);
154 int rc = 0;
155 uint64_t table_addr = 0;
157 XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x"
158 " (entry: %#x)\n",
159 is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);
161 if (is_msix) {
162 table_addr = s->msix->mmio_base_addr;
165 rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
166 pirq, gflags, table_addr);
168 if (rc) {
169 XEN_PT_ERR(d, "Updating of MSI%s failed. (err: %d)\n",
170 is_msix ? "-X" : "", errno);
172 if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
173 XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %d)\n",
174 is_msix ? "-X" : "", *old_pirq, errno);
176 *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
178 return rc;
181 static int msi_msix_disable(XenPCIPassthroughState *s,
182 uint64_t addr,
183 uint32_t data,
184 int pirq,
185 bool is_msix,
186 bool is_binded)
188 PCIDevice *d = &s->dev;
189 uint8_t gvec = msi_vector(data);
190 uint32_t gflags = msi_gflags(data, addr);
191 int rc = 0;
193 if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
194 return 0;
197 if (is_binded) {
198 XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n",
199 is_msix ? "-X" : "", pirq, gvec);
200 rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
201 if (rc) {
202 XEN_PT_ERR(d, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: %#x)\n",
203 is_msix ? "-X" : "", errno, pirq, gvec);
204 return rc;
208 XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
209 rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
210 if (rc) {
211 XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %i)\n",
212 is_msix ? "-X" : "", pirq, errno);
213 return rc;
216 return 0;
/*
 * MSI virtualization functions
 */
223 int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
225 XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");
227 if (!s->msi) {
228 return -1;
231 return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
232 enable);
235 /* setup physical msi, but don't enable it */
236 int xen_pt_msi_setup(XenPCIPassthroughState *s)
238 int pirq = XEN_PT_UNASSIGNED_PIRQ;
239 int rc = 0;
240 XenPTMSI *msi = s->msi;
242 if (msi->initialized) {
243 XEN_PT_ERR(&s->dev,
244 "Setup physical MSI when it has been properly initialized.\n");
245 return -1;
248 rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
249 if (rc) {
250 return rc;
253 if (pirq < 0) {
254 XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
255 return -1;
258 msi->pirq = pirq;
259 XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);
261 return 0;
264 int xen_pt_msi_update(XenPCIPassthroughState *s)
266 XenPTMSI *msi = s->msi;
267 return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
268 false, 0, &msi->pirq);
271 void xen_pt_msi_disable(XenPCIPassthroughState *s)
273 XenPTMSI *msi = s->msi;
275 if (!msi) {
276 return;
279 xen_pt_msi_set_enable(s, false);
281 msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
282 msi->initialized);
284 /* clear msi info */
285 msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
286 msi->initialized = false;
287 msi->mapped = false;
288 msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
/*
 * MSI-X virtualization functions
 */
295 static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
297 XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");
299 if (!s->msix) {
300 return -1;
303 return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
304 enabled);
307 static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr)
309 XenPTMSIXEntry *entry = NULL;
310 int pirq;
311 int rc;
313 if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
314 return -EINVAL;
317 entry = &s->msix->msix_entry[entry_nr];
319 if (!entry->updated) {
320 return 0;
323 pirq = entry->pirq;
325 rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
326 entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
327 if (rc) {
328 return rc;
330 if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
331 entry->pirq = pirq;
334 rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
335 entry_nr, &entry->pirq);
337 if (!rc) {
338 entry->updated = false;
341 return rc;
344 int xen_pt_msix_update(XenPCIPassthroughState *s)
346 XenPTMSIX *msix = s->msix;
347 int i;
349 for (i = 0; i < msix->total_entries; i++) {
350 xen_pt_msix_update_one(s, i);
353 return 0;
356 void xen_pt_msix_disable(XenPCIPassthroughState *s)
358 int i = 0;
360 msix_set_enable(s, false);
362 for (i = 0; i < s->msix->total_entries; i++) {
363 XenPTMSIXEntry *entry = &s->msix->msix_entry[i];
365 msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);
367 /* clear MSI-X info */
368 entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
369 entry->updated = false;
373 int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
375 XenPTMSIXEntry *entry;
376 int i, ret;
378 if (!(s->msix && s->msix->bar_index == bar_index)) {
379 return 0;
382 for (i = 0; i < s->msix->total_entries; i++) {
383 entry = &s->msix->msix_entry[i];
384 if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
385 ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
386 PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
387 if (ret) {
388 XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed (err: %d)\n",
389 entry->pirq, errno);
391 entry->updated = true;
394 return xen_pt_msix_update(s);
397 static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
399 switch (offset) {
400 case PCI_MSIX_ENTRY_LOWER_ADDR:
401 return e->addr & UINT32_MAX;
402 case PCI_MSIX_ENTRY_UPPER_ADDR:
403 return e->addr >> 32;
404 case PCI_MSIX_ENTRY_DATA:
405 return e->data;
406 case PCI_MSIX_ENTRY_VECTOR_CTRL:
407 return e->vector_ctrl;
408 default:
409 return 0;
413 static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
415 switch (offset) {
416 case PCI_MSIX_ENTRY_LOWER_ADDR:
417 e->addr = (e->addr & ((uint64_t)UINT32_MAX << 32)) | val;
418 break;
419 case PCI_MSIX_ENTRY_UPPER_ADDR:
420 e->addr = (uint64_t)val << 32 | (e->addr & UINT32_MAX);
421 break;
422 case PCI_MSIX_ENTRY_DATA:
423 e->data = val;
424 break;
425 case PCI_MSIX_ENTRY_VECTOR_CTRL:
426 e->vector_ctrl = val;
427 break;
431 static void pci_msix_write(void *opaque, hwaddr addr,
432 uint64_t val, unsigned size)
434 XenPCIPassthroughState *s = opaque;
435 XenPTMSIX *msix = s->msix;
436 XenPTMSIXEntry *entry;
437 unsigned int entry_nr, offset;
439 entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
440 if (entry_nr >= msix->total_entries) {
441 return;
443 entry = &msix->msix_entry[entry_nr];
444 offset = addr % PCI_MSIX_ENTRY_SIZE;
446 if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
447 const volatile uint32_t *vec_ctrl;
449 if (get_entry_value(entry, offset) == val
450 && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
451 return;
455 * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
456 * up-to-date. Read from hardware directly.
458 vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
459 + PCI_MSIX_ENTRY_VECTOR_CTRL;
461 if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
462 if (!entry->warned) {
463 entry->warned = true;
464 XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is"
465 " already enabled.\n", entry_nr);
467 return;
470 entry->updated = true;
473 set_entry_value(entry, offset, val);
475 if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) {
476 if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
477 xen_pt_msix_update_one(s, entry_nr);
482 static uint64_t pci_msix_read(void *opaque, hwaddr addr,
483 unsigned size)
485 XenPCIPassthroughState *s = opaque;
486 XenPTMSIX *msix = s->msix;
487 int entry_nr, offset;
489 entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
490 if (entry_nr < 0) {
491 XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
492 return 0;
495 offset = addr % PCI_MSIX_ENTRY_SIZE;
497 if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
498 return get_entry_value(&msix->msix_entry[entry_nr], offset);
499 } else {
500 /* Pending Bit Array (PBA) */
501 return *(uint32_t *)(msix->phys_iomem_base + addr);
505 static const MemoryRegionOps pci_msix_ops = {
506 .read = pci_msix_read,
507 .write = pci_msix_write,
508 .endianness = DEVICE_NATIVE_ENDIAN,
509 .valid = {
510 .min_access_size = 4,
511 .max_access_size = 4,
512 .unaligned = false,
516 int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
518 uint8_t id = 0;
519 uint16_t control = 0;
520 uint32_t table_off = 0;
521 int i, total_entries, bar_index;
522 XenHostPCIDevice *hd = &s->real_device;
523 PCIDevice *d = &s->dev;
524 int fd = -1;
525 XenPTMSIX *msix = NULL;
526 int rc = 0;
528 rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
529 if (rc) {
530 return rc;
533 if (id != PCI_CAP_ID_MSIX) {
534 XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base);
535 return -1;
538 xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
539 total_entries = control & PCI_MSIX_FLAGS_QSIZE;
540 total_entries += 1;
542 s->msix = g_malloc0(sizeof (XenPTMSIX)
543 + total_entries * sizeof (XenPTMSIXEntry));
544 msix = s->msix;
546 msix->total_entries = total_entries;
547 for (i = 0; i < total_entries; i++) {
548 msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
551 memory_region_init_io(&msix->mmio, OBJECT(s), &pci_msix_ops,
552 s, "xen-pci-pt-msix",
553 (total_entries * PCI_MSIX_ENTRY_SIZE
554 + XC_PAGE_SIZE - 1)
555 & XC_PAGE_MASK);
557 xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
558 bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
559 table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
560 msix->table_base = s->real_device.io_regions[bar_index].base_addr;
561 XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);
563 fd = open("/dev/mem", O_RDWR);
564 if (fd == -1) {
565 rc = -errno;
566 XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
567 goto error_out;
569 XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n",
570 table_off, total_entries);
571 msix->table_offset_adjust = table_off & 0x0fff;
572 msix->phys_iomem_base =
573 mmap(NULL,
574 total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
575 PROT_READ,
576 MAP_SHARED | MAP_LOCKED,
578 msix->table_base + table_off - msix->table_offset_adjust);
579 close(fd);
580 if (msix->phys_iomem_base == MAP_FAILED) {
581 rc = -errno;
582 XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
583 goto error_out;
585 msix->phys_iomem_base = (char *)msix->phys_iomem_base
586 + msix->table_offset_adjust;
588 XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
589 msix->phys_iomem_base);
591 memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
592 &msix->mmio,
593 2); /* Priority: pci default + 1 */
595 return 0;
597 error_out:
598 g_free(s->msix);
599 s->msix = NULL;
600 return rc;
603 void xen_pt_msix_delete(XenPCIPassthroughState *s)
605 XenPTMSIX *msix = s->msix;
607 if (!msix) {
608 return;
611 /* unmap the MSI-X memory mapped register area */
612 if (msix->phys_iomem_base) {
613 XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
614 msix->phys_iomem_base);
615 munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE
616 + msix->table_offset_adjust);
619 memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
621 g_free(s->msix);
622 s->msix = NULL;