2 * Copyright (c) 2007, Intel Corporation.
4 * This work is licensed under the terms of the GNU GPL, version 2. See
5 * the COPYING file in the top-level directory.
7 * Jiang Yunhong <yunhong.jiang@intel.com>
9 * This file implements direct PCI assignment to a HVM guest
12 #include "qemu/osdep.h"
15 #include "hw/xen/xen_backend.h"
17 #include "hw/i386/apic-msidef.h"
/* Sentinel passed to xc_physdev_map_pirq_msi below; -1 asks Xen to pick
 * the pirq itself — see its use in msi_msix_setup(). */
20 #define XEN_PT_AUTO_ASSIGN -1
22 /* shift count for gflags */
23 #define XEN_PT_GFLAGS_SHIFT_DEST_ID 0
24 #define XEN_PT_GFLAGS_SHIFT_RH 8
25 #define XEN_PT_GFLAGS_SHIFT_DM 9
26 #define XEN_PT_GFLAGSSHIFT_DELIV_MODE 12
27 #define XEN_PT_GFLAGSSHIFT_TRG_MODE 15
/* Index one 32-bit word of an entry's latched register copy by field name,
 * e.g. latch(DATA) -> latch[PCI_MSIX_ENTRY_DATA / sizeof(uint32_t)]. */
29 #define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)]
35 static inline uint8_t msi_vector(uint32_t data
)
37 return (data
& MSI_DATA_VECTOR_MASK
) >> MSI_DATA_VECTOR_SHIFT
;
40 static inline uint8_t msi_dest_id(uint32_t addr
)
42 return (addr
& MSI_ADDR_DEST_ID_MASK
) >> MSI_ADDR_DEST_ID_SHIFT
;
/* The upper 24 bits of the high address word carry the extended
 * destination ID; the low byte is masked off. */
static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
{
    const uint32_t ext_mask = 0xffffff00;

    return addr_hi & ext_mask;
}
50 static uint32_t msi_gflags(uint32_t data
, uint64_t addr
)
53 int rh
, dm
, dest_id
, deliv_mode
, trig_mode
;
55 rh
= (addr
>> MSI_ADDR_REDIRECTION_SHIFT
) & 0x1;
56 dm
= (addr
>> MSI_ADDR_DEST_MODE_SHIFT
) & 0x1;
57 dest_id
= msi_dest_id(addr
);
58 deliv_mode
= (data
>> MSI_DATA_DELIVERY_MODE_SHIFT
) & 0x7;
59 trig_mode
= (data
>> MSI_DATA_TRIGGER_SHIFT
) & 0x1;
61 result
= dest_id
| (rh
<< XEN_PT_GFLAGS_SHIFT_RH
)
62 | (dm
<< XEN_PT_GFLAGS_SHIFT_DM
)
63 | (deliv_mode
<< XEN_PT_GFLAGSSHIFT_DELIV_MODE
)
64 | (trig_mode
<< XEN_PT_GFLAGSSHIFT_TRG_MODE
);
69 static inline uint64_t msi_addr64(XenPTMSI
*msi
)
71 return (uint64_t)msi
->addr_hi
<< 32 | msi
->addr_lo
;
/*
 * Toggle an enable bit in the physical device's MSI/MSI-X control word.
 * NOTE(review): this extract is truncated — the remaining parameters and
 * several body lines (flag manipulation, returns) are not visible here.
 */
74 static int msi_msix_enable(XenPCIPassthroughState
*s
,
/* Read the current control word from the real (host) device. */
86 rc
= xen_host_pci_get_word(&s
->real_device
, address
, &val
);
88 XEN_PT_ERR(&s
->dev
, "Failed to read MSI/MSI-X register (0x%x), rc:%d\n",
/* Write the control word back — the modification of 'val' happens in
 * lines elided from this view; TODO confirm against full source. */
97 rc
= xen_host_pci_set_word(&s
->real_device
, address
, val
);
99 XEN_PT_ERR(&s
->dev
, "Failed to write MSI/MSI-X register (0x%x), rc:%d\n",
/*
 * Map a guest MSI/MSI-X vector to a physical pirq via the Xen hypervisor.
 * NOTE(review): truncated extract — the full parameter list and some
 * control flow (branch conditions, returns) are not visible here.
 */
105 static int msi_msix_setup(XenPCIPassthroughState
*s
,
113 uint8_t gvec
= msi_vector(data
);
/* Only MSI-X may use a non-zero table entry index. */
116 assert((!is_msix
&& msix_entry
== 0) || is_msix
);
119 /* if gvec is 0, the guest is asking for a particular pirq that
120 * is passed as dest_id */
121 *ppirq
= msi_ext_dest_id(addr
>> 32) | msi_dest_id(addr
);
123 /* this probably identifies a misconfiguration of the guest,
124 * try the emulated path */
125 *ppirq
= XEN_PT_UNASSIGNED_PIRQ
;
127 XEN_PT_LOG(&s
->dev
, "requested pirq %d for MSI%s"
128 " (vec: %#x, entry: %#x)\n",
129 *ppirq
, is_msix
? "-X" : "", gvec
, msix_entry
);
/* MSI-X mappings additionally pass the physical table base address. */
134 uint64_t table_base
= 0;
137 table_base
= s
->msix
->table_base
;
/* Ask Xen to map (or auto-assign, via XEN_PT_AUTO_ASSIGN) a pirq. */
140 rc
= xc_physdev_map_pirq_msi(xen_xc
, xen_domid
, XEN_PT_AUTO_ASSIGN
,
141 ppirq
, PCI_DEVFN(s
->real_device
.dev
,
142 s
->real_device
.func
),
144 msix_entry
, table_base
);
147 "Mapping of MSI%s (err: %i, vec: %#x, entry %#x)\n",
148 is_msix
? "-X" : "", errno
, gvec
, msix_entry
);
/*
 * Update an existing pirq binding with the current guest vector/gflags.
 * NOTE(review): truncated extract — the full parameter list and the
 * branch structure around the error path are not visible here.
 */
155 static int msi_msix_update(XenPCIPassthroughState
*s
,
163 PCIDevice
*d
= &s
->dev
;
164 uint8_t gvec
= msi_vector(data
);
165 uint32_t gflags
= msi_gflags(data
, addr
);
167 uint64_t table_addr
= 0;
169 XEN_PT_LOG(d
, "Updating MSI%s with pirq %d gvec %#x gflags %#x"
171 is_msix
? "-X" : "", pirq
, gvec
, gflags
, msix_entry
);
/* MSI-X updates also pass the MMIO base of the vector table. */
174 table_addr
= s
->msix
->mmio_base_addr
;
177 rc
= xc_domain_update_msi_irq(xen_xc
, xen_domid
, gvec
,
178 pirq
, gflags
, table_addr
);
181 XEN_PT_ERR(d
, "Updating of MSI%s failed. (err: %d)\n",
182 is_msix
? "-X" : "", errno
);
/* Appears to tear down the stale mapping after an update failure —
 * the enclosing branch is elided from this view; confirm in full source. */
184 if (xc_physdev_unmap_pirq(xen_xc
, xen_domid
, *old_pirq
)) {
185 XEN_PT_ERR(d
, "Unmapping of MSI%s pirq %d failed. (err: %d)\n",
186 is_msix
? "-X" : "", *old_pirq
, errno
);
188 *old_pirq
= XEN_PT_UNASSIGNED_PIRQ
;
/*
 * Unbind and unmap a previously established MSI/MSI-X pirq.
 * NOTE(review): truncated extract — the full parameter list and some
 * branch/return lines are not visible here.
 */
193 static int msi_msix_disable(XenPCIPassthroughState
*s
,
200 PCIDevice
*d
= &s
->dev
;
201 uint8_t gvec
= msi_vector(data
);
202 uint32_t gflags
= msi_gflags(data
, addr
);
/* Nothing to do when no pirq was ever assigned. */
205 if (pirq
== XEN_PT_UNASSIGNED_PIRQ
) {
210 XEN_PT_LOG(d
, "Unbind MSI%s with pirq %d, gvec %#x\n",
211 is_msix
? "-X" : "", pirq
, gvec
);
/* First unbind the guest vector from the pirq... */
212 rc
= xc_domain_unbind_msi_irq(xen_xc
, xen_domid
, gvec
, pirq
, gflags
);
214 XEN_PT_ERR(d
, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: %#x)\n",
215 is_msix
? "-X" : "", errno
, pirq
, gvec
);
/* ...then release the pirq mapping itself. */
220 XEN_PT_LOG(d
, "Unmap MSI%s pirq %d\n", is_msix
? "-X" : "", pirq
);
221 rc
= xc_physdev_unmap_pirq(xen_xc
, xen_domid
, pirq
);
223 XEN_PT_ERR(d
, "Unmapping of MSI%s pirq %d failed. (err: %i)\n",
224 is_msix
? "-X" : "", pirq
, errno
);
232 * MSI virtualization functions
/*
 * Enable or disable MSI on the physical device by flipping
 * PCI_MSI_FLAGS_ENABLE in its control register.
 * NOTE(review): the trailing argument(s) to msi_msix_enable are elided
 * from this view.
 */
235 static int xen_pt_msi_set_enable(XenPCIPassthroughState
*s
, bool enable
)
237 XEN_PT_LOG(&s
->dev
, "%s MSI.\n", enable
? "enabling" : "disabling");
243 return msi_msix_enable(s
, s
->msi
->ctrl_offset
, PCI_MSI_FLAGS_ENABLE
,
247 /* setup physical msi, but don't enable it */
/*
 * NOTE(review): truncated extract — error-return paths and the storing
 * of the resulting pirq into s->msi are not visible here.
 */
248 int xen_pt_msi_setup(XenPCIPassthroughState
*s
)
250 int pirq
= XEN_PT_UNASSIGNED_PIRQ
;
252 XenPTMSI
*msi
= s
->msi
;
/* Guard against double initialization. */
254 if (msi
->initialized
) {
256 "Setup physical MSI when it has been properly initialized.\n");
/* Map the guest MSI (single vector, entry 0) to a physical pirq. */
260 rc
= msi_msix_setup(s
, msi_addr64(msi
), msi
->data
, &pirq
, false, 0, true);
266 XEN_PT_ERR(&s
->dev
, "Invalid pirq number: %d.\n", pirq
);
271 XEN_PT_LOG(&s
->dev
, "MSI mapped with pirq %d.\n", pirq
);
276 int xen_pt_msi_update(XenPCIPassthroughState
*s
)
278 XenPTMSI
*msi
= s
->msi
;
279 return msi_msix_update(s
, msi_addr64(msi
), msi
->data
, msi
->pirq
,
280 false, 0, &msi
->pirq
);
/*
 * Disable MSI on the device and tear down its physical binding.
 * NOTE(review): truncated extract — guard conditions and the trailing
 * arguments of the msi_msix_disable call are not visible here.
 */
283 void xen_pt_msi_disable(XenPCIPassthroughState
*s
)
285 XenPTMSI
*msi
= s
->msi
;
/* Clear the enable bit first; result deliberately ignored. */
291 (void)xen_pt_msi_set_enable(s
, false);
293 msi_msix_disable(s
, msi_addr64(msi
), msi
->data
, msi
->pirq
, false,
/* Reset the emulated state so a later setup starts clean. */
297 msi
->flags
&= ~PCI_MSI_FLAGS_ENABLE
;
298 msi
->initialized
= false;
300 msi
->pirq
= XEN_PT_UNASSIGNED_PIRQ
;
304 * MSI-X virtualization functions
/*
 * Enable or disable MSI-X on the physical device by flipping
 * PCI_MSIX_FLAGS_ENABLE in its control register.
 * NOTE(review): the trailing argument(s) to msi_msix_enable are elided
 * from this view.
 */
307 static int msix_set_enable(XenPCIPassthroughState
*s
, bool enabled
)
309 XEN_PT_LOG(&s
->dev
, "%s MSI-X.\n", enabled
? "enabling" : "disabling");
315 return msi_msix_enable(s
, s
->msix
->ctrl_offset
, PCI_MSIX_FLAGS_ENABLE
,
/*
 * Sync one MSI-X table entry's guest view to the physical binding:
 * refresh addr/data from the latch when the spec allows it, then either
 * set up a new pirq mapping or update the existing one.
 * NOTE(review): truncated extract — remaining parameters, early returns
 * and the pirq bookkeeping between steps are not visible here.
 */
319 static int xen_pt_msix_update_one(XenPCIPassthroughState
*s
, int entry_nr
,
322 XenPTMSIXEntry
*entry
= NULL
;
/* Bounds-check the entry index against the table size. */
326 if (entry_nr
< 0 || entry_nr
>= s
->msix
->total_entries
) {
330 entry
= &s
->msix
->msix_entry
[entry_nr
];
/* Nothing deferred for this entry — nothing to sync. */
332 if (!entry
->updated
) {
339 * Update the entry addr and data to the latest values only when the
340 * entry is masked or they are all masked, as required by the spec.
341 * Addr and data changes while the MSI-X entry is unmasked get deferred
342 * until the next masked -> unmasked transition.
344 if (pirq
== XEN_PT_UNASSIGNED_PIRQ
|| s
->msix
->maskall
||
345 (vec_ctrl
& PCI_MSIX_ENTRY_CTRL_MASKBIT
)) {
346 entry
->addr
= entry
->latch(LOWER_ADDR
) |
347 ((uint64_t)entry
->latch(UPPER_ADDR
) << 32);
348 entry
->data
= entry
->latch(DATA
);
/* First-time mapping (or remap) of this entry's vector to a pirq. */
351 rc
= msi_msix_setup(s
, entry
->addr
, entry
->data
, &pirq
, true, entry_nr
,
352 entry
->pirq
== XEN_PT_UNASSIGNED_PIRQ
);
356 if (entry
->pirq
== XEN_PT_UNASSIGNED_PIRQ
) {
/* Existing binding: push the new addr/data to Xen. */
360 rc
= msi_msix_update(s
, entry
->addr
, entry
->data
, pirq
, true,
361 entry_nr
, &entry
->pirq
);
/* Deferred update has been applied. */
364 entry
->updated
= false;
/*
 * Sync every MSI-X table entry via xen_pt_msix_update_one(), passing each
 * entry's latched vector-control word.
 * NOTE(review): truncated extract — the declaration of 'i' and the return
 * statement are not visible here.
 */
370 int xen_pt_msix_update(XenPCIPassthroughState
*s
)
372 XenPTMSIX
*msix
= s
->msix
;
375 for (i
= 0; i
< msix
->total_entries
; i
++) {
376 xen_pt_msix_update_one(s
, i
, msix
->msix_entry
[i
].latch(VECTOR_CTRL
));
/*
 * Disable MSI-X on the device and tear down every entry's physical
 * binding, resetting the per-entry emulated state.
 * NOTE(review): truncated extract — guard conditions and the declaration
 * of 'i' are not visible here.
 */
382 void xen_pt_msix_disable(XenPCIPassthroughState
*s
)
/* Clear the MSI-X enable bit before unbinding entries. */
386 msix_set_enable(s
, false);
388 for (i
= 0; i
< s
->msix
->total_entries
; i
++) {
389 XenPTMSIXEntry
*entry
= &s
->msix
->msix_entry
[i
];
391 msi_msix_disable(s
, entry
->addr
, entry
->data
, entry
->pirq
, true, true);
393 /* clear MSI-X info */
394 entry
->pirq
= XEN_PT_UNASSIGNED_PIRQ
;
395 entry
->updated
= false;
/*
 * After the BAR holding the MSI-X table moved, unbind every mapped entry,
 * mark them all dirty, and rebind through xen_pt_msix_update().
 * NOTE(review): truncated extract — declarations of 'i'/'ret', the early
 * return for non-matching BARs, and the error branch are not visible.
 */
399 int xen_pt_msix_update_remap(XenPCIPassthroughState
*s
, int bar_index
)
401 XenPTMSIXEntry
*entry
;
/* Only act when this BAR actually hosts the MSI-X table. */
404 if (!(s
->msix
&& s
->msix
->bar_index
== bar_index
)) {
408 for (i
= 0; i
< s
->msix
->total_entries
; i
++) {
409 entry
= &s
->msix
->msix_entry
[i
];
410 if (entry
->pirq
!= XEN_PT_UNASSIGNED_PIRQ
) {
411 ret
= xc_domain_unbind_pt_irq(xen_xc
, xen_domid
, entry
->pirq
,
412 PT_IRQ_TYPE_MSI
, 0, 0, 0, 0);
414 XEN_PT_ERR(&s
->dev
, "unbind MSI-X entry %d failed (err: %d)\n",
/* Force a rebind of this entry on the update pass below. */
417 entry
->updated
= true;
420 return xen_pt_msix_update(s
);
423 static uint32_t get_entry_value(XenPTMSIXEntry
*e
, int offset
)
425 assert(!(offset
% sizeof(*e
->latch
)));
426 return e
->latch
[offset
/ sizeof(*e
->latch
)];
429 static void set_entry_value(XenPTMSIXEntry
*e
, int offset
, uint32_t val
)
431 assert(!(offset
% sizeof(*e
->latch
)));
432 e
->latch
[offset
/ sizeof(*e
->latch
)] = val
;
/*
 * MMIO write handler for the emulated MSI-X table region: latch the value,
 * and on a vector-control write that unmasks a dirty entry, push the
 * deferred addr/data update to Xen.
 * NOTE(review): truncated extract — some returns and brace structure are
 * not visible here.
 */
435 static void pci_msix_write(void *opaque
, hwaddr addr
,
436 uint64_t val
, unsigned size
)
438 XenPCIPassthroughState
*s
= opaque
;
439 XenPTMSIX
*msix
= s
->msix
;
440 XenPTMSIXEntry
*entry
;
441 unsigned int entry_nr
, offset
;
/* Locate the table entry and the word offset being written. */
443 entry_nr
= addr
/ PCI_MSIX_ENTRY_SIZE
;
444 if (entry_nr
>= msix
->total_entries
) {
447 entry
= &msix
->msix_entry
[entry_nr
];
448 offset
= addr
% PCI_MSIX_ENTRY_SIZE
;
450 if (offset
!= PCI_MSIX_ENTRY_VECTOR_CTRL
) {
/* Unchanged addr/data on an already-mapped entry needs no action. */
451 if (get_entry_value(entry
, offset
) == val
452 && entry
->pirq
!= XEN_PT_UNASSIGNED_PIRQ
) {
/* Defer the change until the next masked -> unmasked transition. */
456 entry
->updated
= true;
457 } else if (msix
->enabled
&& entry
->updated
&&
458 !(val
& PCI_MSIX_ENTRY_CTRL_MASKBIT
)) {
459 const volatile uint32_t *vec_ctrl
;
462 * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
463 * up-to-date. Read from hardware directly.
465 vec_ctrl
= s
->msix
->phys_iomem_base
+ entry_nr
* PCI_MSIX_ENTRY_SIZE
466 + PCI_MSIX_ENTRY_VECTOR_CTRL
;
467 xen_pt_msix_update_one(s
, entry_nr
, *vec_ctrl
);
/* Record the guest's value in the latch regardless of the path taken. */
470 set_entry_value(entry
, offset
, val
);
/*
 * MMIO read handler: table-entry reads come from the latched copy; reads
 * past the table (the Pending Bit Array) go to the mapped hardware.
 * NOTE(review): truncated extract — the size parameter, bounds branch and
 * some returns are not visible here.
 */
473 static uint64_t pci_msix_read(void *opaque
, hwaddr addr
,
476 XenPCIPassthroughState
*s
= opaque
;
477 XenPTMSIX
*msix
= s
->msix
;
478 int entry_nr
, offset
;
480 entry_nr
= addr
/ PCI_MSIX_ENTRY_SIZE
;
482 XEN_PT_ERR(&s
->dev
, "asked MSI-X entry '%i' invalid!\n", entry_nr
);
486 offset
= addr
% PCI_MSIX_ENTRY_SIZE
;
/* Within the table proper: serve the guest's latched view. */
488 if (addr
< msix
->total_entries
* PCI_MSIX_ENTRY_SIZE
) {
489 return get_entry_value(&msix
->msix_entry
[entry_nr
], offset
);
491 /* Pending Bit Array (PBA) */
492 return *(uint32_t *)(msix
->phys_iomem_base
+ addr
);
496 static bool pci_msix_accepts(void *opaque
, hwaddr addr
,
497 unsigned size
, bool is_write
)
499 return !(addr
& (size
- 1));
/*
 * Memory-region callbacks for the emulated MSI-X table: 32-bit accesses
 * only, with alignment enforced by pci_msix_accepts().
 * NOTE(review): truncated extract — the nested .valid/.impl sub-struct
 * braces are not visible here.
 */
502 static const MemoryRegionOps pci_msix_ops
= {
503 .read
= pci_msix_read
,
504 .write
= pci_msix_write
,
505 .endianness
= DEVICE_NATIVE_ENDIAN
,
507 .min_access_size
= 4,
508 .max_access_size
= 4,
510 .accepts
= pci_msix_accepts
513 .min_access_size
= 4,
514 .max_access_size
= 4,
/*
 * Parse the device's MSI-X capability, allocate the per-entry state, map
 * the physical table via /dev/mem, and register the emulated MMIO region
 * over it.
 * NOTE(review): heavily truncated extract — error paths, the mmap call's
 * leading arguments, cleanup and return statements are not visible here.
 */
519 int xen_pt_msix_init(XenPCIPassthroughState
*s
, uint32_t base
)
522 uint16_t control
= 0;
523 uint32_t table_off
= 0;
524 int i
, total_entries
, bar_index
;
525 XenHostPCIDevice
*hd
= &s
->real_device
;
526 PCIDevice
*d
= &s
->dev
;
528 XenPTMSIX
*msix
= NULL
;
/* Verify that 'base' really points at an MSI-X capability. */
531 rc
= xen_host_pci_get_byte(hd
, base
+ PCI_CAP_LIST_ID
, &id
);
536 if (id
!= PCI_CAP_ID_MSIX
) {
537 XEN_PT_ERR(d
, "Invalid id %#x base %#x\n", id
, base
);
/* Table size is encoded in the QSIZE field of the control word. */
541 xen_host_pci_get_word(hd
, base
+ PCI_MSIX_FLAGS
, &control
);
542 total_entries
= control
& PCI_MSIX_FLAGS_QSIZE
;
/* One allocation for the MSIX struct plus its flexible entry array. */
545 s
->msix
= g_malloc0(sizeof (XenPTMSIX
)
546 + total_entries
* sizeof (XenPTMSIXEntry
));
549 msix
->total_entries
= total_entries
;
550 for (i
= 0; i
< total_entries
; i
++) {
551 msix
->msix_entry
[i
].pirq
= XEN_PT_UNASSIGNED_PIRQ
;
/* Emulated MMIO region backing the guest-visible table. */
554 memory_region_init_io(&msix
->mmio
, OBJECT(s
), &pci_msix_ops
,
555 s
, "xen-pci-pt-msix",
556 (total_entries
* PCI_MSIX_ENTRY_SIZE
/* PCI_MSIX_TABLE holds BIR (low bits) and table offset (rest). */
560 xen_host_pci_get_long(hd
, base
+ PCI_MSIX_TABLE
, &table_off
);
561 bar_index
= msix
->bar_index
= table_off
& PCI_MSIX_FLAGS_BIRMASK
;
562 table_off
= table_off
& ~PCI_MSIX_FLAGS_BIRMASK
;
563 msix
->table_base
= s
->real_device
.io_regions
[bar_index
].base_addr
;
564 XEN_PT_LOG(d
, "get MSI-X table BAR base 0x%"PRIx64
"\n", msix
->table_base
);
/* Map the physical table through /dev/mem for direct access. */
566 fd
= open("/dev/mem", O_RDWR
);
569 XEN_PT_ERR(d
, "Can't open /dev/mem: %s\n", strerror(errno
));
572 XEN_PT_LOG(d
, "table_off = %#x, total_entries = %d\n",
573 table_off
, total_entries
);
/* mmap needs page alignment; remember the sub-page adjustment. */
574 msix
->table_offset_adjust
= table_off
& 0x0fff;
575 msix
->phys_iomem_base
=
577 total_entries
* PCI_MSIX_ENTRY_SIZE
+ msix
->table_offset_adjust
,
579 MAP_SHARED
| MAP_LOCKED
,
581 msix
->table_base
+ table_off
- msix
->table_offset_adjust
);
583 if (msix
->phys_iomem_base
== MAP_FAILED
) {
585 XEN_PT_ERR(d
, "Can't map physical MSI-X table: %s\n", strerror(errno
));
/* Skip the adjustment so the pointer addresses the table itself. */
588 msix
->phys_iomem_base
= (char *)msix
->phys_iomem_base
589 + msix
->table_offset_adjust
;
591 XEN_PT_LOG(d
, "mapping physical MSI-X table to %p\n",
592 msix
->phys_iomem_base
);
594 memory_region_add_subregion_overlap(&s
->bar
[bar_index
], table_off
,
596 2); /* Priority: pci default + 1 */
/*
 * Undo xen_pt_msix_init's mappings: munmap the physical table and remove
 * the emulated MMIO subregion from its BAR.
 * NOTE(review): truncated extract — guard checks and brace structure are
 * not visible here.
 */
606 void xen_pt_msix_unmap(XenPCIPassthroughState
*s
)
608 XenPTMSIX
*msix
= s
->msix
;
614 /* unmap the MSI-X memory mapped register area */
615 if (msix
->phys_iomem_base
) {
616 XEN_PT_LOG(&s
->dev
, "unmapping physical MSI-X table from %p\n",
617 msix
->phys_iomem_base
);
/* Length must match the mmap: table size plus the sub-page adjustment. */
618 munmap(msix
->phys_iomem_base
, msix
->total_entries
* PCI_MSIX_ENTRY_SIZE
619 + msix
->table_offset_adjust
);
622 memory_region_del_subregion(&s
->bar
[msix
->bar_index
], &msix
->mmio
);
/*
 * Release the MSI-X emulation object.
 * NOTE(review): truncated extract — guard checks and the freeing of
 * s->msix are not visible here.
 */
625 void xen_pt_msix_delete(XenPCIPassthroughState
*s
)
627 XenPTMSIX
*msix
= s
->msix
;
/* Detach the MMIO region from the QOM tree so it can be finalized. */
633 object_unparent(OBJECT(&msix
->mmio
));