/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in PCI devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"

#include "hw/i386/kvm/xen_evtchn.h"

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

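/*
 * For example: PCI_MSIX_FLAGS_ENABLE is 0x8000 and PCI_MSIX_FLAGS_MASKALL is
 * 0x4000 in the 16-bit FLAGS word, so within byte 1 they become 0x80 and
 * 0x40. Keeping byte-sized masks lets the code below read and write a single
 * byte of config space instead of the whole flags word.
 */
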
static MSIMessage msix_prepare_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    return dev->msix_prepare_message(dev, vector);
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (xen_mode == XEN_EMULATE) {
        MSIMessage msg = msix_prepare_message(dev, vector);

        xen_evtchn_snoop_msi(dev, true, vector, msg.address, msg.data,
                             is_masked);
    }

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

void msix_set_mask(PCIDevice *dev, int vector, bool mask)
{
    unsigned offset;
    bool was_masked;

    assert(vector < dev->msix_entries_nr);

    offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
    was_masked = msix_is_masked(dev, vector);

    if (mask) {
        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
    } else {
        dev->msix_table[offset] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
    }

    msix_handle_mask_update(dev, vector, was_masked);
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;

    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        /* Each PBA byte covers 8 vectors; clamp to the number of entries. */
        unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    /* The Pending Bit Array is read-only for the guest; ignore writes. */
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table structure resides.
 * @table_bar_nr is the number of the base address register corresponding
 * to @table_bar.
 * @table_offset indicates the offset at which the MSI-X table structure
 * starts within @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array structure
 * resides.
 * @pba_bar_nr is the number of the base address register corresponding
 * to @pba_bar.
 * @pba_offset indicates the offset at which the Pending Bit Array structure
 * starts within @pba_bar.
 * A non-zero @cap_pos puts the MSI-X capability at that offset in PCI
 * config space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means the platform's interrupt controller lacks MSI support;
 * -EINVAL means a capability overlap, which can only happen when @cap_pos
 * is non-zero. It normally indicates a programming error, except for
 * device assignment, which may use it to check whether real hardware
 * is broken.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev), &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev), &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    dev->msix_prepare_message = msix_prepare_message;

    return 0;
}

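/*
 * A minimal calling sketch (illustrative only; the MyDevState type, field
 * names and BAR layout are made up): a device exposing both structures in a
 * 4k BAR 1 might do, in its realize function:
 *
 *     Error *err = NULL;
 *     memory_region_init(&s->msix_bar, OBJECT(dev), "mydev-msix", 4096);
 *     if (msix_init(dev, 8, &s->msix_bar, 1, 0,
 *                   &s->msix_bar, 1, 2048, 0, &err) < 0) {
 *         error_propagate(errp, err);
 *         return;
 *     }
 *     pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar);
 *
 * Most devices can instead use msix_init_exclusive_bar() below, which
 * derives this layout and registers the BAR itself.
 */
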
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and the PBA in
     * the upper half for nentries less than or equal to 128.
     * There is no need to care about using more than 65 entries for
     * legacy machine types, which have at most 64 queues.
     */
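    /*
     * Worked example (illustrative): nentries = 256 gives a 4096-byte
     * vector table, pushing bar_pba_offset up to 4096; the PBA then
     * needs QEMU_ALIGN_UP(256, 64) / 8 = 32 bytes, so bar_size grows
     * to 4128 and pow2ceil() rounds the BAR up to 8192 bytes.
     */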
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    dev->msix_prepare_message = NULL;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    assert(vector < dev->msix_entries_nr);

    if (!dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    msi_send_message(dev, msg);
}

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than supported by the device, but does not specify
 * a standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is going
 * to actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */

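/*
 * A minimal usage sketch (illustrative only; the device and vector layout
 * are made up): a device that services one config and one queue interrupt
 * would declare exactly those vectors after a successful msix_init():
 *
 *     msix_vector_use(pci_dev, 0);    // config changes
 *     msix_vector_use(pci_dev, 1);    // queue events
 *
 * msix_notify() silently drops vectors that were never marked used;
 * msix_vector_unuse() and msix_unuse_all_vectors() undo the marking.
 */
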
/* Mark vector as used. */
void msix_vector_use(PCIDevice *dev, unsigned vector)
{
    assert(vector < dev->msix_entries_nr);
    dev->msix_entry_used[vector]++;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    assert(vector < dev->msix_entries_nr);
    if (!dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    msix_save(pv, f);

    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get  = get_msix_state,
    .put  = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name         = "msix",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_msix,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};