/*
 * This module includes support for MSI-X in PCI devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw.h"
#include "msi.h"
#include "msix.h"
#include "pci.h"
#include "range.h"
#define MSIX_CAP_LENGTH 12

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

/* How much space does an MSIX table need. */
/* The spec requires giving the table structure
 * a 4K aligned region all by itself. */
#define MSIX_PAGE_SIZE 0x1000
/* Reserve second half of the page for pending bits */
#define MSIX_PAGE_PENDING (MSIX_PAGE_SIZE / 2)
#define MSIX_MAX_ENTRIES 32
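
/*
 * Illustration (added comment, not from the original source): with the
 * layout above, the single MSIX_PAGE_SIZE page backing the table is split
 * in half -- vector table entries live at page offsets [0, 0x800) and the
 * pending bit array (PBA) at [0x800, 0x1000).
 */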
static MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}
/* Add MSI-X capability to the config space for the device. */
/* Given a bar and its size, add MSI-X table on top of it
 * and fill MSI-X capability in the config space.
 * Original bar size must be a power of 2 or 0.
 * New bar size is returned. */
static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries,
                           unsigned bar_nr, unsigned bar_size)
{
    int config_offset;
    uint8_t *config;
    uint32_t new_size;

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1)
        return -EINVAL;
    if (bar_size > 0x80000000)
        return -ENOSPC;

    /* Add space for MSI-X structures */
    if (!bar_size) {
        new_size = MSIX_PAGE_SIZE;
    } else if (bar_size < MSIX_PAGE_SIZE) {
        bar_size = MSIX_PAGE_SIZE;
        new_size = MSIX_PAGE_SIZE * 2;
    } else {
        new_size = bar_size * 2;
    }

    pdev->msix_bar_size = new_size;
    config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX,
                                       0, MSIX_CAP_LENGTH);
    if (config_offset < 0)
        return config_offset;
    config = pdev->config + config_offset;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    /* Table on top of BAR */
    pci_set_long(config + PCI_MSIX_TABLE, bar_size | bar_nr);
    /* Pending bits on top of that */
    pci_set_long(config + PCI_MSIX_PBA, (bar_size + MSIX_PAGE_PENDING) |
                 bar_nr);
    pdev->msix_cap = config_offset;
    /* Make flags bit writable. */
    pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
        MSIX_MASKALL_MASK;
    pdev->msix_function_masked = true;
    return 0;
}
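
/*
 * Worked example (added for illustration, not in the original file): a
 * device with an existing 0x1000-byte BAR passes bar_size = 0x1000 above,
 * so the vector table lands at BAR offset 0x1000, the PBA at 0x1800
 * (0x1000 + MSIX_PAGE_PENDING), and the BAR grows to 0x2000 bytes,
 * preserving the power-of-2 size PCI requires.
 */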
static uint64_t msix_mmio_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    void *page = dev->msix_table_page;

    return pci_get_long(page + offset);
}
static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_table_page + MSIX_PAGE_PENDING + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

static void msix_set_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

static void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}
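
/*
 * Example of the pending-bit arithmetic above (added comment): vector 10
 * maps to byte MSIX_PAGE_PENDING + 10 / 8, i.e. the second PBA byte, with
 * mask 1 << (10 % 8) == 0x04, i.e. bit 2 of that byte.
 */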
static bool msix_vector_masked(PCIDevice *dev, int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
    return fmask || dev->msix_table_page[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static bool msix_is_masked(PCIDevice *dev, int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}
static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}
static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) ||
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK);
}
/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}
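
/*
 * Example flow (added for illustration): if the guest clears MASKALL while
 * MSI-X stays enabled, the function mask transitions and the loop above
 * re-evaluates every vector; any vector that became unmasked with its
 * pending bit set fires immediately via msix_handle_mask_update().
 */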
static void msix_mmio_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    /* MSI-X page includes a read-only PBA and a writeable Vector Control. */
    if (vector >= dev->msix_entries_nr) {
        return;
    }

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table_page + offset, val);
    msix_handle_mask_update(dev, vector, was_masked);
}
static const MemoryRegionOps msix_mmio_ops = {
    .read = msix_mmio_read,
    .write = msix_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
static void msix_mmio_setup(PCIDevice *d, MemoryRegion *bar)
{
    uint8_t *config = d->config + d->msix_cap;
    uint32_t table = pci_get_long(config + PCI_MSIX_TABLE);
    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
    /* TODO: for assigned devices, we'll want to make it possible to map
     * pending bits separately in case they are in a separate bar. */

    memory_region_add_subregion(bar, offset, &d->msix_mmio);
}
static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table_page[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}
/* Initialize the MSI-X structures. Note: if MSI-X is supported, BAR size is
 * modified, it should be retrieved with msix_bar_size. */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *bar,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_supported) {
        return -ENOTSUP;
    }
    if (nentries > MSIX_MAX_ENTRIES)
        return -EINVAL;

    dev->msix_entry_used = g_malloc0(MSIX_MAX_ENTRIES *
                                     sizeof *dev->msix_entry_used);

    dev->msix_table_page = g_malloc0(MSIX_PAGE_SIZE);
    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_mmio, &msix_mmio_ops, dev,
                          "msix", MSIX_PAGE_SIZE);

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret)
        goto err_config;

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    msix_mmio_setup(dev, bar);
    return 0;

err_config:
    dev->msix_entries_nr = 0;
    memory_region_destroy(&dev->msix_mmio);
    g_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}
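
/*
 * Usage sketch (added; "MyDevice", "d", the BAR number and vector count are
 * hypothetical, not part of this file): a device would typically do
 * something like
 *
 *     memory_region_init(&d->msix_bar, "mydev-msix", MSIX_PAGE_SIZE);
 *     if (msix_init(&d->pci_dev, 4, &d->msix_bar, 1, 0) == 0) {
 *         pci_register_bar(&d->pci_dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY,
 *                          &d->msix_bar);
 *     }
 *
 * at init time, and fall back to INTx/MSI when msix_init() fails
 * (e.g. -ENOTSUP when the interrupt controller lacks MSI support).
 */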
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}
/* Clean up resources for the device. */
int msix_uninit(PCIDevice *dev, MemoryRegion *bar)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return 0;
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(bar, &dev->msix_mmio);
    memory_region_destroy(&dev->msix_mmio);
    g_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    return 0;
}
void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}
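
/*
 * Wire format illustration (added comment): for n == 3 vectors the calls
 * above emit 3 * PCI_MSIX_ENTRY_SIZE == 48 bytes of table state followed
 * by (3 + 7) / 8 == 1 byte of pending bits; msix_load() below consumes
 * exactly the same layout.
 */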
/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX)) {
        return;
    }

    msix_free_irq_entries(dev);
    qemu_get_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}
/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}
/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}
/* Size of bar where MSI-X table resides, or 0 if MSI-X not supported. */
uint32_t msix_bar_size(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) ?
        dev->msix_bar_size : 0;
}
/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector])
        return;
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    stl_le_phys(msg.address, msg.data);
}
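
/*
 * Note (added for illustration): a notification to a masked vector is not
 * lost -- msix_set_pending() latches it above, and msix_handle_mask_update()
 * replays it as soon as the guest unmasks the vector.
 */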
void msix_reset(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table_page, 0, MSIX_PAGE_SIZE);
    msix_mask_all(dev, dev->msix_entries_nr);
}
/* PCI spec suggests that devices make it possible for software to configure
 * fewer vectors than supported by the device, but does not specify a standard
 * mechanism for devices to do so.
 *
 * We support this by asking devices to declare vectors software is going to
 * actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */
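
/*
 * Illustration (added; the device and counts are hypothetical): a device
 * exposing 8 vectors whose driver only configured two queues might call
 *
 *     msix_vector_use(pci_dev, 0);
 *     msix_vector_use(pci_dev, 1);
 *
 * after which msix_notify() silently drops notifications for vectors 2-7
 * until they too are marked used.
 */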
/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr)
        return -EINVAL;
    dev->msix_entry_used[vector]++;
    return 0;
}
/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}
void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!(dev->cap_present & QEMU_PCI_CAP_MSIX))
        return;
    msix_free_irq_entries(dev);
}
unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}
static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}
static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}
int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}
void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
}
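
/*
 * Usage sketch (added; the callback bodies are hypothetical): a backend
 * that routes vectors externally, e.g. a KVM irqfd user, registers a pair
 * of callbacks once and unregisters them on teardown:
 *
 *     static int my_use(PCIDevice *dev, unsigned int vector, MSIMessage msg)
 *     {
 *         return 0;   // e.g. program msg.address/msg.data into an irq route
 *     }
 *
 *     static void my_release(PCIDevice *dev, unsigned int vector)
 *     {
 *     }
 *
 *     msix_set_vector_notifiers(dev, my_use, my_release);
 *     ...
 *     msix_unset_vector_notifiers(dev);
 *
 * The notifiers fire only for unmasked vectors while MSI-X is enabled and
 * MASKALL is clear, matching the checks above.
 */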