/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "hw.h"
#include "msi.h"
#include "msix.h"
#include "pci.h"
#include "range.h"
#define MSIX_CAP_LENGTH 12

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

/* How much space does an MSI-X table need? The spec requires giving the
 * table structure a 4K aligned region all by itself. */
#define MSIX_PAGE_SIZE 0x1000
/* Reserve the second half of the page for pending bits */
#define MSIX_PAGE_PENDING (MSIX_PAGE_SIZE / 2)
#define MSIX_MAX_ENTRIES 32
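
/* With the values above, the first half of the MSI-X page (offsets
 * 0x000-0x7ff) is reserved for the vector table and the second half
 * (0x800-0xfff) for the pending bit array (PBA), one bit per vector. */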

static MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table_page + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    /* The low and high address dwords of a table entry are contiguous,
     * so the 64-bit message address can be read with one quad access. */
    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

/* Add MSI-X capability to the config space for the device. */
/* Given a bar and its size, add MSI-X table on top of it
 * and fill MSI-X capability in the config space.
 * Original bar size must be a power of 2 or 0.
 * New bar size is returned. */
static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries,
                           unsigned bar_nr, unsigned bar_size)
{
    int config_offset;
    uint8_t *config;
    uint32_t new_size;

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        return -EINVAL;
    }
    if (bar_size > 0x80000000) {
        return -ENOSPC;
    }

    /* Add space for MSI-X structures */
    if (!bar_size) {
        new_size = MSIX_PAGE_SIZE;
    } else if (bar_size < MSIX_PAGE_SIZE) {
        bar_size = MSIX_PAGE_SIZE;
        new_size = MSIX_PAGE_SIZE * 2;
    } else {
        new_size = bar_size * 2;
    }

    pdev->msix_bar_size = new_size;
    config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX,
                                       0, MSIX_CAP_LENGTH);
    if (config_offset < 0) {
        return config_offset;
    }
    config = pdev->config + config_offset;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    /* Table on top of BAR */
    pci_set_long(config + PCI_MSIX_TABLE, bar_size | bar_nr);
    /* Pending bits on top of that */
    pci_set_long(config + PCI_MSIX_PBA,
                 (bar_size + MSIX_PAGE_PENDING) | bar_nr);
    pdev->msix_cap = config_offset;
    /* Make flags bit writable. */
    pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                                        MSIX_MASKALL_MASK;
    pdev->msix_function_masked = true;
    return 0;
}
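
/* For example, a device with an existing 0x4000-byte BAR ends up with an
 * 0x8000-byte BAR: the table is placed at offset 0x4000 and the PBA at
 * 0x4800, while offsets below 0x4000 keep their original contents. */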

static uint64_t msix_mmio_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    void *page = dev->msix_table_page;

    return pci_get_long(page + offset);
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_table_page + MSIX_PAGE_PENDING + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

static void msix_set_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

static void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static bool msix_vector_masked(PCIDevice *dev, int vector, bool fmask)
{
    unsigned offset =
        vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
    return fmask || dev->msix_table_page[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static bool msix_is_masked(PCIDevice *dev, int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    /* An unmask with the vector's pending bit set delivers the latched
     * interrupt now. */
    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) ||
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static void msix_mmio_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    /* MSI-X page includes a read-only PBA and a writeable Vector Control. */
    if (vector >= dev->msix_entries_nr) {
        return;
    }

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table_page + offset, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_mmio_ops = {
    .read = msix_mmio_read,
    .write = msix_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        /* Restrict guest accesses to aligned 32-bit words */
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void msix_mmio_setup(PCIDevice *d, MemoryRegion *bar)
{
    uint8_t *config = d->config + d->msix_cap;
    uint32_t table = pci_get_long(config + PCI_MSIX_TABLE);
    uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);

    /* TODO: for assigned devices, we'll want to make it possible to map
     * pending bits separately in case they are in a separate bar. */
    memory_region_add_subregion(bar, offset, &d->msix_mmio);
}

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table_page[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/* Initialize the MSI-X structures. Note: if MSI-X is supported, the BAR
 * size is modified; retrieve it with msix_bar_size(). */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *bar,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_supported) {
        return -ENOTSUP;
    }
    if (nentries > MSIX_MAX_ENTRIES) {
        return -EINVAL;
    }

    dev->msix_entry_used = g_malloc0(MSIX_MAX_ENTRIES *
                                     sizeof *dev->msix_entry_used);

    dev->msix_table_page = g_malloc0(MSIX_PAGE_SIZE);
    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_mmio, &msix_mmio_ops, dev,
                          "msix", MSIX_PAGE_SIZE);

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret) {
        goto err_config;
    }

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    msix_mmio_setup(dev, bar);
    return 0;

err_config:
    dev->msix_entries_nr = 0;
    memory_region_destroy(&dev->msix_mmio);
    g_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}
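
/* A minimal usage sketch (hypothetical device code, not part of this module;
 * the device state, BAR number and entry count are illustrative):
 *
 *   if (msix_init(&s->pdev, 4, &s->bar, 1, bar_size) == 0) {
 *       // msix_init() laid the MSI-X page on top of the BAR, growing it;
 *       // register the BAR with the new size, msix_bar_size(&s->pdev).
 *       pci_register_bar(&s->pdev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
 *   }
 */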

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
int msix_uninit(PCIDevice *dev, MemoryRegion *bar)
{
    if (!msix_present(dev)) {
        return 0;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(bar, &dev->msix_mmio);
    memory_region_destroy(&dev->msix_mmio);
    g_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    return 0;
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
}
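
/* The saved blob is the raw vector table followed by one pending-bit byte
 * per eight vectors; msix_load() below consumes the same layout. */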

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_free_irq_entries(dev);
    qemu_get_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Size of bar where MSI-X table resides, or 0 if MSI-X not supported. */
uint32_t msix_bar_size(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) ?
        dev->msix_bar_size : 0;
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    stl_le_phys(msg.address, msg.data);
}
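
/* Note that delivery above is just a 32-bit little-endian store of msg.data
 * to msg.address: on the wire, an MSI-X interrupt is an ordinary memory
 * write transaction issued by the device. */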

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table_page, 0, MSIX_PAGE_SIZE);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than supported by the device, but does not specify
 * a standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is
 * actually going to use, and checking this on the notification path. Devices
 * that don't want to follow the spec suggestion can declare all vectors as
 * used. */

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }
    dev->msix_entry_used[vector]++;
    return 0;
}
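
/* Illustrative call pattern (hypothetical device code): declare a vector
 * before signalling it, and release it when the driver deconfigures it:
 *
 *   msix_vector_use(pdev, 0);
 *   ...
 *   msix_notify(pdev, 0);   // delivered now, or latched in the PBA if masked
 *   ...
 *   msix_vector_unuse(pdev, 0);
 */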

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;

    /* Only fire the notifiers now if MSI-X is enabled and not all-masked */
    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}
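
/* Sketch of notifier registration (hypothetical callbacks: a backend such as
 * a KVM irqfd user would implement MSIVectorUseNotifier and
 * MSIVectorReleaseNotifier to learn when an unmasked vector's message
 * becomes live):
 *
 *   msix_set_vector_notifiers(pdev, my_vector_use, my_vector_release);
 *   ...
 *   msix_unset_vector_notifiers(pdev);
 */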

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
}