/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in pci devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

static MSIMessage msix_prepare_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    return dev->msix_prepare_message(dev, vector);
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

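/*
 * For illustration: the PBA keeps one pending bit per vector, so with the
 * helpers above vector 12 lives in byte 12 / 8 == 1 of msix_pba, under the
 * mask 1 << (12 % 8) == 0x10.
 */
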
static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

void msix_set_mask(PCIDevice *dev, int vector, bool mask, Error **errp)
{
    ERRP_GUARD();
    unsigned offset;
    bool was_masked;

    /* Valid vectors are 0..msix_entries_nr - 1, so vector == nr is also
     * out of range. */
    if (vector >= dev->msix_entries_nr) {
        error_setg(errp, "msix: vector %d not allocated. max vector is %d",
                   vector, dev->msix_entries_nr - 1);
        return;
    }

    offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;

    was_masked = msix_is_masked(dev, vector);

    if (mask) {
        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
    } else {
        dev->msix_table[offset] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
    }

    msix_handle_mask_update(dev, vector, was_masked);
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        /* Each PBA byte covers 8 vectors, so a read of @size bytes
         * spans vectors [addr * 8, (addr + size) * 8). */
        unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    /* The PBA is read-only for the guest; writes are silently ignored. */
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table structure resides.
 * @table_bar_nr is the number of the base address register corresponding to
 * @table_bar.
 * @table_offset indicates the offset at which the MSI-X table structure
 * starts within @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array structure
 * resides.
 * @pba_bar_nr is the number of the base address register corresponding to
 * @pba_bar.
 * @pba_offset indicates the offset at which the Pending Bit Array structure
 * starts within @pba_bar.
 * A non-zero @cap_pos puts the MSI-X capability at that offset in PCI config
 * space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means the platform's interrupt controller lacks MSI support.
 * -EINVAL means the capabilities overlap, which can happen when @cap_pos is
 * non-zero; this normally indicates a programming error, except for device
 * assignment, which can use it to check whether real hardware is broken.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev),
                          &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev),
                          &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    dev->msix_prepare_message = msix_prepare_message;

    return 0;
}

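/*
 * Usage sketch (not part of the original file; MY_DEV_MSIX_VECTORS and
 * s->bar are hypothetical): a device would typically call msix_init()
 * from its realize hook, placing the table and PBA inside one of its
 * existing BARs:
 *
 *     ret = msix_init(pdev, MY_DEV_MSIX_VECTORS,
 *                     &s->bar, 1, 0,          // table in BAR 1 at offset 0
 *                     &s->bar, 1, 0x800,      // PBA in BAR 1 at offset 0x800
 *                     0, errp);               // cap_pos 0: pick a free spot
 *     if (ret < 0) {
 *         return;
 *     }
 */
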
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and the PBA in
     * the upper half for nentries less than or equal to 128.
     * No need to care about using more than 65 entries for legacy
     * machine types, which have at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}

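/*
 * Usage sketch (hypothetical vector count): devices without spare BAR
 * space can dedicate a whole BAR to MSI-X instead:
 *
 *     if (msix_init_exclusive_bar(pdev, 8, 2, errp)) {   // 8 vectors, BAR 2
 *         return;
 *     }
 */
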
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    dev->msix_prepare_message = NULL;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    msi_send_message(dev, msg);
}

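/*
 * Usage sketch (the PCIDevice pointer and vector number are illustrative):
 * on the data path a device just calls msix_notify(); masking and
 * pending-bit bookkeeping are handled here:
 *
 *     if (msix_enabled(pdev)) {
 *         msix_notify(pdev, 0);
 *     } else {
 *         pci_irq_pulse(pdev);    // legacy INTx fallback
 *     }
 */
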
void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify
 * a standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is
 * going to actually use, and checking this on the notification path.
 * Devices that don't want to follow the spec's suggestion can declare
 * all vectors as used. */

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }

    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

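/*
 * Usage sketch (n is a hypothetical queue count): a device that only
 * drives its first n queues declares just those vectors, pairing each
 * msix_vector_use() with a matching msix_vector_unuse() on teardown:
 *
 *     for (i = 0; i < n; i++) {
 *         msix_vector_use(pdev, i);
 *     }
 *     // ... device operates ...
 *     for (i = 0; i < n; i++) {
 *         msix_vector_unuse(pdev, i);
 *     }
 */
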
void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

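/*
 * Usage sketch (callback names modeled on hw/virtio/virtio-pci.c): backends
 * such as vhost use these notifiers to wire vectors to their own delivery
 * path, e.g. KVM irqfds, and tear the routing down again afterwards:
 *
 *     msix_set_vector_notifiers(pdev, virtio_pci_vector_unmask,
 *                               virtio_pci_vector_mask,
 *                               virtio_pci_vector_poll);
 *     ...
 *     msix_unset_vector_notifiers(pdev);
 */
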
static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    msix_save(pv, f);

    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get = get_msix_state,
    .put = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name = "msix",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_msix,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};