// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */
#include "virtio_pci_common.h"
static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif
/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}
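
/*
 * Note: vq->priv is set by the transport-specific setup_vq() (legacy or
 * modern) to the mapped address of this queue's notification register, so
 * the write above lands directly on the device's notify area.
 */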
/* Notify all slow path virtqueues on an interrupt. */
static void vp_vring_slow_path_interrupt(int irq,
					 struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->slow_virtqueues, node)
		vring_interrupt(irq, info->vq);
	spin_unlock_irqrestore(&vp_dev->lock, flags);
}
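
/*
 * Slow path vqs (currently only the admin vq) share the config change
 * vector, so they are serviced from the config interrupt rather than a
 * dedicated queue vector.
 */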
/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	vp_vring_slow_path_interrupt(irq, vp_dev);
	return IRQ_HANDLED;
}
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}
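
/*
 * With a vector shared by several vqs there is no way to know which one
 * actually triggered the interrupt, so every vq on the list is polled via
 * vring_interrupt() above.
 */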
/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned int flags = PCI_IRQ_MSIX;
	unsigned int i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (!per_vq_vectors)
		desc = NULL;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}
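
/*
 * A vector is "slow path" if it is the shared config-change vector rather
 * than a dedicated per-vq vector; vqs bound to it (currently only the
 * admin vq) are serviced from the config interrupt handler.
 */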
static bool vp_is_slow_path_vector(u16 msix_vec)
{
	return msix_vec == VP_MSIX_CONFIG_VECTOR;
}
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec,
				     struct virtio_pci_vq_info **p_info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		if (!vp_is_slow_path_vector(msix_vec))
			list_add(&info->node, &vp_dev->virtqueues);
		else
			list_add(&info->node, &vp_dev->slow_virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	*p_info = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	/*
	 * If the vq is in reset (e.g. because re-enable failed), info->node
	 * was never rejoined to a queue list, so it must not be deleted
	 * again here. This prevents unexpected irqs.
	 */
	if (!vq->reset) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_del(&info->node);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	}

	vp_dev->del_vq(info);
	kfree(info);
}
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR &&
			    !vp_is_slow_path_vector(v)) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_update_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}
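
/*
 * Vector allocation policies, in decreasing order of preference (see the
 * fallback chain in vp_find_vqs()): one vector per vq plus one for config
 * changes; per-vq vectors with config and slow path vqs sharing one
 * vector; or one vector for config plus a single vector shared by all vqs.
 */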
enum vp_vq_vector_policy {
	VP_VQ_VECTOR_POLICY_EACH,
	VP_VQ_VECTOR_POLICY_SHARED_SLOW,
	VP_VQ_VECTOR_POLICY_SHARED,
};
static struct virtqueue *
vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx,
		    vq_callback_t *callback, const char *name, bool ctx,
		    bool slow_path, int *allocated_vectors,
		    enum vp_vq_vector_policy vector_policy,
		    struct virtio_pci_vq_info **p_info)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	u16 msix_vec;
	int err;

	if (!callback)
		msix_vec = VIRTIO_MSI_NO_VECTOR;
	else if (vector_policy == VP_VQ_VECTOR_POLICY_EACH ||
		 (vector_policy == VP_VQ_VECTOR_POLICY_SHARED_SLOW &&
		  !slow_path))
		msix_vec = (*allocated_vectors)++;
	else if (vector_policy != VP_VQ_VECTOR_POLICY_EACH &&
		 slow_path)
		msix_vec = VP_MSIX_CONFIG_VECTOR;
	else
		msix_vec = VP_MSIX_VQ_VECTOR;
	vq = vp_setup_vq(vdev, queue_idx, callback, name, ctx, msix_vec,
			 p_info);
	if (IS_ERR(vq))
		return vq;

	if (vector_policy == VP_VQ_VECTOR_POLICY_SHARED ||
	    msix_vec == VIRTIO_MSI_NO_VECTOR ||
	    vp_is_slow_path_vector(msix_vec))
		return vq;

	/* allocate per-vq irq if available and necessary */
	snprintf(vp_dev->msix_names[msix_vec], sizeof(*vp_dev->msix_names),
		 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
			  vring_interrupt, 0,
			  vp_dev->msix_names[msix_vec], vq);
	if (err) {
		vp_del_vq(vq);
		return ERR_PTR(err);
	}

	return vq;
}
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[],
			    enum vp_vq_vector_policy vector_policy,
			    struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
	struct virtqueue_info *vqi;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;
	struct virtqueue *vq;
	bool per_vq_vectors;
	u16 avq_num = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (vp_dev->avq_index) {
		err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
		if (err)
			goto error_find;
	}

	per_vq_vectors = vector_policy != VP_VQ_VECTOR_POLICY_SHARED;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i) {
			vqi = &vqs_info[i];
			if (vqi->name && vqi->callback)
				++nvectors;
		}
		if (avq_num && vector_policy == VP_VQ_VECTOR_POLICY_EACH)
			++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, desc);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		vqi = &vqs_info[i];
		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_find_one_vq_msix(vdev, queue_idx++, vqi->callback,
					     vqi->name, vqi->ctx, false,
					     &allocated_vectors, vector_policy,
					     &vp_dev->vqs[i]);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}
	}

	if (!avq_num)
		return 0;
	sprintf(avq->name, "avq.%u", avq->vq_index);
	vq = vp_find_one_vq_msix(vdev, avq->vq_index, vp_modern_avq_done,
				 avq->name, false, true, &allocated_vectors,
				 vector_policy, &vp_dev->admin_vq.info);
	if (IS_ERR(vq)) {
		err = PTR_ERR(vq);
		goto error_find;
	}

	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[],
			    struct virtqueue_info vqs_info[])
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *avq = &vp_dev->admin_vq;
	int i, err, queue_idx = 0;
	struct virtqueue *vq;
	u16 avq_num = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (vp_dev->avq_index) {
		err = vp_dev->avq_index(vdev, &avq->vq_index, &avq_num);
		if (err)
			goto out_del_vqs;
	}

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, vqi->callback,
				     vqi->name, vqi->ctx,
				     VIRTIO_MSI_NO_VECTOR, &vp_dev->vqs[i]);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	if (!avq_num)
		return 0;
	sprintf(avq->name, "avq.%u", avq->vq_index);
	vq = vp_setup_vq(vdev, queue_idx++, vp_modern_avq_done, avq->name,
			 false, VIRTIO_MSI_NO_VECTOR,
			 &vp_dev->admin_vq.info);
	if (IS_ERR(vq)) {
		err = PTR_ERR(vq);
		goto out_del_vqs;
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_EACH, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one shared vector for config and
	 * slow path queues, one vector per queue for the rest.
	 */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_SHARED_SLOW, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, vqs_info,
			       VP_VQ_VECTOR_POLICY_SHARED, desc);
	if (!err)
		return 0;
	/* Is there an interrupt? If not give up. */
	if (!(to_vp_device(vdev)->pci_dev->irq))
		return err;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, vqs_info);
}
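
/*
 * Illustrative sketch (not part of this driver): a device driver normally
 * reaches vp_find_vqs() through the virtio_find_vqs() helper. The callback
 * and array names below are hypothetical:
 *
 *	struct virtqueue *vqs[2];
 *	struct virtqueue_info vqs_info[] = {
 *		{ "rx", my_rx_done },
 *		{ "tx", my_tx_done },
 *	};
 *	int err = virtio_find_vqs(vdev, 2, vqs, vqs_info, NULL);
 *
 * On success the transport has already walked the MSI-X fallback chain
 * above and bound each vq to an interrupt.
 */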
const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}
/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_update_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_and_hint(irq, mask);
		}
	}
	return 0;
}
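
/*
 * Note: drivers typically reach this via virtqueue_set_affinity(), e.g. to
 * spread per-queue interrupts across CPUs; under INTx the request is
 * silently accepted with no effect, as noted above.
 */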
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR ||
	    vp_is_slow_path_vector(vp_dev->vqs[index]->msix_vector))
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}
static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}
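
/*
 * If PMCSR advertises No_Soft_Reset, the device keeps its state across a
 * D3hot->D0 transition, so the heavyweight freeze/restore cycle (which
 * resets the device) can be skipped for plain suspend/resume.
 */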
static bool vp_supports_pm_no_reset(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 pmcsr;

	if (!pci_dev->pm_cap)
		return false;

	pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		dev_err(dev, "Unable to query pmcsr");
		return false;
	}

	return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}
static int virtio_pci_suspend(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
}
static int virtio_pci_resume(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
}
static const struct dev_pm_ops virtio_pci_pm_ops = {
	.suspend = virtio_pci_suspend,
	.resume = virtio_pci_resume,
	.freeze = virtio_pci_freeze,
	.thaw = virtio_pci_restore,
	.poweroff = virtio_pci_freeze,
	.restore = virtio_pci_restore,
};
#endif
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}
static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	INIT_LIST_HEAD(&vp_dev->slow_virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		/* Also try legacy mode if we can't find modern capabilities. */
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}
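
/*
 * SR-IOV may only be reconfigured once the device is live (DRIVER_OK) and
 * has negotiated VIRTIO_F_SR_IOV, and never while VFs are still assigned;
 * the checks below enforce this.
 */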
static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}
static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};
struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
{
	struct virtio_pci_device *pf_vp_dev;

	pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
	if (IS_ERR(pf_vp_dev))
		return NULL;

	return &pf_vp_dev->vdev;
}
module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");