/*
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 *
 * PCI Express I/O Virtualization (IOV) support.
 *   Address Translation Service 1.0
 */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/pci-ats.h>
#include "pci.h"	/* struct pci_sriov, __pci_read_base(), etc. */

#define VIRTFN_ID_LEN	16
static inline u8 virtfn_bus(struct pci_dev *dev, int id)
{
	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
				    dev->sriov->stride * id) >> 8);
}
static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
{
	return (dev->devfn + dev->sriov->offset +
		dev->sriov->stride * id) & 0xff;
}
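
/*
 * Illustrative note (not part of the original file): virtfn_bus() and
 * virtfn_devfn() implement the SR-IOV routing-ID arithmetic.  A VF's
 * 16-bit RID is the PF's RID plus VF Offset plus VF Stride * id; the
 * upper 8 bits select the bus and the lower 8 bits the devfn.  Assuming,
 * purely as an example, a PF at bus 0x05, devfn 0x00 with offset 0x80
 * and stride 0x2:
 *
 *	virtfn_bus(dev, 0)   == 0x05 + ((0x80 + 0x2 * 0)  >> 8) == 0x05
 *	virtfn_devfn(dev, 0) ==         (0x80 + 0x2 * 0) & 0xff == 0x80
 *	virtfn_bus(dev, 64)  == 0x05 + ((0x80 + 0x2 * 64) >> 8) == 0x06
 */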
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	int rc;
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	pci_bus_insert_busn_res(child, busnr, busnr);
	child->dev.parent = bus->bridge;
	rc = pci_bus_add_child(child);
	if (rc) {
		pci_remove_bus(child);
		return NULL;
	}

	return child;
}
static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
{
	struct pci_bus *child;

	if (bus->number == busnr)
		return;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	BUG_ON(!child);

	if (list_empty(&child->devices))
		pci_remove_bus(child);
}
static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
	int i;
	int rc;
	u64 size;
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct resource *res;
	struct pci_sriov *iov = dev->sriov;

	virtfn = alloc_pci_dev();
	if (!virtfn)
		return -ENOMEM;

	mutex_lock(&iov->dev->sriov->lock);
	virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
	if (!virtfn->bus) {
		kfree(virtfn);
		mutex_unlock(&iov->dev->sriov->lock);
		return -ENOMEM;
	}
	virtfn->devfn = virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
	pci_setup_device(virtfn);
	virtfn->dev.parent = dev->dev.parent;

	/* each VF BAR is an equal slice of the PF's corresponding IOV BAR */
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		size = resource_size(res);
		do_div(size, iov->total);
		virtfn->resource[i].start = res->start + size * id;
		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
		rc = request_resource(res, &virtfn->resource[i]);
		BUG_ON(rc);
	}

	if (reset)
		__pci_reset_function(virtfn);

	pci_device_add(virtfn, virtfn->bus);
	mutex_unlock(&iov->dev->sriov->lock);

	virtfn->physfn = pci_dev_get(dev);
	virtfn->is_virtfn = 1;

	rc = pci_bus_add_device(virtfn);
	if (rc)
		goto failed1;
	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed1;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed2;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed2:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
	pci_dev_put(dev);
	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
	mutex_unlock(&iov->dev->sriov->lock);

	return rc;
}
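
/*
 * Illustrative note (not part of the original file): virtfn_add() carves
 * each VF BAR out of the PF's IOV resource in equal slices of
 * resource_size(res) / iov->total.  Assuming, purely as an example, a
 * 4 MB IOV BAR and total == 8 VFs, VF id 3 is given the 512 KB window
 * [res->start + 3 * 512K, res->start + 4 * 512K - 1].
 */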
static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct pci_sriov *iov = dev->sriov;

	virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
					     virtfn_bus(dev, id),
					     virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	if (reset) {
		device_release_driver(&virtfn->dev);
		__pci_reset_function(virtfn);
	}

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	/*
	 * pci_stop_dev() could have been called for this virtfn already,
	 * so the directory for the virtfn may have been removed before.
	 * Double check to avoid spurious sysfs warnings.
	 */
	if (virtfn->dev.kobj.sd)
		sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	mutex_lock(&iov->dev->sriov->lock);
	pci_stop_and_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
	mutex_unlock(&iov->dev->sriov->lock);

	/* balance the pci_dev_get() in virtfn_add() */
	pci_dev_put(dev);
}
static int sriov_migration(struct pci_dev *dev)
{
	u16 status;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->nr_virtfn)
		return 0;

	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
		return 0;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
	if (!(status & PCI_SRIOV_STATUS_VFM))
		return 0;

	schedule_work(&iov->mtask);

	return 1;
}
static void sriov_migration_task(struct work_struct *work)
{
	int i;
	u8 state;
	u16 status;
	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

	for (i = iov->initial; i < iov->nr_virtfn; i++) {
		state = readb(iov->mstate + i);
		if (state == PCI_SRIOV_VFM_MI) {
			/* VF migrate-in request */
			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 1);
		} else if (state == PCI_SRIOV_VFM_MO) {
			/* VF migrate-out request */
			virtfn_remove(iov->self, i, 1);
			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 0);
		}
	}

	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
	status &= ~PCI_SRIOV_STATUS_VFM;
	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}
static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	if (nr_virtfn <= iov->initial)
		return 0;

	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	table = PCI_SRIOV_VFM_OFFSET(table);
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	/* map the VF Migration State Array */
	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}
static void sriov_disable_migration(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	cancel_work_sync(&iov->mtask);
	iounmap(iov->mstate);
}
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i, j;
	int nres;
	u16 offset, stride, initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;
	int bars = 0;

	if (!nr_virtfn)
		return 0;

	if (iov->nr_virtfn)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	if (initial > iov->total ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (nr_virtfn > 1 && !stride))
		return -EIO;

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		bars |= (1 << (i + PCI_IOV_RESOURCES));
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	iov->offset = offset;
	iov->stride = stride;

	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
		return -ENOMEM;
	}

	if (pci_enable_resources(dev, bars)) {
		dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
		return -ENOMEM;
	}

	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		pci_dev_put(pdev);

		if (!pdev->is_physfn)
			return -ENODEV;

		rc = sysfs_create_link(&dev->dev.kobj,
				       &pdev->dev.kobj, "dep_link");
		if (rc)
			return rc;
	}

	/* turn on VF Enable and VF Memory Space Enable */
	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	msleep(100);
	pci_cfg_access_unlock(dev);

	iov->initial = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	for (i = 0; i < initial; i++) {
		rc = virtfn_add(dev, i, 0);
		if (rc)
			goto failed;
	}

	if (iov->cap & PCI_SRIOV_CAP_VFM) {
		rc = sriov_enable_migration(dev, nr_virtfn);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->nr_virtfn = nr_virtfn;

	return 0;

failed:
	for (j = 0; j < i; j++)
		virtfn_remove(dev, j, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	return rc;
}
static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->nr_virtfn)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->nr_virtfn; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_cfg_access_lock(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_cfg_access_unlock(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->nr_virtfn = 0;
}
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
	    dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	/* look for another PF on this bus that has already set up SR-IOV */
	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	/* keep only the lowest supported page size >= the system page size */
	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		i += __pci_read_base(dev, pci_bar_unknown, res,
				     pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}
static void sriov_release(struct pci_dev *dev)
{
	BUG_ON(dev->sriov->nr_virtfn);

	if (dev != dev->sriov->dev)
		pci_dev_put(dev->sriov->dev);

	mutex_destroy(&dev->sriov->lock);

	kfree(dev->sriov);
	dev->sriov = NULL;
}
static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}
/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
	int pos;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		return sriov_init(dev, pos);

	return -ENODEV;
}
/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_release(dev);
}
/**
 * pci_iov_resource_bar - get position of the SR-IOV BAR
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns position of the BAR encapsulated in the SR-IOV capability.
 */
int pci_iov_resource_bar(struct pci_dev *dev, int resno,
			 enum pci_bar_type *type)
{
	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
		return 0;

	BUG_ON(!dev->is_physfn);

	*type = pci_bar_unknown;

	return dev->sriov->pos + PCI_SRIOV_BAR +
		4 * (resno - PCI_IOV_RESOURCES);
}
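
/*
 * Illustrative note (not part of the original file): for the third IOV
 * resource, resno == PCI_IOV_RESOURCES + 2, so pci_iov_resource_bar()
 * returns dev->sriov->pos + PCI_SRIOV_BAR + 8, the config-space offset
 * of VF BAR2 within the SR-IOV capability.
 */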
/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size which is defined as
 * the VF BAR size multiplied by the number of VFs.  The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
	struct resource tmp;
	enum pci_bar_type type;
	int reg = pci_iov_resource_bar(dev, resno, &type);

	if (!reg)
		return 0;

	__pci_read_base(dev, type, &tmp, reg);
	return resource_alignment(&tmp);
}
/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
	if (dev->is_physfn)
		sriov_restore_state(dev);
}
/**
 * pci_iov_bus_range - find bus range used by Virtual Function
 * @bus: the PCI bus
 *
 * Returns max number of buses (exclude current one) used by Virtual
 * Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
	int max = 0;
	u8 busnr;
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (!dev->is_physfn)
			continue;
		busnr = virtfn_bus(dev, dev->sriov->total - 1);
		if (busnr > max)
			max = busnr;
	}

	return max ? max - bus->number : 0;
}
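
/*
 * Illustrative note (not part of the original file): if a PF sits on bus
 * 0x05 and virtfn_bus() for its highest possible VF evaluates to 0x07,
 * pci_iov_bus_range() reports 2, i.e. two additional bus numbers must be
 * reserved behind the bridge for the VFs.  The values are assumptions
 * chosen only to show the calculation.
 */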
/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	might_sleep();

	if (!dev->is_physfn)
		return -ENODEV;

	return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);
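
/*
 * Illustrative usage sketch (not part of this file): a PF driver would
 * typically enable its VFs after device setup and disable them on
 * teardown.  The foo_* names and the VF count are assumptions.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 *	{
 *		int rc;
 *
 *		rc = pci_enable_sriov(pdev, 4);		// request 4 VFs
 *		if (rc)
 *			dev_warn(&pdev->dev, "SR-IOV not enabled (%d)\n", rc);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_disable_sriov(pdev);
 *	}
 */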
/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
	might_sleep();

	if (!dev->is_physfn)
		return;

	sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);
/**
 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
 * @dev: the PCI device
 *
 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
 *
 * The Physical Function driver is responsible for registering an IRQ
 * handler using the VF Migration Interrupt Message Number and for calling
 * this function when the hardware generates the interrupt.
 */
irqreturn_t pci_sriov_migration(struct pci_dev *dev)
{
	if (!dev->is_physfn)
		return IRQ_NONE;

	return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
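
/*
 * Illustrative usage sketch (not part of this file): the interrupt
 * handler a PF driver might register on the VF Migration Interrupt
 * vector.  The foo_* name is an assumption.
 *
 *	static irqreturn_t foo_vf_migration_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		return pci_sriov_migration(pdev);
 *	}
 */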
/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev || !dev->is_physfn)
		return 0;
	else
		return dev->sriov->nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_num_vf);