/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
				     pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
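
/*
 * Example (hypothetical driver code, not part of this file): a minimal
 * sketch of mapping a device's first memory BAR with pci_ioremap_bar()
 * during probe.  The "regs" variable and BAR index 0 are illustrative
 * assumptions, not requirements of the API.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	(access registers with readl(regs + off) / writel(val, regs + off),
 *	 and release the mapping with iounmap(regs) on teardown)
 */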

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
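
/*
 * Example (hypothetical caller, not part of this file): a sketch of
 * locating the Power Management capability and reading its PMC
 * register; the local names are illustrative.
 *
 *	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 *	if (pm) {
 *		u16 pmc;
 *		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
 *	}
 */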

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
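
/*
 * Example (hypothetical caller, not part of this file): walking every
 * instance of the vendor-specific extended capability, which may occur
 * more than once in a device's extended config space.
 *
 *	int pos = 0;
 *	while ((pos = pci_find_next_ext_capability(dev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		(inspect the capability at "pos")
 */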

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
						pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:
	 * We can enter D0 from any state, but we can only go deeper
	 * into sleep if we're already in a low power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for a device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, we put the device into D3hot in the
	 * native way, then put the device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
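
/*
 * Example (hypothetical driver suspend path, not part of this file):
 * a common sequence is to save config space and then request D3hot;
 * the exact state a driver should ask for depends on its hardware.
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */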

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
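
/*
 * Example (hypothetical legacy .suspend handler, not part of this
 * file): drivers using the old pm_message_t interface typically let
 * pci_choose_state() pick the target state; "foo_suspend" is made up.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 *		return 0;
 *	}
 */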

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
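
/*
 * Example (hypothetical resume path, not part of this file): the usual
 * counterpart of the suspend sequence above is to return to D0 and
 * then restore the state captured by pci_save_state().
 *
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */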

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
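
/*
 * Example (hypothetical caller, not part of this file): a sketch of
 * capturing device state once and replaying it later, e.g. around an
 * operation that may clobber config space.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	(something that may trash the device's configuration)
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */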

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
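
/*
 * Example (hypothetical probe function, not part of this file): the
 * canonical open-coded probe prologue built on pci_enable_device();
 * "foo_probe" and the "foo" region name are made up.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "foo");
 *		if (err) {
 *			pci_disable_device(pdev);
 *			return err;
 *		}
 *		return 0;
 *	}
 */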

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using the managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (!dr)
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
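
/*
 * Example (hypothetical probe function, not part of this file): with
 * the managed variant, no explicit error unwinding or .remove-time
 * pci_disable_device() is needed; devres handles it on driver detach.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *		if (err)
 *			return err;
 *		(further managed setup)
 *		return 0;
 *	}
 */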

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

/**
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added. This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device (struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				struct pci_dev *bridge;

				bridge = pme_dev->dev->bus->self;
				/*
				 * If the bridge is in a low power state, the
				 * configuration space of subordinate devices
				 * may not be accessible
				 */
				if (bridge && bridge->current_state != PCI_D0)
					continue;
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * PCI (as opposed to PCIe) PME requires that the device have
	 * its PME# line hooked up correctly. Not all hardware vendors
	 * do this, so the PME never gets delivered and the device
	 * remains asleep. The easiest way around this is to
	 * periodically walk the list of suspended devices and check
	 * whether any have their PME flag set. The assumption is that
	 * we'll wake up often enough anyway that this won't be a huge
	 * hit, and the power savings from the devices will still be a
	 * win.
	 *
	 * Although PCIe uses in-band PME message instead of PME# line
	 * to report PME, PME does not work for some PCIe devices in
	 * reality.  For example, there are devices that set their PME
	 * status bits, but don't really bother to send a PME message;
	 * there are PCI Express Root Ports that don't bother to
	 * trigger interrupts when they receive PME messages from the
	 * devices below.  So PME poll is used for PCIe devices too.
	 */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
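
/*
 * Example (hypothetical driver suspend path, not part of this file):
 * drivers normally use the pci_enable_wake() wrapper rather than this
 * function directly; a WoL-capable NIC might do something like:
 *
 *	pci_enable_wake(pdev, PCI_D3hot, wol_enabled);
 *
 * where "wol_enabled" is made-up driver state.
 */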

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* D3cold during system suspend/hibernate is not supported */
	if (target_state > PCI_D3hot)
		target_state = PCI_D3hot;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
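
/*
 * Example (hypothetical legacy suspend/resume pair, not part of this
 * file): pci_prepare_to_sleep() and pci_back_from_sleep() bracket a
 * system sleep transition for drivers without special requirements;
 * "foo_suspend"/"foo_resume" are made up.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_back_from_sleep(pdev);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */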

/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error) {
		__pci_enable_wake(dev, target_state, true, false);
		dev->runtime_d3cold = false;
	}

	return error;
}

/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	if (device_run_wake(&dev->dev))
		return true;

	if (!dev->pme_support)
		return false;

	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);

void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	if (parent)
		pm_runtime_get_sync(parent);
	pm_runtime_get_noresume(dev);
	/*
	 * pdev->current_state is set to PCI_D3cold during suspending,
	 * so wait until suspending completes
	 */
	pm_runtime_barrier(dev);
	/*
	 * Only need to resume devices in D3cold, because config
	 * registers are still accessible for devices suspended but
	 * not in D3cold.
	 */
	if (pdev->current_state == PCI_D3cold)
		pm_runtime_resume(dev);
}

void pci_config_pm_runtime_put(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct device *parent = dev->parent;

	pm_runtime_put(dev);
	if (parent)
		pm_runtime_put_sync(parent);
}

/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	pm_runtime_forbid(&dev->dev);
	pm_runtime_set_active(&dev->dev);
	pm_runtime_enable(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
	dev->d3cold_allowed = true;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}

/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events).  If @dev supports
 * platform wakeup events, set the device flag to indicate as much.  This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	platform_pci_sleep_wake(dev, false);
}

static void pci_add_saved_cap(struct pci_dev *pci_dev,
			      struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}

/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}

void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos, *n;

	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
		kfree(tmp);
}

/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_ari(struct pci_dev *dev)
{
	u32 cap;
	struct pci_dev *bridge;

	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI))
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI);
	bridge->ari_enabled = 1;
}

/**
 * pci_enable_ido - enable ID-based Ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev.  @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	u16 ctrl = 0;

	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	if (ctrl)
		pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
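
/*
 * Example (hypothetical caller, not part of this file): opting a
 * device's requests and completions into ID-based ordering, and
 * undoing it symmetrically.
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 *	(...)
 *	pci_disable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */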

/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	u16 ctrl = 0;

	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	if (ctrl)
		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);

/**
 * pci_enable_obff - enable optimized buffer flush/fill
 * @dev: PCI device
 * @type: type of signaling to use
 *
 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
 * signaling if possible, falling back to message signaling only if
 * WAKE# isn't supported.  @type should indicate whether the PCIe link
 * should be brought out of L0s or L1 to send the message.  It should be
 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
 *
 * If your device can benefit from receiving all messages, even at the
 * power cost of bringing the link back up from a low power state, use
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
 * preferred type).
 *
 * RETURNS:
 * Zero on success, appropriate error number on failure.
 */
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
	u32 cap;
	u16 ctrl;
	int ret;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_OBFF_MASK))
		return -ENOTSUPP; /* no OBFF support at all */

	/* Make sure the topology supports OBFF as well */
	if (dev->bus->self) {
		ret = pci_enable_obff(dev->bus->self, type);
		if (ret)
			return ret;
	}

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
	if (cap & PCI_EXP_OBFF_WAKE)
		ctrl |= PCI_EXP_OBFF_WAKE_EN;
	else {
		switch (type) {
		case PCI_EXP_OBFF_SIGNAL_L0:
			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
				ctrl |= PCI_EXP_OBFF_MSGA_EN;
			break;
		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
			ctrl |= PCI_EXP_OBFF_MSGB_EN;
			break;
		default:
			WARN(1, "bad OBFF signal type\n");
			return -ENOTSUPP;
		}
	}
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_obff);
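
/*
 * Example (hypothetical caller, not part of this file): requesting the
 * preferred low-power-friendly OBFF signaling while tolerating hardware
 * that lacks the feature entirely.
 *
 *	int err = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);
 *	if (err && err != -ENOTSUPP)
 *		return err;
 */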
/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
	pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
}
EXPORT_SYMBOL(pci_disable_obff);
/**
 * pci_ltr_supported - check whether a device supports LTR
 * @dev: PCI device
 *
 * RETURNS:
 * True if @dev supports latency tolerance reporting, false otherwise.
 */
static bool pci_ltr_supported(struct pci_dev *dev)
{
	u32 cap;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);

	return cap & PCI_EXP_DEVCAP2_LTR;
}
/**
 * pci_enable_ltr - enable latency tolerance reporting
 * @dev: PCI device
 *
 * Enable LTR on @dev if possible, which means enabling it first on
 * upstream ports.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int pci_enable_ltr(struct pci_dev *dev)
{
	int ret;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return -EINVAL;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	/* Enable upstream ports first */
	if (dev->bus->self) {
		ret = pci_enable_ltr(dev->bus->self);
		if (ret)
			return ret;
	}

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_enable_ltr);
/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	if (!pci_ltr_supported(dev))
		return;

	pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
}
EXPORT_SYMBOL(pci_disable_ltr);
static int __pci_ltr_scale(int *val)
{
	int scale = 0;

	while (*val > 1023) {
		*val = (*val + 31) / 32;
		scale += 5;
	}
	return scale;
}
/**
 * pci_set_ltr - set LTR latency values
 * @dev: PCI device
 * @snoop_lat_ns: snoop latency in nanoseconds
 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
 *
 * Figure out the scale and set the LTR values accordingly.
 */
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
{
	int pos, ret, snoop_scale, nosnoop_scale;
	u16 val;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);

	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
		return -EINVAL;

	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
		return -EINVAL;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!pos)
		return -ENOTSUPP;

	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
	if (ret)
		return -EIO;

	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
	if (ret)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(pci_set_ltr);
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int pos, ret;
	u16 ctrl;

	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
	if (ret >= 0)
		return ret > 0;

	if (!pci_is_pcie(pdev))
		return false;

	/* Filter out flags not applicable to multifunction */
	if (pdev->multifunction)
		acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
			      PCI_ACS_EC | PCI_ACS_DT);

	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pdev->multifunction) {
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
		if (!pos)
			return false;

		pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
		if ((ctrl & acs_flags) != acs_flags)
			return false;
	}

	return true;
}
/**
 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
 * @start: starting downstream device
 * @end: ending upstream device or NULL to search to the root bus
 * @acs_flags: required flags
 *
 * Walk up a device tree from start to end testing PCI ACS support.  If
 * any step along the way does not support the required flags, return false.
 */
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags)
{
	struct pci_dev *pdev, *parent = start;

	do {
		pdev = parent;

		if (!pci_acs_enabled(pdev, acs_flags))
			return false;

		if (pci_is_root_bus(pdev->bus))
			return (end == NULL);

		parent = pdev->bus->self;
	} while (pdev != end);

	return true;
}
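/*
 * Example (editor's sketch): an IOMMU-style caller that wants to know
 * whether peer-to-peer traffic from @pdev is fully redirected upstream to
 * the root bus can test all the redirection flags along the path:
 *
 *	bool isolated = pci_acs_path_enabled(pdev, NULL,
 *			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 */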
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1).
 */
u8
pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8
pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}
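/*
 * Worked example of the swizzle arithmetic above (editor's addition): a
 * device in slot 2 using INTB (pin = 2) behind one bridge maps to
 * (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD on the bridge's upstream side;
 * pci_common_swizzle() simply repeats that step for every bridge on the
 * way up to the root bus.
 */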
/**
 * pci_release_region - Release a PCI bar
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return;
	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		release_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		release_mem_region(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask &= ~(1 << bar);
}
/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
								int exclusive)
{
	struct pci_devres *dr;

	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}
/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			pci_release_region(pdev, i);
}
int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	while (--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}

int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
			IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_regions(struct pci_dev *pdev)
{
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
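/*
 * Example (editor's sketch, with hypothetical "foo" names): the usual
 * pairing in a driver is pci_request_regions() in probe matched by
 * pci_release_regions() in remove.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		rc = pci_request_regions(pdev, "foo");
 *		if (rc)
 *			pci_disable_device(pdev);
 *		return rc;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_release_regions(pdev);
 *		pci_disable_device(pdev);
 *	}
 */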
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 */
char * __weak __init pcibios_setup(char *str)
{
	return str;
}
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
	if (pci_is_pcie(dev))
		return;

	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;
	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}
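/*
 * Example (editor's sketch): pci_set_master() belongs after
 * pci_enable_device() and before any DMA is started; the surrounding
 * driver code is hypothetical.
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pci_set_master(pdev);
 *
 * Bus mastering must be enabled before the device may perform DMA.
 */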
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or a multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
#ifdef PCI_DISABLE_MWI
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else
/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	int rc = pci_set_mwi(dev);
	return rc;
}
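/*
 * Example (editor's note): since MWI is only a performance optimization,
 * most callers use the _try_ variant and ignore the result:
 *
 *	pci_try_set_mwi(pdev);
 *
 * pci_set_mwi() is for the rare driver that needs to know whether MWI is
 * actually in effect and handle the failure.
 */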
/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */
/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable)
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	else
		new = pci_command | PCI_COMMAND_INTX_DISABLE;

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}
/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev supports INTx masking via the config space
 * command word.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
	bool mask_supported = false;
	u16 orig, new;

	if (dev->broken_intx_masking)
		return false;

	pci_cfg_access_lock(dev);

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	pci_write_config_word(dev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	/*
	 * There's no way to protect against hardware bugs or detect them
	 * reliably, but as long as we know what the value should be, let's
	 * go ahead and check it.
	 */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		dev_err(&dev->dev, "Command register changed from "
			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
		mask_supported = true;
		pci_write_config_word(dev, PCI_COMMAND, orig);
	}

	pci_cfg_access_unlock(dev);
	return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case. False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true. False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
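/*
 * Example (editor's sketch): the pair above supports generic handling of a
 * shared INTx line, in the style of a userspace-I/O driver.  The struct
 * and function names are hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *priv = data;
 *
 *		if (!pci_check_and_mask_intx(priv->pdev))
 *			return IRQ_NONE;
 *		complete(&priv->irq_event);
 *		return IRQ_HANDLED;
 *	}
 *
 * The interrupt stays masked until the consumer re-enables it with
 * pci_check_and_unmask_intx().
 */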
/**
 * pci_msi_off - disables any MSI or MSI-X capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use MSI, see pci_enable_msi() and friends.
 * This is a lower-level primitive that allows us to disable
 * MSI operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);

int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	u32 cap;
	u16 status;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

	msleep(100);

	return 0;
}
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}
static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	return rc;
}
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = __pci_dev_reset(dev, probe);

	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}
	return rc;
}
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	return __pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
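/*
 * Example (editor's sketch): because pci_reset_function() saves and
 * restores config space around the reset, a caller that owns the (idle)
 * device can invoke it directly; the logging shown is hypothetical.
 *
 *	if (pci_reset_function(pdev) == 0)
 *		dev_info(&pdev->dev, "function reset completed\n");
 */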
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
/**
 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_mmrbc(struct pci_dev *dev)
{
	int cap;
	u16 cmd;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
);
3453 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3454 * @dev: PCI device to query
3455 * @mmrbc: maximum memory read count in bytes
3456 * valid values are 512, 1024, 2048, 4096
3458 * If possible sets maximum memory read byte count, some bridges have erratas
3459 * that prevent this.
3461 int pcix_set_mmrbc(struct pci_dev
*dev
, int mmrbc
)
3467 if (mmrbc
< 512 || mmrbc
> 4096 || !is_power_of_2(mmrbc
))
3470 v
= ffs(mmrbc
) - 10;
3472 cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIX
);
3476 if (pci_read_config_dword(dev
, cap
+ PCI_X_STATUS
, &stat
))
3479 if (v
> (stat
& PCI_X_STATUS_MAX_READ
) >> 21)
3482 if (pci_read_config_word(dev
, cap
+ PCI_X_CMD
, &cmd
))
3485 o
= (cmd
& PCI_X_CMD_MAX_READ
) >> 2;
3487 if (v
> o
&& (dev
->bus
->bus_flags
& PCI_BUS_FLAGS_NO_MMRBC
))
3490 cmd
&= ~PCI_X_CMD_MAX_READ
;
3492 if (pci_write_config_word(dev
, cap
+ PCI_X_CMD
, cmd
))
3497 EXPORT_SYMBOL(pcix_set_mmrbc
);
/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 * or appropriate error value.
 */
int pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	u16 v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		return -EINVAL;

	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		if (mps < 0)
			return mps;
		if (mps < rq)
			rq = mps;
	}

	v = (ffs(rq) - 8) << 12;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_READRQ, v);
}
EXPORT_SYMBOL(pcie_set_readrq);
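/*
 * Example (editor's note): the Device Control register encodes the size as
 * a power of two, so pcie_set_readrq(dev, 512) stores (ffs(512) - 8) << 12,
 * i.e. field value 2 in bits 14:12, and pcie_get_readrq() decodes it back
 * as 128 << 2 = 512 bytes.
 */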
/**
 * pcie_get_mps - get PCI Express maximum payload size
 * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
 * or appropriate error value.
 */
int pcie_get_mps(struct pci_dev *dev)
{
	u16 ctl;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);

	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
}
/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *	valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	u16 v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		return -EINVAL;

	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		return -EINVAL;
	v <<= 5;

	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  PCI_EXP_DEVCTL_PAYLOAD, v);
}
/**
 * pci_select_bars - Make BAR mask from the type of resource
 * @dev: the PCI device for which BAR mask is made
 * @flags: resource type mask to be selected
 *
 * This helper routine makes a BAR mask from the type of resource.
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}
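/*
 * Example (editor's sketch): pci_select_bars() is typically combined with
 * pci_request_selected_regions() to grab only one kind of BAR, e.g. all
 * memory BARs and none of the I/O ports:
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	rc = pci_request_selected_regions(pdev, bars, "foo");
 */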
/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						flags);
	return 0;
}
/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @flags: traverse ancestors and change bridges
 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, u32 flags)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
	if (rc)
		return rc;

	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (decode == true)
			cmd |= command_bits;
		else
			cmd &= ~command_bits;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
		return 0;

	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode == true)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
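/*
 * Example (editor's note): with the parser above, a boot parameter such as
 *
 *	pci=resource_alignment=20@0000:00:02.0
 *
 * requests 2^20 (1 MiB) alignment for domain 0000, bus 00, slot 02,
 * function 0; omitting the "20@" prefix falls back to PAGE_SIZE alignment.
 */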
/**
 * pci_is_reassigndev - check if the specified PCI device is a reassignment target
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero if the PCI device is a target device to reassign,
 *          zero otherwise.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}
/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	if (!pci_is_reassigndev(dev))
		return;

	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	align = pci_specified_resource_alignment(dev);
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}
ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';
	spin_unlock(&resource_alignment_lock);
	return count;
}
ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;
	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
static void pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(void)
{
	return 1;
}
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
							strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
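/*
 * Example (editor's note): several of the options parsed above can be
 * combined in one "pci=" boot argument, separated by commas, e.g.:
 *
 *	pci=nomsi,hpmemsize=8M,pcie_bus_safe
 *
 * which disables MSI, reserves 8 MB per hotplug bridge window and selects
 * the conservative MPS configuration.
 */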
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
);