1 /* SPDX-License-Identifier: GPL-2.0-only */
#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>
/*
 * Extract the Capability ID from a PCIe extended capability header.
 * Per the PCIe Base Spec, the extended capability header holds the
 * capability ID in bits 15:0.
 * (Body was truncated in the reviewed text; reconstructed from the
 * header layout and the callers below, which compare it to `cap_id`.)
 */
static unsigned int ext_cap_id(unsigned int cap)
{
	return cap & 0xffff;
}
/*
 * Extract the Next Capability Offset from a PCIe extended capability
 * header: bits 31:20, masked to a DWORD-aligned config offset (0xffc).
 * `>>` binds tighter than `&`, so this is (cap >> 20) & 0xffc.
 */
static unsigned int ext_cap_next_offset(unsigned int cap)
{
	return cap >> 20 & 0xffc;
}
22 static unsigned int find_ext_cap_offset(const struct device
*dev
, unsigned int cap_id
,
25 unsigned int this_cap_offset
= offset
;
27 while (this_cap_offset
>= PCIE_EXT_CAP_OFFSET
) {
28 const unsigned int this_cap
= pci_read_config32(dev
, this_cap_offset
);
30 /* Bail out when this request is unsupported */
31 if (this_cap
== 0xffffffff)
34 if (ext_cap_id(this_cap
) == cap_id
)
35 return this_cap_offset
;
37 this_cap_offset
= ext_cap_next_offset(this_cap
);
44 * Search for an extended capability with the ID `cap`.
46 * Returns the offset of the first matching extended
47 * capability if found, or 0 otherwise.
49 * A new search is started with `offset == 0`.
50 * To continue a search, the prior return value
51 * should be passed as `offset`.
53 unsigned int pciexp_find_extended_cap(const struct device
*dev
, unsigned int cap
,
56 unsigned int next_cap_offset
;
59 next_cap_offset
= ext_cap_next_offset(pci_read_config32(dev
, offset
));
61 next_cap_offset
= PCIE_EXT_CAP_OFFSET
;
63 return find_ext_cap_offset(dev
, cap
, next_cap_offset
);
67 * Search for a vendor-specific extended capability,
68 * with the vendor-specific ID `cap`.
70 * Returns the offset of the vendor-specific header,
71 * i.e. the offset of the extended capability + 4,
72 * or 0 if none is found.
74 * A new search is started with `offset == 0`.
75 * To continue a search, the prior return value
76 * should be passed as `offset`.
78 unsigned int pciexp_find_ext_vendor_cap(const struct device
*dev
, unsigned int cap
,
81 /* Reconstruct capability offset from vendor-specific header offset. */
86 offset
= pciexp_find_extended_cap(dev
, PCI_EXT_CAP_ID_VNDR
, offset
);
90 const unsigned int vndr_cap
= pci_read_config32(dev
, offset
+ 4);
91 if ((vndr_cap
& 0xffff) == cap
)
97 * Re-train a PCIe link
99 #define PCIE_TRAIN_RETRY 10000
100 static int pciexp_retrain_link(struct device
*dev
, unsigned int cap
)
106 * Implementation note (page 633) in PCIe Specification 3.0 suggests
107 * polling the Link Training bit in the Link Status register until the
108 * value returned is 0 before setting the Retrain Link bit to 1.
109 * This is meant to avoid a race condition when using the
110 * Retrain Link mechanism.
112 for (try = PCIE_TRAIN_RETRY
; try > 0; try--) {
113 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKSTA
);
114 if (!(lnk
& PCI_EXP_LNKSTA_LT
))
119 printk(BIOS_ERR
, "%s: Link Retrain timeout\n", dev_path(dev
));
123 /* Start link retraining */
124 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKCTL
);
125 lnk
|= PCI_EXP_LNKCTL_RL
;
126 pci_write_config16(dev
, cap
+ PCI_EXP_LNKCTL
, lnk
);
128 /* Wait for training to complete */
129 for (try = PCIE_TRAIN_RETRY
; try > 0; try--) {
130 lnk
= pci_read_config16(dev
, cap
+ PCI_EXP_LNKSTA
);
131 if (!(lnk
& PCI_EXP_LNKSTA_LT
))
136 printk(BIOS_ERR
, "%s: Link Retrain timeout\n", dev_path(dev
));
141 * Check the Slot Clock Configuration for root port and endpoint
142 * and enable Common Clock Configuration if possible. If CCC is
143 * enabled the link must be retrained.
145 static void pciexp_enable_common_clock(struct device
*root
, unsigned int root_cap
,
146 struct device
*endp
, unsigned int endp_cap
)
148 u16 root_scc
, endp_scc
, lnkctl
;
150 /* Get Slot Clock Configuration for root port */
151 root_scc
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKSTA
);
152 root_scc
&= PCI_EXP_LNKSTA_SLC
;
154 /* Get Slot Clock Configuration for endpoint */
155 endp_scc
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKSTA
);
156 endp_scc
&= PCI_EXP_LNKSTA_SLC
;
158 /* Enable Common Clock Configuration and retrain */
159 if (root_scc
&& endp_scc
) {
160 printk(BIOS_INFO
, "Enabling Common Clock Configuration\n");
162 /* Set in endpoint */
163 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
164 lnkctl
|= PCI_EXP_LNKCTL_CCC
;
165 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
167 /* Set in root port */
168 lnkctl
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
);
169 lnkctl
|= PCI_EXP_LNKCTL_CCC
;
170 pci_write_config16(root
, root_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
172 /* Retrain link if CCC was enabled */
173 pciexp_retrain_link(root
, root_cap
);
177 static void pciexp_enable_clock_power_pm(struct device
*endp
, unsigned int endp_cap
)
179 /* check if per port clk req is supported in device */
182 endp_ca
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_LNKCAP
);
183 if ((endp_ca
& PCI_EXP_CLK_PM
) == 0) {
184 printk(BIOS_INFO
, "PCIE CLK PM is not supported by endpoint\n");
187 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
188 lnkctl
= lnkctl
| PCI_EXP_EN_CLK_PM
;
189 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
192 static bool _pciexp_ltr_supported(struct device
*dev
, unsigned int cap
)
194 return pci_read_config16(dev
, cap
+ PCI_EXP_DEVCAP2
) & PCI_EXP_DEVCAP2_LTR
;
197 static bool _pciexp_ltr_enabled(struct device
*dev
, unsigned int cap
)
199 return pci_read_config16(dev
, cap
+ PCI_EXP_DEVCTL2
) & PCI_EXP_DEV2_LTR
;
202 static bool _pciexp_enable_ltr(struct device
*parent
, unsigned int parent_cap
,
203 struct device
*dev
, unsigned int cap
)
205 if (!_pciexp_ltr_supported(dev
, cap
)) {
206 printk(BIOS_DEBUG
, "%s: No LTR support\n", dev_path(dev
));
210 if (_pciexp_ltr_enabled(dev
, cap
))
214 (!_pciexp_ltr_supported(parent
, parent_cap
) ||
215 !_pciexp_ltr_enabled(parent
, parent_cap
)))
218 pci_or_config16(dev
, cap
+ PCI_EXP_DEVCTL2
, PCI_EXP_DEV2_LTR
);
219 printk(BIOS_INFO
, "%s: Enabled LTR\n", dev_path(dev
));
223 static void pciexp_enable_ltr(struct device
*dev
)
225 const unsigned int cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
230 * If we have get_ltr_max_latencies(), treat `dev` as the root.
231 * If not, let _pciexp_enable_ltr() query the parent's state.
233 struct device
*parent
= NULL
;
234 unsigned int parent_cap
= 0;
235 if (!dev
->ops
->ops_pci
|| !dev
->ops
->ops_pci
->get_ltr_max_latencies
) {
236 parent
= dev
->bus
->dev
;
237 if (parent
->path
.type
!= DEVICE_PATH_PCI
)
239 parent_cap
= pci_find_capability(parent
, PCI_CAP_ID_PCIE
);
244 (void)_pciexp_enable_ltr(parent
, parent_cap
, dev
, cap
);
247 bool pciexp_get_ltr_max_latencies(struct device
*dev
, u16
*max_snoop
, u16
*max_nosnoop
)
249 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
251 if (dev
->ops
->ops_pci
&& dev
->ops
->ops_pci
->get_ltr_max_latencies
)
253 if (dev
->bus
->dev
== dev
|| dev
->bus
->dev
->path
.type
!= DEVICE_PATH_PCI
)
258 dev
->ops
->ops_pci
->get_ltr_max_latencies(max_snoop
, max_nosnoop
);
262 static void pciexp_configure_ltr(struct device
*parent
, unsigned int parent_cap
,
263 struct device
*dev
, unsigned int cap
)
265 if (!_pciexp_enable_ltr(parent
, parent_cap
, dev
, cap
))
268 const unsigned int ltr_cap
= pciexp_find_extended_cap(dev
, PCIE_EXT_CAP_LTR_ID
, 0);
272 u16 max_snoop
, max_nosnoop
;
273 if (!pciexp_get_ltr_max_latencies(dev
, &max_snoop
, &max_nosnoop
))
276 pci_write_config16(dev
, ltr_cap
+ PCI_LTR_MAX_SNOOP
, max_snoop
);
277 pci_write_config16(dev
, ltr_cap
+ PCI_LTR_MAX_NOSNOOP
, max_nosnoop
);
278 printk(BIOS_INFO
, "%s: Programmed LTR max latencies\n", dev_path(dev
));
/*
 * Fold one endpoint's L1 Sub-State capabilities (read from its L1SS
 * capability register at endp_cap + 4) into the accumulated root-port
 * view in *data.
 *
 * *data layout mirrors the L1SS Capabilities register: support bits in
 * [3:0], Common Mode Restore Time in [15:8], Power On Scale in [17:16],
 * Power On Value in [23:19].
 *
 * Keeps the intersection of support bits, the larger power-on time
 * (value * scale multiplier), and the larger common-mode restore time.
 *
 * Returns 1 if a common set of sub-states remains, 0 if the endpoint
 * shares no L1 sub-state with the accumulated set (feature must abort).
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	/* Power-on-scale multipliers: 2us, 10us, 100us; index 3 reserved. */
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	/* Keep the larger effective power-on time. */
	if (power_on_value * mult[power_on_scale] <
	    endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
		| (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
316 static void pciexp_L1_substate_commit(struct device
*root
, struct device
*dev
,
317 unsigned int root_cap
, unsigned int end_cap
)
319 struct device
*dev_t
;
320 unsigned char L1_ss_ok
;
321 unsigned int rp_L1_support
= pci_read_config32(root
, root_cap
+ 4);
322 unsigned int L1SubStateSupport
;
323 unsigned int comm_mode_rst_time
;
324 unsigned int power_on_scale
;
325 unsigned int endp_power_on_value
;
327 for (dev_t
= dev
; dev_t
; dev_t
= dev_t
->sibling
) {
329 * rp_L1_support is init'd above from root port.
330 * it needs coordination with endpoints to reach in common.
331 * if certain endpoint doesn't support L1 Sub-State, abort
332 * this feature enabling.
334 L1_ss_ok
= pciexp_L1_substate_cal(dev_t
, end_cap
,
340 L1SubStateSupport
= rp_L1_support
& 0xf;
341 comm_mode_rst_time
= (rp_L1_support
>> 8) & 0xff;
342 power_on_scale
= (rp_L1_support
>> 16) & 0x3;
343 endp_power_on_value
= (rp_L1_support
>> 19) & 0x1f;
345 printk(BIOS_INFO
, "L1 Sub-State supported from root port %d\n",
346 root
->path
.pci
.devfn
>> 3);
347 printk(BIOS_INFO
, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport
);
348 printk(BIOS_INFO
, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time
);
349 printk(BIOS_INFO
, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
350 endp_power_on_value
, power_on_scale
);
352 pci_update_config32(root
, root_cap
+ 0x08, ~0xff00,
353 (comm_mode_rst_time
<< 8));
355 pci_update_config32(root
, root_cap
+ 0x0c, 0xffffff04,
356 (endp_power_on_value
<< 3) | (power_on_scale
));
358 /* TODO: 0xa0, 2 are values that work on some chipsets but really
359 * should be determined dynamically by looking at downstream devices.
361 pci_update_config32(root
, root_cap
+ 0x08,
362 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK
|
363 ASPM_LTR_L12_THRESHOLD_SCALE_MASK
),
364 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET
) |
365 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET
));
367 pci_update_config32(root
, root_cap
+ 0x08, ~0x1f,
370 for (dev_t
= dev
; dev_t
; dev_t
= dev_t
->sibling
) {
371 pci_update_config32(dev_t
, end_cap
+ 0x0c, 0xffffff04,
372 (endp_power_on_value
<< 3) | (power_on_scale
));
374 pci_update_config32(dev_t
, end_cap
+ 0x08,
375 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK
|
376 ASPM_LTR_L12_THRESHOLD_SCALE_MASK
),
377 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET
) |
378 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET
));
380 pci_update_config32(dev_t
, end_cap
+ 0x08, ~0x1f,
385 static void pciexp_config_L1_sub_state(struct device
*root
, struct device
*dev
)
387 unsigned int root_cap
, end_cap
;
389 /* Do it for function 0 only */
390 if (dev
->path
.pci
.devfn
& 0x7)
393 root_cap
= pciexp_find_extended_cap(root
, PCIE_EXT_CAP_L1SS_ID
, 0);
397 end_cap
= pciexp_find_extended_cap(dev
, PCIE_EXT_CAP_L1SS_ID
, 0);
399 if (dev
->vendor
!= PCI_VID_INTEL
)
402 end_cap
= pciexp_find_ext_vendor_cap(dev
, 0xcafe, 0);
407 pciexp_L1_substate_commit(root
, dev
, root_cap
, end_cap
);
411 * Determine the ASPM L0s or L1 exit latency for a link
412 * by checking both root port and endpoint and returning
413 * the highest latency value.
415 static int pciexp_aspm_latency(struct device
*root
, unsigned int root_cap
,
416 struct device
*endp
, unsigned int endp_cap
,
419 int root_lat
= 0, endp_lat
= 0;
420 u32 root_lnkcap
, endp_lnkcap
;
422 root_lnkcap
= pci_read_config32(root
, root_cap
+ PCI_EXP_LNKCAP
);
423 endp_lnkcap
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_LNKCAP
);
425 /* Make sure the link supports this ASPM type by checking
426 * capability bits 11:10 with aspm_type offset by 1 */
427 if (!(root_lnkcap
& (1 << (type
+ 9))) ||
428 !(endp_lnkcap
& (1 << (type
+ 9))))
431 /* Find the one with higher latency */
434 root_lat
= (root_lnkcap
& PCI_EXP_LNKCAP_L0SEL
) >> 12;
435 endp_lat
= (endp_lnkcap
& PCI_EXP_LNKCAP_L0SEL
) >> 12;
438 root_lat
= (root_lnkcap
& PCI_EXP_LNKCAP_L1EL
) >> 15;
439 endp_lat
= (endp_lnkcap
& PCI_EXP_LNKCAP_L1EL
) >> 15;
445 return (endp_lat
> root_lat
) ? endp_lat
: root_lat
;
449 * Enable ASPM on PCIe root port and endpoint.
451 static void pciexp_enable_aspm(struct device
*root
, unsigned int root_cap
,
452 struct device
*endp
, unsigned int endp_cap
)
454 const char *aspm_type_str
[] = { "None", "L0s", "L1", "L0s and L1" };
455 enum aspm_type apmc
= PCIE_ASPM_NONE
;
456 int exit_latency
, ok_latency
;
460 if (endp
->disable_pcie_aspm
)
463 /* Get endpoint device capabilities for acceptable limits */
464 devcap
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_DEVCAP
);
466 /* Enable L0s if it is within endpoint acceptable limit */
467 ok_latency
= (devcap
& PCI_EXP_DEVCAP_L0S
) >> 6;
468 exit_latency
= pciexp_aspm_latency(root
, root_cap
, endp
, endp_cap
,
470 if (exit_latency
>= 0 && exit_latency
<= ok_latency
)
471 apmc
|= PCIE_ASPM_L0S
;
473 /* Enable L1 if it is within endpoint acceptable limit */
474 ok_latency
= (devcap
& PCI_EXP_DEVCAP_L1
) >> 9;
475 exit_latency
= pciexp_aspm_latency(root
, root_cap
, endp
, endp_cap
,
477 if (exit_latency
>= 0 && exit_latency
<= ok_latency
)
478 apmc
|= PCIE_ASPM_L1
;
480 if (apmc
!= PCIE_ASPM_NONE
) {
481 /* Set APMC in root port first */
482 lnkctl
= pci_read_config16(root
, root_cap
+ PCI_EXP_LNKCTL
);
484 pci_write_config16(root
, root_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
486 /* Set APMC in endpoint device next */
487 lnkctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
);
489 pci_write_config16(endp
, endp_cap
+ PCI_EXP_LNKCTL
, lnkctl
);
492 printk(BIOS_INFO
, "ASPM: Enabled %s\n", aspm_type_str
[apmc
]);
496 * Set max payload size of endpoint in accordance with max payload size of root port.
498 static void pciexp_set_max_payload_size(struct device
*root
, unsigned int root_cap
,
499 struct device
*endp
, unsigned int endp_cap
)
501 unsigned int endp_max_payload
, root_max_payload
, max_payload
;
502 u16 endp_devctl
, root_devctl
;
503 u32 endp_devcap
, root_devcap
;
505 /* Get max payload size supported by endpoint */
506 endp_devcap
= pci_read_config32(endp
, endp_cap
+ PCI_EXP_DEVCAP
);
507 endp_max_payload
= endp_devcap
& PCI_EXP_DEVCAP_PAYLOAD
;
509 /* Get max payload size supported by root port */
510 root_devcap
= pci_read_config32(root
, root_cap
+ PCI_EXP_DEVCAP
);
511 root_max_payload
= root_devcap
& PCI_EXP_DEVCAP_PAYLOAD
;
513 /* Set max payload to smaller of the reported device capability. */
514 max_payload
= MIN(endp_max_payload
, root_max_payload
);
515 if (max_payload
> 5) {
516 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
517 printk(BIOS_ERR
, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
522 endp_devctl
= pci_read_config16(endp
, endp_cap
+ PCI_EXP_DEVCTL
);
523 endp_devctl
&= ~PCI_EXP_DEVCTL_PAYLOAD
;
524 endp_devctl
|= max_payload
<< 5;
525 pci_write_config16(endp
, endp_cap
+ PCI_EXP_DEVCTL
, endp_devctl
);
527 root_devctl
= pci_read_config16(root
, root_cap
+ PCI_EXP_DEVCTL
);
528 root_devctl
&= ~PCI_EXP_DEVCTL_PAYLOAD
;
529 root_devctl
|= max_payload
<< 5;
530 pci_write_config16(root
, root_cap
+ PCI_EXP_DEVCTL
, root_devctl
);
532 printk(BIOS_INFO
, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload
+ 7)));
536 * Clear Lane Error State at the end of PCIe link training.
537 * Lane error status is cleared if PCIEXP_LANE_ERR_STAT_CLEAR is set.
538 * Lane error is normal during link training, so we need to clear it.
539 * At this moment, link has been used, but for a very short duration.
541 static void clear_lane_error_status(struct device
*dev
)
546 pos
= pciexp_find_extended_cap(dev
, PCI_EXP_SEC_CAP_ID
, 0);
550 reg32
= pci_read_config32(dev
, pos
+ PCI_EXP_SEC_LANE_ERR_STATUS
);
554 printk(BIOS_DEBUG
, "%s: Clear Lane Error Status.\n", dev_path(dev
));
555 printk(BIOS_DEBUG
, "LaneErrStat:0x%x\n", reg32
);
556 pci_write_config32(dev
, pos
+ PCI_EXP_SEC_LANE_ERR_STATUS
, reg32
);
559 static void pciexp_tune_dev(struct device
*dev
)
561 struct device
*root
= dev
->bus
->dev
;
562 unsigned int root_cap
, cap
;
564 cap
= pci_find_capability(dev
, PCI_CAP_ID_PCIE
);
568 root_cap
= pci_find_capability(root
, PCI_CAP_ID_PCIE
);
572 /* Check for and enable Common Clock */
573 if (CONFIG(PCIEXP_COMMON_CLOCK
))
574 pciexp_enable_common_clock(root
, root_cap
, dev
, cap
);
576 /* Check if per port CLK req is supported by endpoint*/
577 if (CONFIG(PCIEXP_CLK_PM
))
578 pciexp_enable_clock_power_pm(dev
, cap
);
580 /* Enable L1 Sub-State when both root port and endpoint support */
581 if (CONFIG(PCIEXP_L1_SUB_STATE
))
582 pciexp_config_L1_sub_state(root
, dev
);
584 /* Check for and enable ASPM */
585 if (CONFIG(PCIEXP_ASPM
))
586 pciexp_enable_aspm(root
, root_cap
, dev
, cap
);
588 /* Clear PCIe Lane Error Status */
589 if (CONFIG(PCIEXP_LANE_ERR_STAT_CLEAR
))
590 clear_lane_error_status(root
);
592 /* Adjust Max_Payload_Size of link ends. */
593 pciexp_set_max_payload_size(root
, root_cap
, dev
, cap
);
595 pciexp_configure_ltr(root
, root_cap
, dev
, cap
);
598 void pciexp_scan_bus(struct bus
*bus
, unsigned int min_devfn
,
599 unsigned int max_devfn
)
601 struct device
*child
;
603 pciexp_enable_ltr(bus
->dev
);
605 pci_scan_bus(bus
, min_devfn
, max_devfn
);
607 for (child
= bus
->children
; child
; child
= child
->sibling
) {
608 if (child
->path
.type
!= DEVICE_PATH_PCI
)
610 if ((child
->path
.pci
.devfn
< min_devfn
) ||
611 (child
->path
.pci
.devfn
> max_devfn
)) {
614 pciexp_tune_dev(child
);
618 void pciexp_scan_bridge(struct device
*dev
)
620 do_pci_scan_bridge(dev
, pciexp_scan_bus
);
623 /** Default device operations for PCI Express bridges */
624 static struct pci_operations pciexp_bus_ops_pci
= {
628 struct device_operations default_pciexp_ops_bus
= {
629 .read_resources
= pci_bus_read_resources
,
630 .set_resources
= pci_dev_set_resources
,
631 .enable_resources
= pci_bus_enable_resources
,
632 .scan_bus
= pciexp_scan_bridge
,
633 .reset_bus
= pci_bus_reset
,
634 .ops_pci
= &pciexp_bus_ops_pci
,
637 static void pciexp_hotplug_dummy_read_resources(struct device
*dev
)
639 struct resource
*resource
;
641 /* Add extra memory space */
642 resource
= new_resource(dev
, 0x10);
643 resource
->size
= CONFIG_PCIEXP_HOTPLUG_MEM
;
644 resource
->align
= 12;
646 resource
->limit
= 0xffffffff;
647 resource
->flags
|= IORESOURCE_MEM
;
649 /* Add extra prefetchable memory space */
650 resource
= new_resource(dev
, 0x14);
651 resource
->size
= CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM
;
652 resource
->align
= 12;
654 resource
->limit
= 0xffffffffffffffff;
655 resource
->flags
|= IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
657 /* Set resource flag requesting allocation above 4G boundary. */
658 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G
))
659 resource
->flags
|= IORESOURCE_ABOVE_4G
;
661 /* Add extra I/O space */
662 resource
= new_resource(dev
, 0x18);
663 resource
->size
= CONFIG_PCIEXP_HOTPLUG_IO
;
664 resource
->align
= 12;
666 resource
->limit
= 0xffff;
667 resource
->flags
|= IORESOURCE_IO
;
670 static struct device_operations pciexp_hotplug_dummy_ops
= {
671 .read_resources
= pciexp_hotplug_dummy_read_resources
,
672 .set_resources
= noop_set_resources
,
675 void pciexp_hotplug_scan_bridge(struct device
*dev
)
677 dev
->hotplug_port
= 1;
678 dev
->hotplug_buses
= CONFIG_PCIEXP_HOTPLUG_BUSES
;
680 /* Normal PCIe Scan */
681 pciexp_scan_bridge(dev
);
683 /* Add dummy slot to preserve resources, must happen after bus scan */
684 struct device
*dummy
;
685 struct device_path dummy_path
= { .type
= DEVICE_PATH_NONE
};
686 dummy
= alloc_dev(dev
->link_list
, &dummy_path
);
687 dummy
->ops
= &pciexp_hotplug_dummy_ops
;
690 struct device_operations default_pciexp_hotplug_ops_bus
= {
691 .read_resources
= pci_bus_read_resources
,
692 .set_resources
= pci_dev_set_resources
,
693 .enable_resources
= pci_bus_enable_resources
,
694 .scan_bus
= pciexp_hotplug_scan_bridge
,
695 .reset_bus
= pci_bus_reset
,
696 .ops_pci
= &pciexp_bus_ops_pci
,