/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <commonlib/helpers.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ops.h>
#include <device/pciexp.h>
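
/*
 * Walk the PCIe extended capability list, which starts at config offset
 * 0x100 (PCIE_EXT_CAP_OFFSET).  Each capability header holds the capability
 * ID in bits 15:0 and the offset of the next capability in bits 31:20.
 * In addition to the header ID, the ID found in the dword at offset + 4 is
 * matched as well, which lets callers locate markers embedded in a
 * capability body (see the 0xcafe lookup used for L1 Sub-States below).
 */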
unsigned int pciexp_find_extended_cap(struct device *dev, unsigned int cap)
{
	unsigned int this_cap_offset, next_cap_offset;
	unsigned int this_cap, cafe;

	this_cap_offset = PCIE_EXT_CAP_OFFSET;
	do {
		this_cap = pci_read_config32(dev, this_cap_offset);
		next_cap_offset = this_cap >> 20;
		this_cap &= 0xffff;
		cafe = pci_read_config32(dev, this_cap_offset + 4);
		cafe &= 0xffff;

		if (this_cap == cap)
			return this_cap_offset;
		else if (cafe == cap)
			return this_cap_offset + 4;
		else
			this_cap_offset = next_cap_offset;
	} while (next_cap_offset != 0);

	return 0;
}

/*
 * Re-train a PCIe link
 */
#define PCIE_TRAIN_RETRY 10000
static int pciexp_retrain_link(struct device *dev, unsigned int cap)
{
	unsigned int try;
	u16 lnk;

	/*
	 * Implementation note (page 633) in PCIe Specification 3.0 suggests
	 * polling the Link Training bit in the Link Status register until the
	 * value returned is 0 before setting the Retrain Link bit to 1.
	 * This is meant to avoid a race condition when using the
	 * Retrain Link mechanism.
	 */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			break;
		udelay(100);
	}
	if (try == 0) {
		printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
		return -1;
	}

	/* Start link retraining */
	lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
	lnk |= PCI_EXP_LNKCTL_RL;
	pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);

	/* Wait for training to complete */
	for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
		lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
		if (!(lnk & PCI_EXP_LNKSTA_LT))
			return 0;
		udelay(100);
	}

	printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
	return -1;
}

/*
 * Check the Slot Clock Configuration for root port and endpoint
 * and enable Common Clock Configuration if possible.  If CCC is
 * enabled the link must be retrained.
 */
static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
				       struct device *endp, unsigned int endp_cap)
{
	u16 root_scc, endp_scc, lnkctl;

	/* Get Slot Clock Configuration for root port */
	root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
	root_scc &= PCI_EXP_LNKSTA_SLC;

	/* Get Slot Clock Configuration for endpoint */
	endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
	endp_scc &= PCI_EXP_LNKSTA_SLC;

	/* Enable Common Clock Configuration and retrain */
	if (root_scc && endp_scc) {
		printk(BIOS_INFO, "Enabling Common Clock Configuration\n");

		/* Set in endpoint */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set in root port */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= PCI_EXP_LNKCTL_CCC;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Retrain link if CCC was enabled */
		pciexp_retrain_link(root, root_cap);
	}
}

static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
{
	/* Check if per-port CLKREQ-based clock power management is supported by the endpoint */
	u32 endp_ca;
	u16 lnkctl;
	endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
	if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
		printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
		return;
	}
	lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
	lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
	pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
}
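
/*
 * If the device exposes the LTR extended capability, hand its register block
 * (cap + 4 is the start of the Max Snoop / Max No-Snoop Latency registers) to
 * the upstream port driver's set_L1_ss_latency() hook so platform code can
 * program the latency values it needs.
 */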
static void pciexp_config_max_latency(struct device *root, struct device *dev)
{
	unsigned int cap;

	cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID);
	if ((cap) && (root->ops->ops_pci != NULL) &&
	    (root->ops->ops_pci->set_L1_ss_latency != NULL))
		root->ops->ops_pci->set_L1_ss_latency(dev, cap + 4);
}

static bool pciexp_is_ltr_supported(struct device *dev, unsigned int cap)
{
	unsigned int val;

	val = pci_read_config16(dev, cap + PCI_EXP_DEV_CAP2_OFFSET);

	if (val & LTR_MECHANISM_SUPPORT)
		return true;

	return false;
}

static void pciexp_configure_ltr(struct device *dev)
{
	unsigned int cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);

	/*
	 * Check if the capability pointer is valid and
	 * the device supports the LTR mechanism.
	 */
	if (!cap || !pciexp_is_ltr_supported(dev, cap)) {
		printk(BIOS_INFO, "Failed to enable LTR for dev = %s\n",
		       dev_path(dev));
		return;
	}

	cap += PCI_EXP_DEV_CTL_STS2_CAP_OFFSET;

	/* Enable LTR for device */
	pci_update_config32(dev, cap, ~LTR_MECHANISM_EN, LTR_MECHANISM_EN);

	/* Configure Max Snoop Latency */
	pciexp_config_max_latency(dev->bus->dev, dev);
}
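
/*
 * Recursively walk every bus below this device and configure LTR on each
 * child; children that are themselves bridges (i.e. provide a scan_bus op)
 * are descended into so the whole hierarchy is covered.
 */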
static void pciexp_enable_ltr(struct device *dev)
{
	struct bus *bus;
	struct device *child;

	for (bus = dev->link_list; bus; bus = bus->next) {
		for (child = bus->children; child; child = child->sibling) {
			pciexp_configure_ltr(child);
			if (child->ops && child->ops->scan_bus)
				pciexp_enable_ltr(child);
		}
	}
}
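
/*
 * Merge the endpoint's L1 PM Substates capabilities into the value
 * accumulated in *data.  The dword at endp_cap + 4 is the L1SS Capabilities
 * register: bits 3:0 are the supported substates, bits 15:8 the Port
 * Common_Mode_Restore_Time, bits 17:16 the Port T_POWER_ON scale and bits
 * 23:19 the Port T_POWER_ON value.  The result keeps the intersection of
 * supported states and the largest (slowest) timing values.  Returns 0 if
 * no common substate remains.
 */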
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
	unsigned int *data)
{
	unsigned char mult[4] = {2, 10, 100, 0};

	unsigned int L1SubStateSupport = *data & 0xf;
	unsigned int comm_mode_rst_time = (*data >> 8) & 0xff;
	unsigned int power_on_scale = (*data >> 16) & 0x3;
	unsigned int power_on_value = (*data >> 19) & 0x1f;

	unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	unsigned int endp_L1SubStateSupport = endp_data & 0xf;
	unsigned int endp_comm_mode_restore_time = (endp_data >> 8) & 0xff;
	unsigned int endp_power_on_scale = (endp_data >> 16) & 0x3;
	unsigned int endp_power_on_value = (endp_data >> 19) & 0x1f;

	L1SubStateSupport &= endp_L1SubStateSupport;

	if (L1SubStateSupport == 0)
		return 0;

	if (power_on_value * mult[power_on_scale] <
	    endp_power_on_value * mult[endp_power_on_scale]) {
		power_on_value = endp_power_on_value;
		power_on_scale = endp_power_on_scale;
	}
	if (comm_mode_rst_time < endp_comm_mode_restore_time)
		comm_mode_rst_time = endp_comm_mode_restore_time;

	*data = (comm_mode_rst_time << 8) | (power_on_scale << 16)
	      | (power_on_value << 19) | L1SubStateSupport;

	return 1;
}
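
/*
 * Program the negotiated L1 Sub-State settings.  Relative to the L1SS
 * capability, offset 0x08 is the L1 PM Substates Control 1 register (enable
 * bits 3:0, Common_Mode_Restore_Time in 15:8, LTR L1.2 threshold
 * value/scale) and offset 0x0c is Control 2 (T_POWER_ON scale in bits 1:0,
 * T_POWER_ON value in bits 7:3).  The root port is written first, then
 * every device on the link.
 */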
static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
	unsigned int root_cap, unsigned int end_cap)
{
	struct device *dev_t;
	unsigned char L1_ss_ok;
	unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
	unsigned int L1SubStateSupport;
	unsigned int comm_mode_rst_time;
	unsigned int power_on_scale;
	unsigned int endp_power_on_value;

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		/*
		 * rp_L1_support is initialized above from the root port.
		 * It needs coordination with the endpoints to reach a common
		 * setting.  If an endpoint does not support L1 Sub-States,
		 * abort enabling this feature.
		 */
		L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
						  &rp_L1_support);
		if (!L1_ss_ok)
			return;
	}

	L1SubStateSupport = rp_L1_support & 0xf;
	comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
	power_on_scale = (rp_L1_support >> 16) & 0x3;
	endp_power_on_value = (rp_L1_support >> 19) & 0x1f;

	printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
	       root->path.pci.devfn >> 3);
	printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
	printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
	printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
	       endp_power_on_value, power_on_scale);

	pci_update_config32(root, root_cap + 0x08, ~0xff00,
			    (comm_mode_rst_time << 8));

	pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
			    (endp_power_on_value << 3) | (power_on_scale));

	/* TODO: 0xa0, 2 are values that work on some chipsets but really
	 * should be determined dynamically by looking at downstream devices.
	 */
	pci_update_config32(root, root_cap + 0x08,
			    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
			      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
			    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
			    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

	pci_update_config32(root, root_cap + 0x08, ~0x1f,
			    L1SubStateSupport);

	for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
		pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
				    (endp_power_on_value << 3) | (power_on_scale));

		pci_update_config32(dev_t, end_cap + 0x08,
				    ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
				      ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
				    (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
				    (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));

		pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
				    L1SubStateSupport);
	}
}
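
/*
 * Entry point for L1 Sub-State setup: run only for function 0, require the
 * L1SS extended capability on the root port, and on the endpoint fall back
 * to searching for a 0xcafe ID when the standard L1SS capability is not
 * found (some endpoints expose their L1SS registers behind that marker).
 */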
static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
{
	unsigned int root_cap, end_cap;

	/* Do it for function 0 only */
	if (dev->path.pci.devfn & 0x7)
		return;

	root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID);
	if (!root_cap)
		return;

	end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID);
	if (!end_cap) {
		end_cap = pciexp_find_extended_cap(dev, 0xcafe);
		if (!end_cap)
			return;
	}

	pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
}

/*
 * Determine the ASPM L0s or L1 exit latency for a link
 * by checking both root port and endpoint and returning
 * the highest latency value.
 */
static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap,
			       enum aspm_type type)
{
	int root_lat = 0, endp_lat = 0;
	u32 root_lnkcap, endp_lnkcap;

	root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
	endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);

	/* Make sure the link supports this ASPM type by checking
	 * capability bits 11:10 with aspm_type offset by 1 */
	if (!(root_lnkcap & (1 << (type + 9))) ||
	    !(endp_lnkcap & (1 << (type + 9))))
		return -1;

	/* Find the one with higher latency */
	switch (type) {
	case PCIE_ASPM_L0S:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
		break;
	case PCIE_ASPM_L1:
		root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
		break;
	default:
		return -1;
	}

	return (endp_lat > root_lat) ? endp_lat : root_lat;
}

/*
 * Enable ASPM on PCIe root port and endpoint.
 */
static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
			       struct device *endp, unsigned int endp_cap)
{
	const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
	enum aspm_type apmc = PCIE_ASPM_NONE;
	int exit_latency, ok_latency;
	u16 lnkctl;
	u32 devcap;

	if (endp->disable_pcie_aspm)
		return;

	/* Get endpoint device capabilities for acceptable limits */
	devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);

	/* Enable L0s if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L0S);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L0S;

	/* Enable L1 if it is within endpoint acceptable limit */
	ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
	exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
					   PCIE_ASPM_L1);
	if (exit_latency >= 0 && exit_latency <= ok_latency)
		apmc |= PCIE_ASPM_L1;

	if (apmc != PCIE_ASPM_NONE) {
		/* Set APMC in root port first */
		lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);

		/* Set APMC in endpoint device next */
		lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
		lnkctl |= apmc;
		pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
	}

	printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
}

/*
 * Set max payload size of endpoint in accordance with max payload size of root port.
 */
static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
					struct device *endp, unsigned int endp_cap)
{
	unsigned int endp_max_payload, root_max_payload, max_payload;
	u16 endp_devctl, root_devctl;
	u32 endp_devcap, root_devcap;

	/* Get max payload size supported by endpoint */
	endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
	endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;

	/* Get max payload size supported by root port */
	root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
	root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;

	/* Set max payload to the smaller of the reported device capabilities. */
	max_payload = MIN(endp_max_payload, root_max_payload);
	if (max_payload > 5) {
		/* Values 6 and 7 are reserved in the PCIe 3.0 spec. */
		printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
		       max_payload);
		max_payload = 5;
	}

	endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
	endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	endp_devctl |= max_payload << 5;
	pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);

	root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
	root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
	root_devctl |= max_payload << 5;
	pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);

	/* The 3-bit field encodes 128 << n bytes, hence 1 << (max_payload + 7) */
	printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
}
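
/*
 * Per-device PCIe tuning applied during bus scan: each feature below is
 * negotiated between the device and its upstream port and is gated by the
 * corresponding Kconfig option, except Max_Payload_Size which is always
 * adjusted.
 */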
static void pciexp_tune_dev(struct device *dev)
{
	struct device *root = dev->bus->dev;
	unsigned int root_cap, cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
	if (!cap)
		return;

	root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
	if (!root_cap)
		return;

	/* Check for and enable Common Clock */
	if (CONFIG(PCIEXP_COMMON_CLOCK))
		pciexp_enable_common_clock(root, root_cap, dev, cap);

	/* Check if per-port CLK req is supported by the endpoint */
	if (CONFIG(PCIEXP_CLK_PM))
		pciexp_enable_clock_power_pm(dev, cap);

	/* Enable L1 Sub-State when both root port and endpoint support it */
	if (CONFIG(PCIEXP_L1_SUB_STATE))
		pciexp_config_L1_sub_state(root, dev);

	/* Check for and enable ASPM */
	if (CONFIG(PCIEXP_ASPM))
		pciexp_enable_aspm(root, root_cap, dev, cap);

	/* Adjust Max_Payload_Size of link ends. */
	pciexp_set_max_payload_size(root, root_cap, dev, cap);
}

void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
		     unsigned int max_devfn)
{
	struct device *child;
	pci_scan_bus(bus, min_devfn, max_devfn);

	for (child = bus->children; child; child = child->sibling) {
		if ((child->path.pci.devfn < min_devfn) ||
		    (child->path.pci.devfn > max_devfn)) {
			continue;
		}
		pciexp_tune_dev(child);
	}
}

void pciexp_scan_bridge(struct device *dev)
{
	do_pci_scan_bridge(dev, pciexp_scan_bus);
	pciexp_enable_ltr(dev);
}

/** Default device operations for PCI Express bridges */
static struct pci_operations pciexp_bus_ops_pci = {
	.set_subsystem = 0,
};

struct device_operations default_pciexp_ops_bus = {
	.read_resources   = pci_bus_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus         = pciexp_scan_bridge,
	.reset_bus        = pci_bus_reset,
	.ops_pci          = &pciexp_bus_ops_pci,
};

#if CONFIG(PCIEXP_HOTPLUG)
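
/*
 * For hot-pluggable bridges a dummy device is added below the bridge so that
 * the resource allocator reserves extra memory, prefetchable memory, I/O
 * space and bus numbers for devices that may be hot-added after boot.
 */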
static void pciexp_hotplug_dummy_read_resources(struct device *dev)
{
	struct resource *resource;

	/* Add extra memory space */
	resource = new_resource(dev, 0x10);
	resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffff;
	resource->flags |= IORESOURCE_MEM;

	/* Add extra prefetchable memory space */
	resource = new_resource(dev, 0x14);
	resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffffffffffffffff;
	resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;

	/* Set resource flag requesting allocation above 4G boundary. */
	if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
		resource->flags |= IORESOURCE_ABOVE_4G;

	/* Add extra I/O space */
	resource = new_resource(dev, 0x18);
	resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
	resource->align = 12;
	resource->gran = 12;
	resource->limit = 0xffff;
	resource->flags |= IORESOURCE_IO;
}

static struct device_operations pciexp_hotplug_dummy_ops = {
	.read_resources = pciexp_hotplug_dummy_read_resources,
};

void pciexp_hotplug_scan_bridge(struct device *dev)
{
	dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;

	/* Normal PCIe Scan */
	pciexp_scan_bridge(dev);

	/* Add dummy slot to preserve resources, must happen after bus scan */
	struct device *dummy;
	struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
	dummy = alloc_dev(dev->link_list, &dummy_path);
	dummy->ops = &pciexp_hotplug_dummy_ops;
}

struct device_operations default_pciexp_hotplug_ops_bus = {
	.read_resources   = pci_bus_read_resources,
	.set_resources    = pci_dev_set_resources,
	.enable_resources = pci_bus_enable_resources,
	.scan_bus         = pciexp_hotplug_scan_bridge,
	.reset_bus        = pci_bus_reset,
	.ops_pci          = &pciexp_bus_ops_pci,
};
#endif /* CONFIG(PCIEXP_HOTPLUG) */