[coreboot.git] / src / device / pciexp_device.c
PCI Express device/link tuning: common clock, clock PM, ASPM, L1 sub-states, LTR, Max_Payload_Size, and hotplug resource reservation.
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <console/console.h>
4 #include <commonlib/helpers.h>
5 #include <delay.h>
6 #include <device/device.h>
7 #include <device/pci.h>
8 #include <device/pci_ops.h>
9 #include <device/pciexp.h>
/*
 * Walk the PCIe extended capability list starting at 'offset', looking
 * for capability ID 'cap'. Returns the config-space offset of the match,
 * or 0 when the list ends without one.
 */
static unsigned int pciexp_get_ext_cap_offset(const struct device *dev, unsigned int cap,
					      unsigned int offset)
{
	unsigned int this_cap_offset = offset;
	unsigned int this_cap, cafe;

	do {
		this_cap = pci_read_config32(dev, this_cap_offset);
		/* Some devices report the capability 4 bytes past the header. */
		cafe = pci_read_config32(dev, this_cap_offset + 4);

		if ((this_cap & 0xffff) == cap)
			return this_cap_offset;
		if ((cafe & 0xffff) == cap)
			return this_cap_offset + 4;

		/* Bits 31:20 of the header hold the next capability offset. */
		this_cap_offset = this_cap >> 20;
	} while (this_cap_offset != 0);

	return 0;
}
/*
 * Find the next occurrence of extended capability 'cap' after the
 * capability located at config offset 'pos'.
 */
unsigned int pciexp_find_next_extended_cap(const struct device *dev, unsigned int cap,
					   unsigned int pos)
{
	/* The next-capability offset lives in bits 31:20 of the header. */
	const unsigned int next = pci_read_config32(dev, pos) >> 20;

	return pciexp_get_ext_cap_offset(dev, cap, next);
}
39 unsigned int pciexp_find_extended_cap(const struct device *dev, unsigned int cap)
41 return pciexp_get_ext_cap_offset(dev, cap, PCIE_EXT_CAP_OFFSET);
45 * Re-train a PCIe link
47 #define PCIE_TRAIN_RETRY 10000
48 static int pciexp_retrain_link(struct device *dev, unsigned int cap)
50 unsigned int try;
51 u16 lnk;
54 * Implementation note (page 633) in PCIe Specification 3.0 suggests
55 * polling the Link Training bit in the Link Status register until the
56 * value returned is 0 before setting the Retrain Link bit to 1.
57 * This is meant to avoid a race condition when using the
58 * Retrain Link mechanism.
60 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
61 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
62 if (!(lnk & PCI_EXP_LNKSTA_LT))
63 break;
64 udelay(100);
66 if (try == 0) {
67 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
68 return -1;
71 /* Start link retraining */
72 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKCTL);
73 lnk |= PCI_EXP_LNKCTL_RL;
74 pci_write_config16(dev, cap + PCI_EXP_LNKCTL, lnk);
76 /* Wait for training to complete */
77 for (try = PCIE_TRAIN_RETRY; try > 0; try--) {
78 lnk = pci_read_config16(dev, cap + PCI_EXP_LNKSTA);
79 if (!(lnk & PCI_EXP_LNKSTA_LT))
80 return 0;
81 udelay(100);
84 printk(BIOS_ERR, "%s: Link Retrain timeout\n", dev_path(dev));
85 return -1;
89 * Check the Slot Clock Configuration for root port and endpoint
90 * and enable Common Clock Configuration if possible. If CCC is
91 * enabled the link must be retrained.
93 static void pciexp_enable_common_clock(struct device *root, unsigned int root_cap,
94 struct device *endp, unsigned int endp_cap)
96 u16 root_scc, endp_scc, lnkctl;
98 /* Get Slot Clock Configuration for root port */
99 root_scc = pci_read_config16(root, root_cap + PCI_EXP_LNKSTA);
100 root_scc &= PCI_EXP_LNKSTA_SLC;
102 /* Get Slot Clock Configuration for endpoint */
103 endp_scc = pci_read_config16(endp, endp_cap + PCI_EXP_LNKSTA);
104 endp_scc &= PCI_EXP_LNKSTA_SLC;
106 /* Enable Common Clock Configuration and retrain */
107 if (root_scc && endp_scc) {
108 printk(BIOS_INFO, "Enabling Common Clock Configuration\n");
110 /* Set in endpoint */
111 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
112 lnkctl |= PCI_EXP_LNKCTL_CCC;
113 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
115 /* Set in root port */
116 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
117 lnkctl |= PCI_EXP_LNKCTL_CCC;
118 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
120 /* Retrain link if CCC was enabled */
121 pciexp_retrain_link(root, root_cap);
125 static void pciexp_enable_clock_power_pm(struct device *endp, unsigned int endp_cap)
127 /* check if per port clk req is supported in device */
128 u32 endp_ca;
129 u16 lnkctl;
130 endp_ca = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
131 if ((endp_ca & PCI_EXP_CLK_PM) == 0) {
132 printk(BIOS_INFO, "PCIE CLK PM is not supported by endpoint\n");
133 return;
135 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
136 lnkctl = lnkctl | PCI_EXP_EN_CLK_PM;
137 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
140 static bool _pciexp_ltr_supported(struct device *dev, unsigned int cap)
142 return pci_read_config16(dev, cap + PCI_EXP_DEVCAP2) & PCI_EXP_DEVCAP2_LTR;
145 static bool _pciexp_ltr_enabled(struct device *dev, unsigned int cap)
147 return pci_read_config16(dev, cap + PCI_EXP_DEVCTL2) & PCI_EXP_DEV2_LTR;
150 static bool _pciexp_enable_ltr(struct device *parent, unsigned int parent_cap,
151 struct device *dev, unsigned int cap)
153 if (!_pciexp_ltr_supported(dev, cap)) {
154 printk(BIOS_DEBUG, "%s: No LTR support\n", dev_path(dev));
155 return false;
158 if (_pciexp_ltr_enabled(dev, cap))
159 return true;
161 if (parent &&
162 (parent->path.type != DEVICE_PATH_PCI ||
163 !_pciexp_ltr_supported(parent, parent_cap) ||
164 !_pciexp_ltr_enabled(parent, parent_cap)))
165 return false;
167 pci_or_config16(dev, cap + PCI_EXP_DEVCTL2, PCI_EXP_DEV2_LTR);
168 printk(BIOS_INFO, "%s: Enabled LTR\n", dev_path(dev));
169 return true;
172 static void pciexp_enable_ltr(struct device *dev)
174 const unsigned int cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
175 if (!cap)
176 return;
179 * If we have get_ltr_max_latencies(), treat `dev` as the root.
180 * If not, let _pciexp_enable_ltr() query the parent's state.
182 struct device *parent = NULL;
183 unsigned int parent_cap = 0;
184 if (!dev->ops->ops_pci || !dev->ops->ops_pci->get_ltr_max_latencies) {
185 parent = dev->bus->dev;
186 parent_cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
187 if (!parent_cap)
188 return;
191 (void)_pciexp_enable_ltr(parent, parent_cap, dev, cap);
194 bool pciexp_get_ltr_max_latencies(struct device *dev, u16 *max_snoop, u16 *max_nosnoop)
196 /* Walk the hierarchy up to find get_ltr_max_latencies(). */
197 do {
198 if (dev->ops->ops_pci && dev->ops->ops_pci->get_ltr_max_latencies)
199 break;
200 if (dev->bus->dev == dev || dev->bus->dev->path.type != DEVICE_PATH_PCI)
201 return false;
202 dev = dev->bus->dev;
203 } while (true);
205 dev->ops->ops_pci->get_ltr_max_latencies(max_snoop, max_nosnoop);
206 return true;
209 static void pciexp_configure_ltr(struct device *parent, unsigned int parent_cap,
210 struct device *dev, unsigned int cap)
212 if (!_pciexp_enable_ltr(parent, parent_cap, dev, cap))
213 return;
215 const unsigned int ltr_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_LTR_ID);
216 if (!ltr_cap)
217 return;
219 u16 max_snoop, max_nosnoop;
220 if (!pciexp_get_ltr_max_latencies(dev, &max_snoop, &max_nosnoop))
221 return;
223 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_SNOOP, max_snoop);
224 pci_write_config16(dev, ltr_cap + PCI_LTR_MAX_NOSNOOP, max_nosnoop);
225 printk(BIOS_INFO, "%s: Programmed LTR max latencies\n", dev_path(dev));
/*
 * Intersect the L1 sub-state parameters in *data with those of the
 * endpoint's L1SS capability. On success *data holds the merged support
 * mask, common-mode restore time and the larger Tpower_on value/scale;
 * returns 1. Returns 0 when no sub-state is supported by both ends.
 */
static unsigned char pciexp_L1_substate_cal(struct device *dev, unsigned int endp_cap,
					    unsigned int *data)
{
	/* Tpower_on scale encodings 0..2 in microseconds; 3 is reserved. */
	unsigned char scale_us[4] = {2, 10, 100, 0};

	unsigned int support = *data & 0xf;
	unsigned int restore_time = (*data >> 8) & 0xff;
	unsigned int scale = (*data >> 16) & 0x3;
	unsigned int value = (*data >> 19) & 0x1f;

	const unsigned int endp_data = pci_read_config32(dev, endp_cap + 4);
	const unsigned int endp_support = endp_data & 0xf;
	const unsigned int endp_restore_time = (endp_data >> 8) & 0xff;
	const unsigned int endp_scale = (endp_data >> 16) & 0x3;
	const unsigned int endp_value = (endp_data >> 19) & 0x1f;

	/* Only sub-states supported by both link partners can be enabled. */
	support &= endp_support;
	if (support == 0)
		return 0;

	/* Keep the larger Tpower_on of the two ends. */
	if (value * scale_us[scale] < endp_value * scale_us[endp_scale]) {
		value = endp_value;
		scale = endp_scale;
	}

	/* Keep the larger common-mode restore time. */
	if (restore_time < endp_restore_time)
		restore_time = endp_restore_time;

	*data = (restore_time << 8) | (scale << 16)
		| (value << 19) | support;

	return 1;
}
263 static void pciexp_L1_substate_commit(struct device *root, struct device *dev,
264 unsigned int root_cap, unsigned int end_cap)
266 struct device *dev_t;
267 unsigned char L1_ss_ok;
268 unsigned int rp_L1_support = pci_read_config32(root, root_cap + 4);
269 unsigned int L1SubStateSupport;
270 unsigned int comm_mode_rst_time;
271 unsigned int power_on_scale;
272 unsigned int endp_power_on_value;
274 for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
276 * rp_L1_support is init'd above from root port.
277 * it needs coordination with endpoints to reach in common.
278 * if certain endpoint doesn't support L1 Sub-State, abort
279 * this feature enabling.
281 L1_ss_ok = pciexp_L1_substate_cal(dev_t, end_cap,
282 &rp_L1_support);
283 if (!L1_ss_ok)
284 return;
287 L1SubStateSupport = rp_L1_support & 0xf;
288 comm_mode_rst_time = (rp_L1_support >> 8) & 0xff;
289 power_on_scale = (rp_L1_support >> 16) & 0x3;
290 endp_power_on_value = (rp_L1_support >> 19) & 0x1f;
292 printk(BIOS_INFO, "L1 Sub-State supported from root port %d\n",
293 root->path.pci.devfn >> 3);
294 printk(BIOS_INFO, "L1 Sub-State Support = 0x%x\n", L1SubStateSupport);
295 printk(BIOS_INFO, "CommonModeRestoreTime = 0x%x\n", comm_mode_rst_time);
296 printk(BIOS_INFO, "Power On Value = 0x%x, Power On Scale = 0x%x\n",
297 endp_power_on_value, power_on_scale);
299 pci_update_config32(root, root_cap + 0x08, ~0xff00,
300 (comm_mode_rst_time << 8));
302 pci_update_config32(root, root_cap + 0x0c, 0xffffff04,
303 (endp_power_on_value << 3) | (power_on_scale));
305 /* TODO: 0xa0, 2 are values that work on some chipsets but really
306 * should be determined dynamically by looking at downstream devices.
308 pci_update_config32(root, root_cap + 0x08,
309 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
310 ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
311 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
312 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));
314 pci_update_config32(root, root_cap + 0x08, ~0x1f,
315 L1SubStateSupport);
317 for (dev_t = dev; dev_t; dev_t = dev_t->sibling) {
318 pci_update_config32(dev_t, end_cap + 0x0c, 0xffffff04,
319 (endp_power_on_value << 3) | (power_on_scale));
321 pci_update_config32(dev_t, end_cap + 0x08,
322 ~(ASPM_LTR_L12_THRESHOLD_VALUE_MASK |
323 ASPM_LTR_L12_THRESHOLD_SCALE_MASK),
324 (0xa0 << ASPM_LTR_L12_THRESHOLD_VALUE_OFFSET) |
325 (2 << ASPM_LTR_L12_THRESHOLD_SCALE_OFFSET));
327 pci_update_config32(dev_t, end_cap + 0x08, ~0x1f,
328 L1SubStateSupport);
332 static void pciexp_config_L1_sub_state(struct device *root, struct device *dev)
334 unsigned int root_cap, end_cap;
336 /* Do it for function 0 only */
337 if (dev->path.pci.devfn & 0x7)
338 return;
340 root_cap = pciexp_find_extended_cap(root, PCIE_EXT_CAP_L1SS_ID);
341 if (!root_cap)
342 return;
344 end_cap = pciexp_find_extended_cap(dev, PCIE_EXT_CAP_L1SS_ID);
345 if (!end_cap) {
346 end_cap = pciexp_find_extended_cap(dev, 0xcafe);
347 if (!end_cap)
348 return;
351 pciexp_L1_substate_commit(root, dev, root_cap, end_cap);
355 * Determine the ASPM L0s or L1 exit latency for a link
356 * by checking both root port and endpoint and returning
357 * the highest latency value.
359 static int pciexp_aspm_latency(struct device *root, unsigned int root_cap,
360 struct device *endp, unsigned int endp_cap,
361 enum aspm_type type)
363 int root_lat = 0, endp_lat = 0;
364 u32 root_lnkcap, endp_lnkcap;
366 root_lnkcap = pci_read_config32(root, root_cap + PCI_EXP_LNKCAP);
367 endp_lnkcap = pci_read_config32(endp, endp_cap + PCI_EXP_LNKCAP);
369 /* Make sure the link supports this ASPM type by checking
370 * capability bits 11:10 with aspm_type offset by 1 */
371 if (!(root_lnkcap & (1 << (type + 9))) ||
372 !(endp_lnkcap & (1 << (type + 9))))
373 return -1;
375 /* Find the one with higher latency */
376 switch (type) {
377 case PCIE_ASPM_L0S:
378 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
379 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
380 break;
381 case PCIE_ASPM_L1:
382 root_lat = (root_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
383 endp_lat = (endp_lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
384 break;
385 default:
386 return -1;
389 return (endp_lat > root_lat) ? endp_lat : root_lat;
393 * Enable ASPM on PCIe root port and endpoint.
395 static void pciexp_enable_aspm(struct device *root, unsigned int root_cap,
396 struct device *endp, unsigned int endp_cap)
398 const char *aspm_type_str[] = { "None", "L0s", "L1", "L0s and L1" };
399 enum aspm_type apmc = PCIE_ASPM_NONE;
400 int exit_latency, ok_latency;
401 u16 lnkctl;
402 u32 devcap;
404 if (endp->disable_pcie_aspm)
405 return;
407 /* Get endpoint device capabilities for acceptable limits */
408 devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
410 /* Enable L0s if it is within endpoint acceptable limit */
411 ok_latency = (devcap & PCI_EXP_DEVCAP_L0S) >> 6;
412 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
413 PCIE_ASPM_L0S);
414 if (exit_latency >= 0 && exit_latency <= ok_latency)
415 apmc |= PCIE_ASPM_L0S;
417 /* Enable L1 if it is within endpoint acceptable limit */
418 ok_latency = (devcap & PCI_EXP_DEVCAP_L1) >> 9;
419 exit_latency = pciexp_aspm_latency(root, root_cap, endp, endp_cap,
420 PCIE_ASPM_L1);
421 if (exit_latency >= 0 && exit_latency <= ok_latency)
422 apmc |= PCIE_ASPM_L1;
424 if (apmc != PCIE_ASPM_NONE) {
425 /* Set APMC in root port first */
426 lnkctl = pci_read_config16(root, root_cap + PCI_EXP_LNKCTL);
427 lnkctl |= apmc;
428 pci_write_config16(root, root_cap + PCI_EXP_LNKCTL, lnkctl);
430 /* Set APMC in endpoint device next */
431 lnkctl = pci_read_config16(endp, endp_cap + PCI_EXP_LNKCTL);
432 lnkctl |= apmc;
433 pci_write_config16(endp, endp_cap + PCI_EXP_LNKCTL, lnkctl);
436 printk(BIOS_INFO, "ASPM: Enabled %s\n", aspm_type_str[apmc]);
440 * Set max payload size of endpoint in accordance with max payload size of root port.
442 static void pciexp_set_max_payload_size(struct device *root, unsigned int root_cap,
443 struct device *endp, unsigned int endp_cap)
445 unsigned int endp_max_payload, root_max_payload, max_payload;
446 u16 endp_devctl, root_devctl;
447 u32 endp_devcap, root_devcap;
449 /* Get max payload size supported by endpoint */
450 endp_devcap = pci_read_config32(endp, endp_cap + PCI_EXP_DEVCAP);
451 endp_max_payload = endp_devcap & PCI_EXP_DEVCAP_PAYLOAD;
453 /* Get max payload size supported by root port */
454 root_devcap = pci_read_config32(root, root_cap + PCI_EXP_DEVCAP);
455 root_max_payload = root_devcap & PCI_EXP_DEVCAP_PAYLOAD;
457 /* Set max payload to smaller of the reported device capability. */
458 max_payload = MIN(endp_max_payload, root_max_payload);
459 if (max_payload > 5) {
460 /* Values 6 and 7 are reserved in PCIe 3.0 specs. */
461 printk(BIOS_ERR, "PCIe: Max_Payload_Size field restricted from %d to 5\n",
462 max_payload);
463 max_payload = 5;
466 endp_devctl = pci_read_config16(endp, endp_cap + PCI_EXP_DEVCTL);
467 endp_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
468 endp_devctl |= max_payload << 5;
469 pci_write_config16(endp, endp_cap + PCI_EXP_DEVCTL, endp_devctl);
471 root_devctl = pci_read_config16(root, root_cap + PCI_EXP_DEVCTL);
472 root_devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
473 root_devctl |= max_payload << 5;
474 pci_write_config16(root, root_cap + PCI_EXP_DEVCTL, root_devctl);
476 printk(BIOS_INFO, "PCIe: Max_Payload_Size adjusted to %d\n", (1 << (max_payload + 7)));
479 static void pciexp_tune_dev(struct device *dev)
481 struct device *root = dev->bus->dev;
482 unsigned int root_cap, cap;
484 cap = pci_find_capability(dev, PCI_CAP_ID_PCIE);
485 if (!cap)
486 return;
488 root_cap = pci_find_capability(root, PCI_CAP_ID_PCIE);
489 if (!root_cap)
490 return;
492 /* Check for and enable Common Clock */
493 if (CONFIG(PCIEXP_COMMON_CLOCK))
494 pciexp_enable_common_clock(root, root_cap, dev, cap);
496 /* Check if per port CLK req is supported by endpoint*/
497 if (CONFIG(PCIEXP_CLK_PM))
498 pciexp_enable_clock_power_pm(dev, cap);
500 /* Enable L1 Sub-State when both root port and endpoint support */
501 if (CONFIG(PCIEXP_L1_SUB_STATE))
502 pciexp_config_L1_sub_state(root, dev);
504 /* Check for and enable ASPM */
505 if (CONFIG(PCIEXP_ASPM))
506 pciexp_enable_aspm(root, root_cap, dev, cap);
508 /* Adjust Max_Payload_Size of link ends. */
509 pciexp_set_max_payload_size(root, root_cap, dev, cap);
511 pciexp_configure_ltr(root, root_cap, dev, cap);
514 void pciexp_scan_bus(struct bus *bus, unsigned int min_devfn,
515 unsigned int max_devfn)
517 struct device *child;
519 pciexp_enable_ltr(bus->dev);
521 pci_scan_bus(bus, min_devfn, max_devfn);
523 for (child = bus->children; child; child = child->sibling) {
524 if (child->path.type != DEVICE_PATH_PCI)
525 continue;
526 if ((child->path.pci.devfn < min_devfn) ||
527 (child->path.pci.devfn > max_devfn)) {
528 continue;
530 pciexp_tune_dev(child);
534 void pciexp_scan_bridge(struct device *dev)
536 do_pci_scan_bridge(dev, pciexp_scan_bus);
539 /** Default device operations for PCI Express bridges */
540 static struct pci_operations pciexp_bus_ops_pci = {
541 .set_subsystem = 0,
544 struct device_operations default_pciexp_ops_bus = {
545 .read_resources = pci_bus_read_resources,
546 .set_resources = pci_dev_set_resources,
547 .enable_resources = pci_bus_enable_resources,
548 .scan_bus = pciexp_scan_bridge,
549 .reset_bus = pci_bus_reset,
550 .ops_pci = &pciexp_bus_ops_pci,
553 static void pciexp_hotplug_dummy_read_resources(struct device *dev)
555 struct resource *resource;
557 /* Add extra memory space */
558 resource = new_resource(dev, 0x10);
559 resource->size = CONFIG_PCIEXP_HOTPLUG_MEM;
560 resource->align = 12;
561 resource->gran = 12;
562 resource->limit = 0xffffffff;
563 resource->flags |= IORESOURCE_MEM;
565 /* Add extra prefetchable memory space */
566 resource = new_resource(dev, 0x14);
567 resource->size = CONFIG_PCIEXP_HOTPLUG_PREFETCH_MEM;
568 resource->align = 12;
569 resource->gran = 12;
570 resource->limit = 0xffffffffffffffff;
571 resource->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
573 /* Set resource flag requesting allocation above 4G boundary. */
574 if (CONFIG(PCIEXP_HOTPLUG_PREFETCH_MEM_ABOVE_4G))
575 resource->flags |= IORESOURCE_ABOVE_4G;
577 /* Add extra I/O space */
578 resource = new_resource(dev, 0x18);
579 resource->size = CONFIG_PCIEXP_HOTPLUG_IO;
580 resource->align = 12;
581 resource->gran = 12;
582 resource->limit = 0xffff;
583 resource->flags |= IORESOURCE_IO;
586 static struct device_operations pciexp_hotplug_dummy_ops = {
587 .read_resources = pciexp_hotplug_dummy_read_resources,
588 .set_resources = noop_set_resources,
591 void pciexp_hotplug_scan_bridge(struct device *dev)
593 dev->hotplug_buses = CONFIG_PCIEXP_HOTPLUG_BUSES;
595 /* Normal PCIe Scan */
596 pciexp_scan_bridge(dev);
598 /* Add dummy slot to preserve resources, must happen after bus scan */
599 struct device *dummy;
600 struct device_path dummy_path = { .type = DEVICE_PATH_NONE };
601 dummy = alloc_dev(dev->link_list, &dummy_path);
602 dummy->ops = &pciexp_hotplug_dummy_ops;
605 struct device_operations default_pciexp_hotplug_ops_bus = {
606 .read_resources = pci_bus_read_resources,
607 .set_resources = pci_dev_set_resources,
608 .enable_resources = pci_bus_enable_resources,
609 .scan_bus = pciexp_hotplug_scan_bridge,
610 .reset_bus = pci_bus_reset,
611 .ops_pci = &pciexp_bus_ops_pci,