Merge commit '2bee374f9ad3367948d472f4e3489135fcac9e1c'
[unleashed.git] / usr / src / uts / common / io / pciex / pcie.c
blob3aa7006a45c6bc68ea03a7d92c1182d3e95299a0
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2017, Joyent, Inc.
27 #include <sys/sysmacros.h>
28 #include <sys/types.h>
29 #include <sys/kmem.h>
30 #include <sys/modctl.h>
31 #include <sys/ddi.h>
32 #include <sys/sunddi.h>
33 #include <sys/sunndi.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/util.h>
36 #include <sys/promif.h>
37 #include <sys/disp.h>
38 #include <sys/stat.h>
39 #include <sys/file.h>
40 #include <sys/pci_cap.h>
41 #include <sys/pci_impl.h>
42 #include <sys/pcie_impl.h>
43 #include <sys/hotplug/pci/pcie_hp.h>
44 #include <sys/hotplug/pci/pciehpc.h>
45 #include <sys/hotplug/pci/pcishpc.h>
46 #include <sys/hotplug/pci/pcicfg.h>
47 #include <sys/pci_cfgacc.h>
49 /* Local functions prototypes */
50 static void pcie_init_pfd(dev_info_t *);
51 static void pcie_fini_pfd(dev_info_t *);
53 #if defined(__i386) || defined(__amd64)
54 static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
55 #endif /* defined(__i386) || defined(__amd64) */
57 #ifdef DEBUG
58 uint_t pcie_debug_flags = 0;
59 static void pcie_print_bus(pcie_bus_t *bus_p);
60 void pcie_dbg(char *fmt, ...);
61 #endif /* DEBUG */
63 /* Variable to control default PCI-Express config settings */
64 ushort_t pcie_command_default =
65 PCI_COMM_SERR_ENABLE |
66 PCI_COMM_WAIT_CYC_ENAB |
67 PCI_COMM_PARITY_DETECT |
68 PCI_COMM_ME |
69 PCI_COMM_MAE |
70 PCI_COMM_IO;
72 /* xxx_fw are bits that are controlled by FW and should not be modified */
73 ushort_t pcie_command_default_fw =
74 PCI_COMM_SPEC_CYC |
75 PCI_COMM_MEMWR_INVAL |
76 PCI_COMM_PALETTE_SNOOP |
77 PCI_COMM_WAIT_CYC_ENAB |
78 0xF800; /* Reserved Bits */
80 ushort_t pcie_bdg_command_default_fw =
81 PCI_BCNF_BCNTRL_ISA_ENABLE |
82 PCI_BCNF_BCNTRL_VGA_ENABLE |
83 0xF000; /* Reserved Bits */
85 /* PCI-Express Base error defaults */
86 ushort_t pcie_base_err_default =
87 PCIE_DEVCTL_CE_REPORTING_EN |
88 PCIE_DEVCTL_NFE_REPORTING_EN |
89 PCIE_DEVCTL_FE_REPORTING_EN |
90 PCIE_DEVCTL_UR_REPORTING_EN;
92 /* PCI-Express Device Control Register */
93 uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
94 PCIE_DEVCTL_MAX_READ_REQ_512;
96 /* PCI-Express AER Root Control Register */
97 #define PCIE_ROOT_SYS_ERR (PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
98 PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
99 PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)
101 ushort_t pcie_root_ctrl_default =
102 PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
103 PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
104 PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;
106 /* PCI-Express Root Error Command Register */
107 ushort_t pcie_root_error_cmd_default =
108 PCIE_AER_RE_CMD_CE_REP_EN |
109 PCIE_AER_RE_CMD_NFE_REP_EN |
110 PCIE_AER_RE_CMD_FE_REP_EN;
112 /* ECRC settings in the PCIe AER Control Register */
113 uint32_t pcie_ecrc_value =
114 PCIE_AER_CTL_ECRC_GEN_ENA |
115 PCIE_AER_CTL_ECRC_CHECK_ENA;
118 * If a particular platform wants to disable certain errors such as UR/MA,
119 * instead of using #defines have the platform's PCIe Root Complex driver set
120 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions. For
121 * x86 the closest thing to a PCIe root complex driver is NPE. For SPARC the
122 * closest PCIe root complex driver is PX.
124 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg) x86
125 * systems may want to disable SERR in general. For root ports, enabling SERR
126 * causes NMIs which are not handled and results in a watchdog timeout error.
128 uint32_t pcie_aer_uce_mask = 0; /* AER UE Mask */
129 uint32_t pcie_aer_ce_mask = 0; /* AER CE Mask */
130 uint32_t pcie_aer_suce_mask = 0; /* AER Secondary UE Mask */
131 uint32_t pcie_serr_disable_flag = 0; /* Disable SERR */
133 /* Default severities needed for eversholt. Error handling doesn't care */
134 uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
135 PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
136 PCIE_AER_UCE_TRAINING;
137 uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
138 PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
139 PCIE_AER_SUCE_USC_MSG_DATA_ERR;
141 int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
142 int pcie_disable_ari = 0;
144 static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
145 int *max_supported);
146 static int pcie_get_max_supported(dev_info_t *dip, void *arg);
147 static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
148 caddr_t *addrp, ddi_acc_handle_t *handlep);
149 static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);
151 dev_info_t *pcie_get_rc_dip(dev_info_t *dip);
154 * modload support
157 static struct modlmisc modlmisc = {
158 &mod_miscops, /* Type of module */
159 "PCI Express Framework Module"
162 static struct modlinkage modlinkage = {
163 MODREV_1,
164 (void *)&modlmisc,
165 NULL
169 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
170 * Currently used to send the pci.fabric ereports whose payload depends on the
171 * type of PCI device it is being sent for.
173 char *pcie_nv_buf;
174 nv_alloc_t *pcie_nvap;
175 nvlist_t *pcie_nvl;
178 _init(void)
180 int rval;
182 pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
183 pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
184 pcie_nvl = fm_nvlist_create(pcie_nvap);
186 if ((rval = mod_install(&modlinkage)) != 0) {
187 fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
188 fm_nva_xdestroy(pcie_nvap);
189 kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
191 return (rval);
195 _fini()
197 int rval;
199 if ((rval = mod_remove(&modlinkage)) == 0) {
200 fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
201 fm_nva_xdestroy(pcie_nvap);
202 kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
204 return (rval);
208 _info(struct modinfo *modinfop)
210 return (mod_info(&modlinkage, modinfop));
213 /* ARGSUSED */
215 pcie_init(dev_info_t *dip, caddr_t arg)
217 int ret = DDI_SUCCESS;
220 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
221 * and DEVCTL_BUS_* ioctls to this bus.
223 if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
224 PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
225 DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
226 PCIE_DBG("Failed to create devctl minor node for %s%d\n",
227 ddi_driver_name(dip), ddi_get_instance(dip));
229 return (ret);
232 if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
234 * On some x86 platforms, we observed unexpected hotplug
235 * initialization failures in recent years. The known cause
236 * is a hardware issue: while the problem PCI bridges have
237 * the Hotplug Capable registers set, the machine actually
238 * does not implement the expected ACPI object.
240 * We don't want to stop PCI driver attach and system boot
241 * just because of this hotplug initialization failure.
242 * Continue with a debug message printed.
244 PCIE_DBG("%s%d: Failed setting hotplug framework\n",
245 ddi_driver_name(dip), ddi_get_instance(dip));
249 return (DDI_SUCCESS);
252 /* ARGSUSED */
254 pcie_uninit(dev_info_t *dip)
256 int ret = DDI_SUCCESS;
258 if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
259 (void) pcie_ari_disable(dip);
261 if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
262 PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
263 ddi_driver_name(dip), ddi_get_instance(dip));
265 return (ret);
268 ddi_remove_minor_node(dip, "devctl");
270 return (ret);
274 * PCIe module interface for enabling hotplug interrupt.
276 * It should be called after pcie_init() is done and bus driver's
277 * interrupt handlers have being attached.
280 pcie_hpintr_enable(dev_info_t *dip)
282 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
283 pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);
285 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
286 (void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
287 } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
288 (void) pcishpc_enable_irqs(ctrl_p);
290 return (DDI_SUCCESS);
294 * PCIe module interface for disabling hotplug interrupt.
296 * It should be called before pcie_uninit() is called and bus driver's
297 * interrupt handlers is dettached.
300 pcie_hpintr_disable(dev_info_t *dip)
302 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
303 pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);
305 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
306 (void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
307 } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
308 (void) pcishpc_disable_irqs(ctrl_p);
310 return (DDI_SUCCESS);
313 /* ARGSUSED */
315 pcie_intr(dev_info_t *dip)
317 return (pcie_hp_intr(dip));
320 /* ARGSUSED */
322 pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
324 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
327 * Make sure the open is for the right file type.
329 if (otyp != OTYP_CHR)
330 return (EINVAL);
333 * Handle the open by tracking the device state.
335 if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
336 ((flags & FEXCL) &&
337 (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
338 return (EBUSY);
341 if (flags & FEXCL)
342 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
343 else
344 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;
346 return (0);
349 /* ARGSUSED */
351 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
353 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
355 if (otyp != OTYP_CHR)
356 return (EINVAL);
358 bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
360 return (0);
363 /* ARGSUSED */
365 pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
366 cred_t *credp, int *rvalp)
368 struct devctl_iocdata *dcp;
369 uint_t bus_state;
370 int rv = DDI_SUCCESS;
373 * We can use the generic implementation for devctl ioctl
375 switch (cmd) {
376 case DEVCTL_DEVICE_GETSTATE:
377 case DEVCTL_DEVICE_ONLINE:
378 case DEVCTL_DEVICE_OFFLINE:
379 case DEVCTL_BUS_GETSTATE:
380 return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
381 default:
382 break;
386 * read devctl ioctl data
388 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
389 return (EFAULT);
391 switch (cmd) {
392 case DEVCTL_BUS_QUIESCE:
393 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
394 if (bus_state == BUS_QUIESCED)
395 break;
396 (void) ndi_set_bus_state(dip, BUS_QUIESCED);
397 break;
398 case DEVCTL_BUS_UNQUIESCE:
399 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
400 if (bus_state == BUS_ACTIVE)
401 break;
402 (void) ndi_set_bus_state(dip, BUS_ACTIVE);
403 break;
404 case DEVCTL_BUS_RESET:
405 case DEVCTL_BUS_RESETALL:
406 case DEVCTL_DEVICE_RESET:
407 rv = ENOTSUP;
408 break;
409 default:
410 rv = ENOTTY;
413 ndi_dc_freehdl(dcp);
414 return (rv);
417 /* ARGSUSED */
419 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
420 int flags, char *name, caddr_t valuep, int *lengthp)
422 if (dev == DDI_DEV_T_ANY)
423 goto skip;
425 if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
426 strcmp(name, "pci-occupant") == 0) {
427 int pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));
429 pcie_hp_create_occupant_props(dip, dev, pci_dev);
432 skip:
433 return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
437 pcie_init_cfghdl(dev_info_t *cdip)
439 pcie_bus_t *bus_p;
440 ddi_acc_handle_t eh = NULL;
442 bus_p = PCIE_DIP2BUS(cdip);
443 if (bus_p == NULL)
444 return (DDI_FAILURE);
446 /* Create an config access special to error handling */
447 if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
448 cmn_err(CE_WARN, "Cannot setup config access"
449 " for BDF 0x%x\n", bus_p->bus_bdf);
450 return (DDI_FAILURE);
453 bus_p->bus_cfg_hdl = eh;
454 return (DDI_SUCCESS);
457 void
458 pcie_fini_cfghdl(dev_info_t *cdip)
460 pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);
462 pci_config_teardown(&bus_p->bus_cfg_hdl);
465 void
466 pcie_determine_serial(dev_info_t *dip)
468 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
469 ddi_acc_handle_t h;
470 uint16_t cap;
471 uchar_t serial[8];
472 uint32_t low, high;
474 if (!PCIE_IS_PCIE(bus_p))
475 return;
477 h = bus_p->bus_cfg_hdl;
479 if ((PCI_CAP_LOCATE(h, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap)) ==
480 DDI_FAILURE)
481 return;
483 high = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_UPPER_DW);
484 low = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_LOWER_DW);
487 * Here, we're trying to figure out if we had an invalid PCIe read. From
488 * looking at the contents of the value, it can be hard to tell the
489 * difference between a value that has all 1s correctly versus if we had
490 * an error. In this case, we only assume it's invalid if both register
491 * reads are invalid. We also only use 32-bit reads as we're not sure if
492 * all devices will support these as 64-bit reads, while we know that
493 * they'll support these as 32-bit reads.
495 if (high == PCI_EINVAL32 && low == PCI_EINVAL32)
496 return;
498 serial[0] = low & 0xff;
499 serial[1] = (low >> 8) & 0xff;
500 serial[2] = (low >> 16) & 0xff;
501 serial[3] = (low >> 24) & 0xff;
502 serial[4] = high & 0xff;
503 serial[5] = (high >> 8) & 0xff;
504 serial[6] = (high >> 16) & 0xff;
505 serial[7] = (high >> 24) & 0xff;
507 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, "pcie-serial",
508 serial, sizeof (serial));
512 * PCI-Express child device initialization.
513 * This function enables generic pci-express interrupts and error
514 * handling.
516 * @param pdip root dip (root nexus's dip)
517 * @param cdip child's dip (device's dip)
518 * @return DDI_SUCCESS or DDI_FAILURE
520 /* ARGSUSED */
522 pcie_initchild(dev_info_t *cdip)
524 uint16_t tmp16, reg16;
525 pcie_bus_t *bus_p;
526 uint32_t devid, venid;
528 bus_p = PCIE_DIP2BUS(cdip);
529 if (bus_p == NULL) {
530 PCIE_DBG("%s: BUS not found.\n",
531 ddi_driver_name(cdip));
533 return (DDI_FAILURE);
536 if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
537 return (DDI_FAILURE);
540 * Update pcie_bus_t with real Vendor Id Device Id.
542 * For assigned devices in IOV environment, the OBP will return
543 * faked device id/vendor id on configration read and for both
544 * properties in root domain. translate_devid() function will
545 * update the properties with real device-id/vendor-id on such
546 * platforms, so that we can utilize the properties here to get
547 * real device-id/vendor-id and overwrite the faked ids.
549 * For unassigned devices or devices in non-IOV environment, the
550 * operation below won't make a difference.
552 * The IOV implementation only supports assignment of PCIE
553 * endpoint devices. Devices under pci-pci bridges don't need
554 * operation like this.
556 devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
557 "device-id", -1);
558 venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
559 "vendor-id", -1);
560 bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);
562 /* Clear the device's status register */
563 reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
564 PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);
566 /* Setup the device's command register */
567 reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
568 tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;
570 #if defined(__i386) || defined(__amd64)
571 boolean_t empty_io_range = B_FALSE;
572 boolean_t empty_mem_range = B_FALSE;
574 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem
575 * access as it can cause a hang if enabled.
577 pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
578 &empty_mem_range);
579 if ((empty_io_range == B_TRUE) &&
580 (pcie_command_default & PCI_COMM_IO)) {
581 tmp16 &= ~PCI_COMM_IO;
582 PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
583 ddi_driver_name(cdip), bus_p->bus_bdf);
585 if ((empty_mem_range == B_TRUE) &&
586 (pcie_command_default & PCI_COMM_MAE)) {
587 tmp16 &= ~PCI_COMM_MAE;
588 PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
589 ddi_driver_name(cdip), bus_p->bus_bdf);
591 #endif /* defined(__i386) || defined(__amd64) */
593 if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
594 tmp16 &= ~PCI_COMM_SERR_ENABLE;
596 PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
597 PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);
600 * If the device has a bus control register then program it
601 * based on the settings in the command register.
603 if (PCIE_IS_BDG(bus_p)) {
604 /* Clear the device's secondary status register */
605 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
606 PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);
608 /* Setup the device's secondary command register */
609 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
610 tmp16 = (reg16 & pcie_bdg_command_default_fw);
612 tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
614 * Workaround for this Nvidia bridge. Don't enable the SERR
615 * enable bit in the bridge control register as it could lead to
616 * bogus NMIs.
618 if (bus_p->bus_dev_ven_id == 0x037010DE)
619 tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;
621 if (pcie_command_default & PCI_COMM_PARITY_DETECT)
622 tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
625 * Enable Master Abort Mode only if URs have not been masked.
626 * For PCI and PCIe-PCI bridges, enabling this bit causes a
627 * Master Aborts/UR to be forwarded as a UR/TA or SERR. If this
628 * bit is masked, posted requests are dropped and non-posted
629 * requests are returned with -1.
631 if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
632 tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
633 else
634 tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
635 PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
636 PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
637 reg16);
640 if (PCIE_IS_PCIE(bus_p)) {
641 /* Setup PCIe device control register */
642 reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
643 /* note: MPS/MRRS are initialized in pcie_initchild_mps() */
644 tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
645 PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
646 (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
647 PCIE_DEVCTL_MAX_PAYLOAD_MASK));
648 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
649 PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
651 /* Enable PCIe errors */
652 pcie_enable_errors(cdip);
654 pcie_determine_serial(cdip);
657 bus_p->bus_ari = B_FALSE;
658 if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
659 == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
660 == PCIE_ARI_DEVICE)) {
661 bus_p->bus_ari = B_TRUE;
664 if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
665 pcie_fini_cfghdl(cdip);
666 return (DDI_FAILURE);
669 return (DDI_SUCCESS);
672 static void
673 pcie_init_pfd(dev_info_t *dip)
675 pf_data_t *pfd_p = PCIE_ZALLOC(pf_data_t);
676 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
678 PCIE_DIP2PFD(dip) = pfd_p;
680 pfd_p->pe_bus_p = bus_p;
681 pfd_p->pe_severity_flags = 0;
682 pfd_p->pe_orig_severity_flags = 0;
683 pfd_p->pe_lock = B_FALSE;
684 pfd_p->pe_valid = B_FALSE;
686 /* Allocate the root fault struct for both RC and RP */
687 if (PCIE_IS_ROOT(bus_p)) {
688 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
689 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
690 PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
693 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
694 PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
695 PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
697 if (PCIE_IS_BDG(bus_p))
698 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
700 if (PCIE_IS_PCIE(bus_p)) {
701 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
703 if (PCIE_IS_RP(bus_p))
704 PCIE_RP_REG(pfd_p) =
705 PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
707 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
708 PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
710 if (PCIE_IS_RP(bus_p)) {
711 PCIE_ADV_RP_REG(pfd_p) =
712 PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
713 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
714 PCIE_INVALID_BDF;
715 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
716 PCIE_INVALID_BDF;
717 } else if (PCIE_IS_PCIE_BDG(bus_p)) {
718 PCIE_ADV_BDG_REG(pfd_p) =
719 PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
720 PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
721 PCIE_INVALID_BDF;
724 if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
725 PCIX_BDG_ERR_REG(pfd_p) =
726 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);
728 if (PCIX_ECC_VERSION_CHECK(bus_p)) {
729 PCIX_BDG_ECC_REG(pfd_p, 0) =
730 PCIE_ZALLOC(pf_pcix_ecc_regs_t);
731 PCIX_BDG_ECC_REG(pfd_p, 1) =
732 PCIE_ZALLOC(pf_pcix_ecc_regs_t);
735 } else if (PCIE_IS_PCIX(bus_p)) {
736 if (PCIE_IS_BDG(bus_p)) {
737 PCIX_BDG_ERR_REG(pfd_p) =
738 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);
740 if (PCIX_ECC_VERSION_CHECK(bus_p)) {
741 PCIX_BDG_ECC_REG(pfd_p, 0) =
742 PCIE_ZALLOC(pf_pcix_ecc_regs_t);
743 PCIX_BDG_ECC_REG(pfd_p, 1) =
744 PCIE_ZALLOC(pf_pcix_ecc_regs_t);
746 } else {
747 PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);
749 if (PCIX_ECC_VERSION_CHECK(bus_p))
750 PCIX_ECC_REG(pfd_p) =
751 PCIE_ZALLOC(pf_pcix_ecc_regs_t);
756 static void
757 pcie_fini_pfd(dev_info_t *dip)
759 pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
760 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
762 if (PCIE_IS_PCIE(bus_p)) {
763 if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
764 if (PCIX_ECC_VERSION_CHECK(bus_p)) {
765 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
766 sizeof (pf_pcix_ecc_regs_t));
767 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
768 sizeof (pf_pcix_ecc_regs_t));
771 kmem_free(PCIX_BDG_ERR_REG(pfd_p),
772 sizeof (pf_pcix_bdg_err_regs_t));
775 if (PCIE_IS_RP(bus_p))
776 kmem_free(PCIE_ADV_RP_REG(pfd_p),
777 sizeof (pf_pcie_adv_rp_err_regs_t));
778 else if (PCIE_IS_PCIE_BDG(bus_p))
779 kmem_free(PCIE_ADV_BDG_REG(pfd_p),
780 sizeof (pf_pcie_adv_bdg_err_regs_t));
782 kmem_free(PCIE_ADV_REG(pfd_p),
783 sizeof (pf_pcie_adv_err_regs_t));
785 if (PCIE_IS_RP(bus_p))
786 kmem_free(PCIE_RP_REG(pfd_p),
787 sizeof (pf_pcie_rp_err_regs_t));
789 kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
790 } else if (PCIE_IS_PCIX(bus_p)) {
791 if (PCIE_IS_BDG(bus_p)) {
792 if (PCIX_ECC_VERSION_CHECK(bus_p)) {
793 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
794 sizeof (pf_pcix_ecc_regs_t));
795 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
796 sizeof (pf_pcix_ecc_regs_t));
799 kmem_free(PCIX_BDG_ERR_REG(pfd_p),
800 sizeof (pf_pcix_bdg_err_regs_t));
801 } else {
802 if (PCIX_ECC_VERSION_CHECK(bus_p))
803 kmem_free(PCIX_ECC_REG(pfd_p),
804 sizeof (pf_pcix_ecc_regs_t));
806 kmem_free(PCIX_ERR_REG(pfd_p),
807 sizeof (pf_pcix_err_regs_t));
811 if (PCIE_IS_BDG(bus_p))
812 kmem_free(PCI_BDG_ERR_REG(pfd_p),
813 sizeof (pf_pci_bdg_err_regs_t));
815 kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
816 kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
818 if (PCIE_IS_ROOT(bus_p)) {
819 kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
820 kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
823 kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));
825 PCIE_DIP2PFD(dip) = NULL;
830 * Special functions to allocate pf_data_t's for PCIe root complexes.
831 * Note: Root Complex not Root Port
833 void
834 pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
836 pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
837 pfd_p->pe_severity_flags = 0;
838 pfd_p->pe_orig_severity_flags = 0;
839 pfd_p->pe_lock = B_FALSE;
840 pfd_p->pe_valid = B_FALSE;
842 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
843 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
844 PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
845 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
846 PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
847 PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
848 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
849 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
850 PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
851 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
852 PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
853 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
854 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;
856 PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
859 void
860 pcie_rc_fini_pfd(pf_data_t *pfd_p)
862 kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
863 kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
864 kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
865 kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
866 kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
867 kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
868 kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
869 kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
870 kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
874 * init pcie_bus_t for root complex
876 * Only a few of the fields in bus_t is valid for root complex.
877 * The fields that are bracketed are initialized in this routine:
879 * dev_info_t * <bus_dip>
880 * dev_info_t * bus_rp_dip
881 * ddi_acc_handle_t bus_cfg_hdl
882 * uint_t <bus_fm_flags>
883 * pcie_req_id_t bus_bdf
884 * pcie_req_id_t bus_rp_bdf
885 * uint32_t bus_dev_ven_id
886 * uint8_t bus_rev_id
887 * uint8_t <bus_hdr_type>
888 * uint16_t <bus_dev_type>
889 * uint8_t bus_bdg_secbus
890 * uint16_t bus_pcie_off
891 * uint16_t <bus_aer_off>
892 * uint16_t bus_pcix_off
893 * uint16_t bus_ecc_ver
894 * pci_bus_range_t bus_bus_range
895 * ppb_ranges_t * bus_addr_ranges
896 * int bus_addr_entries
897 * pci_regspec_t * bus_assigned_addr
898 * int bus_assigned_entries
899 * pf_data_t * bus_pfd
900 * pcie_domain_t * <bus_dom>
901 * int bus_mps
902 * uint64_t bus_cfgacc_base
903 * void * bus_plat_private
905 void
906 pcie_rc_init_bus(dev_info_t *dip)
908 pcie_bus_t *bus_p;
910 bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
911 bus_p->bus_dip = dip;
912 bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
913 bus_p->bus_hdr_type = PCI_HEADER_ONE;
915 /* Fake that there are AER logs */
916 bus_p->bus_aer_off = (uint16_t)-1;
918 /* Needed only for handle lookup */
919 bus_p->bus_fm_flags |= PF_FM_READY;
921 ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
923 PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
926 void
927 pcie_rc_fini_bus(dev_info_t *dip)
929 pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
930 ndi_set_bus_private(dip, B_FALSE, 0, NULL);
931 kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
932 kmem_free(bus_p, sizeof (pcie_bus_t));
936 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
937 * config space
939 * This routine is invoked during boot, either after creating a devinfo node
940 * (x86 case) or during px driver attach (sparc case); it is also invoked
941 * in hotplug context after a devinfo node is created.
943 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
944 * is set:
946 * dev_info_t * <bus_dip>
947 * dev_info_t * <bus_rp_dip>
948 * ddi_acc_handle_t bus_cfg_hdl
949 * uint_t bus_fm_flags
950 * pcie_req_id_t <bus_bdf>
951 * pcie_req_id_t <bus_rp_bdf>
952 * uint32_t <bus_dev_ven_id>
953 * uint8_t <bus_rev_id>
954 * uint8_t <bus_hdr_type>
955 * uint16_t <bus_dev_type>
956 * uint8_t <bus_bdg_secbus
957 * uint16_t <bus_pcie_off>
958 * uint16_t <bus_aer_off>
959 * uint16_t <bus_pcix_off>
960 * uint16_t <bus_ecc_ver>
961 * pci_bus_range_t bus_bus_range
962 * ppb_ranges_t * bus_addr_ranges
963 * int bus_addr_entries
964 * pci_regspec_t * bus_assigned_addr
965 * int bus_assigned_entries
966 * pf_data_t * bus_pfd
967 * pcie_domain_t * bus_dom
968 * int bus_mps
969 * uint64_t bus_cfgacc_base
970 * void * bus_plat_private
972 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
973 * is set:
975 * dev_info_t * bus_dip
976 * dev_info_t * bus_rp_dip
977 * ddi_acc_handle_t bus_cfg_hdl
978 * uint_t bus_fm_flags
979 * pcie_req_id_t bus_bdf
980 * pcie_req_id_t bus_rp_bdf
981 * uint32_t bus_dev_ven_id
982 * uint8_t bus_rev_id
983 * uint8_t bus_hdr_type
984 * uint16_t bus_dev_type
985 * uint8_t <bus_bdg_secbus>
986 * uint16_t bus_pcie_off
987 * uint16_t bus_aer_off
988 * uint16_t bus_pcix_off
989 * uint16_t bus_ecc_ver
990 * pci_bus_range_t <bus_bus_range>
991 * ppb_ranges_t * <bus_addr_ranges>
992 * int <bus_addr_entries>
993 * pci_regspec_t * <bus_assigned_addr>
994 * int <bus_assigned_entries>
995 * pf_data_t * <bus_pfd>
996 * pcie_domain_t * bus_dom
997 * int bus_mps
998 * uint64_t bus_cfgacc_base
999 * void * <bus_plat_private>
1002 pcie_bus_t *
1003 pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
1005 uint16_t status, base, baseptr, num_cap;
1006 uint32_t capid;
1007 int range_size;
1008 pcie_bus_t *bus_p;
1009 dev_info_t *rcdip;
1010 dev_info_t *pdip;
1011 const char *errstr = NULL;
1013 if (!(flags & PCIE_BUS_INITIAL))
1014 goto initial_done;
1016 bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
1018 bus_p->bus_dip = dip;
1019 bus_p->bus_bdf = bdf;
1021 rcdip = pcie_get_rc_dip(dip);
1022 ASSERT(rcdip != NULL);
1024 /* Save the Vendor ID, Device ID and revision ID */
1025 bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
1026 bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
1027 /* Save the Header Type */
1028 bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
1029 bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;
1032 * Figure out the device type and all the relavant capability offsets
1034 /* set default value */
1035 bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
1037 status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
1038 if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
1039 goto caps_done; /* capability not supported */
1041 /* Relevant conventional capabilities first */
1043 /* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
1044 num_cap = 2;
1046 switch (bus_p->bus_hdr_type) {
1047 case PCI_HEADER_ZERO:
1048 baseptr = PCI_CONF_CAP_PTR;
1049 break;
1050 case PCI_HEADER_PPB:
1051 baseptr = PCI_BCNF_CAP_PTR;
1052 break;
1053 case PCI_HEADER_CARDBUS:
1054 baseptr = PCI_CBUS_CAP_PTR;
1055 break;
1056 default:
1057 cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
1058 __func__, bus_p->bus_hdr_type);
1059 goto caps_done;
1062 base = baseptr;
1063 for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
1064 base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
1065 capid = pci_cfgacc_get8(rcdip, bdf, base);
1066 switch (capid) {
1067 case PCI_CAP_ID_PCI_E:
1068 bus_p->bus_pcie_off = base;
1069 bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
1070 base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
1072 /* Check and save PCIe hotplug capability information */
1073 if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
1074 (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
1075 & PCIE_PCIECAP_SLOT_IMPL) &&
1076 (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
1077 & PCIE_SLOTCAP_HP_CAPABLE))
1078 bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
1080 num_cap--;
1081 break;
1082 case PCI_CAP_ID_PCIX:
1083 bus_p->bus_pcix_off = base;
1084 if (PCIE_IS_BDG(bus_p))
1085 bus_p->bus_ecc_ver =
1086 pci_cfgacc_get16(rcdip, bdf, base +
1087 PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
1088 else
1089 bus_p->bus_ecc_ver =
1090 pci_cfgacc_get16(rcdip, bdf, base +
1091 PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
1092 num_cap--;
1093 break;
1094 default:
1095 break;
1099 /* Check and save PCI hotplug (SHPC) capability information */
1100 if (PCIE_IS_BDG(bus_p)) {
1101 base = baseptr;
1102 for (base = pci_cfgacc_get8(rcdip, bdf, base);
1103 base; base = pci_cfgacc_get8(rcdip, bdf,
1104 base + PCI_CAP_NEXT_PTR)) {
1105 capid = pci_cfgacc_get8(rcdip, bdf, base);
1106 if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
1107 bus_p->bus_pci_hp_off = base;
1108 bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
1109 break;
1114 /* Then, relevant extended capabilities */
1116 if (!PCIE_IS_PCIE(bus_p))
1117 goto caps_done;
1119 /* Extended caps: PCIE_EXT_CAP_ID_AER */
1120 for (base = PCIE_EXT_CAP; base; base = (capid >>
1121 PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
1122 capid = pci_cfgacc_get32(rcdip, bdf, base);
1123 if (capid == PCI_CAP_EINVAL32)
1124 break;
1125 if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
1126 == PCIE_EXT_CAP_ID_AER) {
1127 bus_p->bus_aer_off = base;
1128 break;
1132 caps_done:
1133 /* save RP dip and RP bdf */
1134 if (PCIE_IS_RP(bus_p)) {
1135 bus_p->bus_rp_dip = dip;
1136 bus_p->bus_rp_bdf = bus_p->bus_bdf;
1137 } else {
1138 for (pdip = ddi_get_parent(dip); pdip;
1139 pdip = ddi_get_parent(pdip)) {
1140 pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
1143 * If RP dip and RP bdf in parent's bus_t have
1144 * been initialized, simply use these instead of
1145 * continuing up to the RC.
1147 if (parent_bus_p->bus_rp_dip != NULL) {
1148 bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
1149 bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
1150 break;
1154 * When debugging be aware that some NVIDIA x86
1155 * architectures have 2 nodes for each RP, One at Bus
1156 * 0x0 and one at Bus 0x80. The requester is from Bus
1157 * 0x80
1159 if (PCIE_IS_ROOT(parent_bus_p)) {
1160 bus_p->bus_rp_dip = pdip;
1161 bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
1162 break;
1167 bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
1168 bus_p->bus_fm_flags = 0;
1169 bus_p->bus_mps = 0;
1171 ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);
1173 if (PCIE_IS_HOTPLUG_CAPABLE(dip))
1174 (void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
1175 "hotplug-capable");
1177 initial_done:
1178 if (!(flags & PCIE_BUS_FINAL))
1179 goto final_done;
1181 /* already initialized? */
1182 bus_p = PCIE_DIP2BUS(dip);
1184 /* Save the Range information if device is a switch/bridge */
1185 if (PCIE_IS_BDG(bus_p)) {
1186 /* get "bus_range" property */
1187 range_size = sizeof (pci_bus_range_t);
1188 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1189 "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
1190 != DDI_PROP_SUCCESS) {
1191 errstr = "Cannot find \"bus-range\" property";
1192 cmn_err(CE_WARN,
1193 "PCIE init err info failed BDF 0x%x:%s\n",
1194 bus_p->bus_bdf, errstr);
1197 /* get secondary bus number */
1198 rcdip = pcie_get_rc_dip(dip);
1199 ASSERT(rcdip != NULL);
1201 bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
1202 bus_p->bus_bdf, PCI_BCNF_SECBUS);
1204 /* Get "ranges" property */
1205 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1206 "ranges", (caddr_t)&bus_p->bus_addr_ranges,
1207 &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
1208 bus_p->bus_addr_entries = 0;
1209 bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
1212 /* save "assigned-addresses" property array, ignore failues */
1213 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1214 "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
1215 &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
1216 bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
1217 else
1218 bus_p->bus_assigned_entries = 0;
1220 pcie_init_pfd(dip);
1222 pcie_init_plat(dip);
1224 final_done:
1226 PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
1227 ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
1228 bus_p->bus_bdg_secbus);
1229 #ifdef DEBUG
1230 pcie_print_bus(bus_p);
1231 #endif
1233 return (bus_p);
/*
 * Invoked before destroying devinfo node, mostly during hotplug
 * operation to free pcie_bus_t data structure
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		/* Undo pcie_init_bus(PCIE_BUS_INITIAL): plat, pfd, props */
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		/* Detach the bus_t from the devinfo before freeing it */
		ndi_set_bus_private(dip, B_TRUE, 0, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}
1274 pcie_postattach_child(dev_info_t *cdip)
1276 pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);
1278 if (!bus_p)
1279 return (DDI_FAILURE);
1281 return (pcie_enable_ce(cdip));
/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	/* Order matters: quiesce errors before tearing down handles/domain */
	pcie_disable_errors(cdip);
	pcie_fini_cfghdl(cdip);
	pcie_fini_dom(cdip);
}
1298 * find the root complex dip
1300 dev_info_t *
1301 pcie_get_rc_dip(dev_info_t *dip)
1303 dev_info_t *rcdip;
1304 pcie_bus_t *rc_bus_p;
1306 for (rcdip = ddi_get_parent(dip); rcdip;
1307 rcdip = ddi_get_parent(rcdip)) {
1308 rc_bus_p = PCIE_DIP2BUS(rcdip);
1309 if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
1310 break;
1313 return (rcdip);
1316 static boolean_t
1317 pcie_is_pci_device(dev_info_t *dip)
1319 dev_info_t *pdip;
1320 char *device_type;
1322 pdip = ddi_get_parent(dip);
1323 ASSERT(pdip);
1325 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
1326 "device_type", &device_type) != DDI_PROP_SUCCESS)
1327 return (B_FALSE);
1329 if (strcmp(device_type, "pciex") != 0 &&
1330 strcmp(device_type, "pci") != 0) {
1331 ddi_prop_free(device_type);
1332 return (B_FALSE);
1335 ddi_prop_free(device_type);
1336 return (B_TRUE);
/*
 * Walker argument shared by pcie_fab_init_bus()/pcie_fab_fini_bus() and
 * their ddi_walk_devs() callback pcie_fab_do_init_fini().
 */
typedef struct {
	boolean_t	init;	/* B_TRUE: init bus_t; B_FALSE: fini it */
	uint8_t		flags;	/* PCIE_BUS_* flags passed to init/fini */
} pcie_bus_arg_t;
1344 /*ARGSUSED*/
1345 static int
1346 pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
1348 pcie_req_id_t bdf;
1349 pcie_bus_arg_t *bus_arg = (pcie_bus_arg_t *)arg;
1351 if (!pcie_is_pci_device(dip))
1352 goto out;
1354 if (bus_arg->init) {
1355 if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
1356 goto out;
1358 (void) pcie_init_bus(dip, bdf, bus_arg->flags);
1359 } else {
1360 (void) pcie_fini_bus(dip, bus_arg->flags);
1363 return (DDI_WALK_CONTINUE);
1365 out:
1366 return (DDI_WALK_PRUNECHILD);
1369 void
1370 pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
1372 int circular_count;
1373 dev_info_t *dip = ddi_get_child(rcdip);
1374 pcie_bus_arg_t arg;
1376 arg.init = B_TRUE;
1377 arg.flags = flags;
1379 ndi_devi_enter(rcdip, &circular_count);
1380 ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1381 ndi_devi_exit(rcdip, circular_count);
1384 void
1385 pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
1387 int circular_count;
1388 dev_info_t *dip = ddi_get_child(rcdip);
1389 pcie_bus_arg_t arg;
1391 arg.init = B_FALSE;
1392 arg.flags = flags;
1394 ndi_devi_enter(rcdip, &circular_count);
1395 ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1396 ndi_devi_exit(rcdip, circular_count);
/*
 * Enable PCI Express baseline and (if present) Advanced Error Reporting
 * for the device at dip.  Correctable-error reporting is deliberately
 * left off here; pcie_enable_ce() turns it on later.  Writes are only
 * performed when the corresponding register read did not return the
 * EINVAL sentinel.
 */
void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint16_t reg16, tmp16;
	uint32_t reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).  MPS/MRRS fields are preserved from the current value;
	 * everything else comes from pcie_devctl_default.
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {
		/* pcie_serr_disable_flag suppresses system-error signaling */
		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}
/*
 * This function is used for enabling CE reporting and setting the AER CE mask.
 * When called from outside the pcie module it should always be preceded by
 * a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint16_t device_sts, device_ctl;
	uint32_t tmp_pcie_aer_ce_mask;

	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE reporting
	 * enable field in the device control register and the AER CE mask. We
	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
	 */
	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors (status bits are write-1-to-clear) */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}
/*
 * Disable PCI Express baseline and Advanced Error Reporting for the
 * device at dip: masks UCE/CE/SUCE errors, turns off ECRC, and (for a
 * root device) clears the system-error and root-error-command enables.
 */
/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint16_t device_ctl;
	uint32_t aer_reg;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Disable PCI-Express Baseline Error Handling
	 */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

	/*
	 * Disable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		goto root;

	/* Disable Uncorrectable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

	/* Disable Correctable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

	/* Disable ECRC generation and checking */
	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
		    PCIE_AER_CTL_ECRC_CHECK_ENA);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
	}

	/*
	 * Disable Secondary Uncorrectable errors if this is a bridge
	 */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
	/*
	 * disable Root Control this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if (!pcie_serr_disable_flag) {
		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
		device_ctl &= ~PCIE_ROOT_SYS_ERR;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
	}

	if (!PCIE_HAS_AER(bus_p))
		return;

	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		device_ctl &= ~pcie_root_error_cmd_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
	}
}
/*
 * Extract bdf from "reg" property.
 *
 * Returns DDI_SUCCESS with *bdf set, or DDI_FAILURE if the property is
 * missing or too short to hold a single pci_regspec_t.
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
	pci_regspec_t *regspec;
	int reglen;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
		ddi_prop_free(regspec);
		return (DDI_FAILURE);
	}

	/* Get phys_hi from first element. All have same bdf. */
	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

	ddi_prop_free(regspec);
	return (DDI_SUCCESS);
}
1666 dev_info_t *
1667 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
1669 dev_info_t *cdip = rdip;
1671 for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
1674 return (cdip);
/*
 * Return the bdf to use for a DMA transfer initiated below dip on
 * behalf of rdip, or PCIE_INVALID_BDF when it cannot be determined.
 */
uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip;

	/*
	 * As part of the probing, the PCI fcode interpreter may setup a DMA
	 * request if a given card has a fcode on it using dip and rdip of the
	 * hotplug connector i.e, dip and rdip of px/pcieb driver. In this
	 * case, return a invalid value for the bdf since we cannot get to the
	 * bdf value of the actual device which will be initiating this DMA.
	 */
	if (rdip == dip)
		return (PCIE_INVALID_BDF);

	cdip = pcie_get_my_childs_dip(dip, rdip);

	/*
	 * For a given rdip, return the bdf value of dip's (px or pcieb)
	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
	 *
	 * XXX - For now, return a invalid bdf value for all PCI and PCI-X
	 * devices since this needs more work.
	 */
	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}
1705 uint32_t
1706 pcie_get_aer_uce_mask()
1708 return (pcie_aer_uce_mask);
1710 uint32_t
1711 pcie_get_aer_ce_mask()
1713 return (pcie_aer_ce_mask);
1715 uint32_t
1716 pcie_get_aer_suce_mask()
1718 return (pcie_aer_suce_mask);
1720 uint32_t
1721 pcie_get_serr_mask()
1723 return (pcie_serr_disable_flag);
/*
 * Set the global AER Uncorrectable Error mask.  Masking UR also turns
 * off baseline UR reporting, and masking ECRC disables ECRC
 * generation/checking, keeping the two mechanisms consistent.
 */
void
pcie_set_aer_uce_mask(uint32_t mask)
{
	pcie_aer_uce_mask = mask;
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}
/* Set the global AER Correctable Error mask used by pcie_enable_ce(). */
void
pcie_set_aer_ce_mask(uint32_t mask)
{
	pcie_aer_ce_mask = mask;
}
/* Set the global AER Secondary Uncorrectable Error mask for bridges. */
void
pcie_set_aer_suce_mask(uint32_t mask)
{
	pcie_aer_suce_mask = mask;
}
/* Set the global SERR-disable flag (nonzero suppresses PCIE_ROOT_SYS_ERR). */
void
pcie_set_serr_mask(uint32_t mask)
{
	pcie_serr_disable_flag = mask;
}
1756 * Is the rdip a child of dip. Used for checking certain CTLOPS from bubbling
1757 * up erronously. Ex. ISA ctlops to a PCI-PCI Bridge.
1759 boolean_t
1760 pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
1762 dev_info_t *cdip = ddi_get_child(dip);
1763 for (; cdip; cdip = ddi_get_next_sibling(cdip))
1764 if (cdip == rdip)
1765 break;
1766 return (cdip != NULL);
1769 boolean_t
1770 pcie_is_link_disabled(dev_info_t *dip)
1772 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
1774 if (PCIE_IS_PCIE(bus_p)) {
1775 if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
1776 PCIE_LINKCTL_LINK_DISABLE)
1777 return (B_TRUE);
1779 return (B_FALSE);
/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	/* Narrow max_supported to the smallest MPS capability in the fabric */
	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, 0,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	/* The root port itself may be more limited than the fabric */
	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}
/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	pcie_bus_t *bus_p;
	dev_info_t *pdip = ddi_get_parent(cdip);
	uint8_t dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		/* Non-zero ARI function: nothing to program, succeed */
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		int suggested_mrrs, fabric_mps;
		uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;

		dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/*
		 * A negative bus_mps means the fabric MPS was never
		 * established; fall back to the compiled-in defaults
		 * for the MPS/MRRS fields and stop here.
		 */
		if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
		    PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
			dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
			    (pcie_devctl_default &
			    (PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK));

			PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
			return (DDI_SUCCESS);
		}

		device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
		    PCIE_DEVCTL_MAX_READ_REQ_SHIFT;

		/* Clamp this device to the smaller of its cap and the fabric */
		if (device_mps_cap < fabric_mps)
			device_mrrs = device_mps = device_mps_cap;
		else
			device_mps = (uint16_t)fabric_mps;

		/* "suggested-mrrs" may lower (never raise) the MRRS */
		suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);

		if ((device_mps == fabric_mps) ||
		    (suggested_mrrs < device_mrrs))
			device_mrrs = (uint16_t)suggested_mrrs;

		/*
		 * Replace MPS and MRRS settings.
		 */
		dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK);

		dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
		    device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = device_mps;
	}

	return (DDI_SUCCESS);
}
1896 * Scans a device tree/branch for a maximum payload size capabilities.
1898 * rc_dip - dip of Root Complex.
1899 * dip - dip of device where scan will begin.
1900 * max_supported (IN) - maximum allowable MPS.
1901 * max_supported (OUT) - maximum payload size capability of fabric.
1903 void
1904 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
1906 if (dip == NULL)
1907 return;
1910 * Perform a fabric scan to obtain Maximum Payload Capabilities
1912 (void) pcie_scan_mps(rc_dip, dip, max_supported);
1914 PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported);
/*
 * Scans fabric and determines Maximum Payload Size based on
 * highest common denominator alogorithm
 */
static void
pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
	int circular_count;
	pcie_max_supported_t max_pay_load_supported;

	max_pay_load_supported.dip = rc_dip;
	max_pay_load_supported.highest_common_mps = *max_supported;

	/* Hold the tree stable while the walker reads config space */
	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
	ddi_walk_devs(dip, pcie_get_max_supported,
	    (void *)&max_pay_load_supported);
	ndi_devi_exit(ddi_get_parent(dip), circular_count);

	*max_supported = max_pay_load_supported.highest_common_mps;
}
/*
 * Called as part of the Maximum Payload Size scan.
 *
 * Reads the device's DEVCAP MPS field through a cautious mapping of its
 * config space and lowers current->highest_common_mps accordingly.
 * Always returns DDI_WALK_CONTINUE; devices that can't be examined are
 * simply skipped.  Cleanup uses a fail-label chain so each acquired
 * resource (reg property, mapping) is released exactly once.
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	/*
	 * If the suggested-mrrs property exists, then don't include this
	 * device in the MPS capabilities scan.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "suggested-mrrs") != 0)
		goto fail1;

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Can not read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	max_supported = PCI_CAP_GET16(config_handle, 0, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	/* Highest-common-denominator: keep the minimum seen so far */
	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}
/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port otherwise
 *	     DDI_FAILURE.
 */
int
pcie_root_port(dev_info_t *dip)
{
	int port_type;
	uint16_t cap_ptr;
	ddi_acc_handle_t config_handle;
	dev_info_t *cdip = ddi_get_child(dip);

	/*
	 * Determine if any of the children of the passed in dip
	 * are root ports.
	 */
	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {

		/* Children we cannot set up config access for are skipped */
		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
			continue;

		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
		    &cap_ptr)) == DDI_FAILURE) {
			pci_config_teardown(&config_handle);
			continue;
		}

		port_type = PCI_CAP_GET16(config_handle, 0, cap_ptr,
		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

		pci_config_teardown(&config_handle);

		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
			return (DDI_SUCCESS);
	}

	/* No root ports were found */

	return (DDI_FAILURE);
}
2052 * Function that determines if a device a PCIe device.
2054 * dip - dip of device.
2056 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
2059 pcie_dev(dev_info_t *dip)
2061 /* get parent device's device_type property */
2062 char *device_type;
2063 int rc = DDI_FAILURE;
2064 dev_info_t *pdip = ddi_get_parent(dip);
2066 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
2067 DDI_PROP_DONTPASS, "device_type", &device_type)
2068 != DDI_PROP_SUCCESS) {
2069 return (DDI_FAILURE);
2072 if (strcmp(device_type, "pciex") == 0)
2073 rc = DDI_SUCCESS;
2074 else
2075 rc = DDI_FAILURE;
2077 ddi_prop_free(device_type);
2078 return (rc);
/*
 * Function to map in a device's memory space.
 *
 * Builds a cautious-access (DDI_CAUTIOUS_ACC) handle over phys_spec and
 * maps it via ddi_map().  On success *addrp/*handlep are valid; on
 * failure the handle is freed and *handlep is NULL.
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	/* cautious access: reads survive faulted/absent hardware */
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}
/*
 * Map out memory that was mapped in with pcie_map_phys();
 *
 * Issues the matching DDI_MO_UNMAP request and frees the access handle;
 * *handlep is NULL on return.
 */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
/* Record whether Role-Based Error Reporting CEs are treated as fatal. */
void
pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);

	bus_p->bus_pfd->pe_rber_fatal = val;
}
/*
 * Return parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);

	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}
/*
 * Determine whether the port at dip supports ARI forwarding.
 *
 * Returns PCIE_ARI_FORW_SUPPORTED only for a downstream or root port,
 * with ARI not administratively disabled, a PCIe capability version of
 * at least 2.0, and the ARI Forwarding bit set in Device Capabilities 2.
 */
int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	/* Only downstream-facing ports can forward ARI */
	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	/* DEVCAP2/DEVCTL2 only exist from capability version 2.0 on */
	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}
2217 pcie_ari_enable(dev_info_t *dip)
2219 uint16_t devctl2;
2220 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2222 PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);
2224 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2225 return (DDI_FAILURE);
2227 devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2228 devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
2229 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2231 PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
2232 dip, devctl2);
2234 return (DDI_SUCCESS);
2238 pcie_ari_disable(dev_info_t *dip)
2240 uint16_t devctl2;
2241 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2243 PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);
2245 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2246 return (DDI_FAILURE);
2248 devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2249 devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
2250 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2252 PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
2253 dip, devctl2);
2255 return (DDI_SUCCESS);
2259 pcie_ari_is_enabled(dev_info_t *dip)
2261 uint16_t devctl2;
2262 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2264 PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);
2266 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2267 return (PCIE_ARI_FORW_DISABLED);
2269 devctl2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCTL2);
2271 PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
2272 dip, devctl2);
2274 if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
2275 PCIE_DBG("pcie_ari_is_enabled: "
2276 "dip=%p: ARI Forwarding is enabled\n", dip);
2277 return (PCIE_ARI_FORW_ENABLED);
2280 return (PCIE_ARI_FORW_DISABLED);
/*
 * Determine whether the device at dip carries the ARI extended
 * capability.  Returns PCIE_ARI_DEVICE or PCIE_NOT_ARI_DEVICE.
 */
int
pcie_ari_device(dev_info_t *dip)
{
	ddi_acc_handle_t handle;
	uint16_t cap_ptr;

	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated. This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* ARI Capability was found so it must be a ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}
/*
 * Read the Next Function Number field from the device's ARI capability
 * into *func.  Returns DDI_FAILURE when config access cannot be set up
 * or the device has no ARI capability.
 */
int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated. This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, 0, cap_ptr, PCIE_ARI_CAP);

	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}
2359 dev_info_t *
2360 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
2362 pcie_req_id_t child_bdf;
2363 dev_info_t *cdip;
2365 for (cdip = ddi_get_child(dip); cdip;
2366 cdip = ddi_get_next_sibling(cdip)) {
2368 if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2369 return (NULL);
2371 if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
2372 return (cdip);
2374 return (NULL);
2377 #ifdef DEBUG
2379 static void
2380 pcie_print_bus(pcie_bus_t *bus_p)
2382 pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
2383 pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);
2385 pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
2386 pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
2387 pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
2388 pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
2389 pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
2390 pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
2391 pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
2392 pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
2393 pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
2394 pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
2398 * For debugging purposes set pcie_dbg_print != 0 to see printf messages
2399 * during interrupt.
2401 * When a proper solution is in place this code will disappear.
2402 * Potential solutions are:
2403 * o circular buffers
2404 * o taskq to print at lower pil
2406 int pcie_dbg_print = 0;
2407 void
2408 pcie_dbg(char *fmt, ...)
2410 va_list ap;
2412 if (!pcie_debug_flags) {
2413 return;
2415 va_start(ap, fmt);
2416 if (servicing_interrupt()) {
2417 if (pcie_dbg_print) {
2418 prom_vprintf(fmt, ap);
2420 } else {
2421 prom_vprintf(fmt, ap);
2423 va_end(ap);
2425 #endif /* DEBUG */
2427 #if defined(__i386) || defined(__amd64)
2428 static void
2429 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
2430 boolean_t *empty_mem_range)
2432 uint8_t class, subclass;
2433 uint_t val;
2435 class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
2436 subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);
2438 if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
2439 val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
2440 PCI_BCNF_IO_MASK) << 8);
2442 * Assuming that a zero based io_range[0] implies an
2443 * invalid I/O range. Likewise for mem_range[0].
2445 if (val == 0)
2446 *empty_io_range = B_TRUE;
2447 val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
2448 PCI_BCNF_MEM_MASK) << 16);
2449 if (val == 0)
2450 *empty_mem_range = B_TRUE;
2454 #endif /* defined(__i386) || defined(__amd64) */