4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
32 * Host to PCI-Express local bus driver
36 #include <sys/modctl.h>
38 #include <sys/pci_impl.h>
39 #include <sys/pcie_impl.h>
40 #include <sys/sysmacros.h>
41 #include <sys/ddi_intr.h>
42 #include <sys/sunndi.h>
43 #include <sys/sunddi.h>
44 #include <sys/ddifm.h>
45 #include <sys/ndifm.h>
46 #include <sys/fm/util.h>
47 #include <sys/hotplug/pci/pcie_hp.h>
48 #include <io/pci/pci_tools_ext.h>
49 #include <io/pci/pci_common.h>
50 #include <io/pciex/pcie_nvidia.h>
/*
 * True iff the DDI access handle `hp` was set up for standard (I/O-port
 * style) PCI config space access rather than memory-mapped (MMCFG) access.
 * Memory-mapped config handles have DDI_ACCATTR_CPU_VADDR set in addition
 * to DDI_ACCATTR_CONFIG_SPACE, so the masked comparison below matches only
 * the standard-access case.
 *
 * NOTE(review): the first continuation line (the NULL guard on hp) was lost
 * in extraction and has been restored here — confirm against the original.
 */
#define	NPE_IS_HANDLE_FOR_STDCFG_ACC(hp) \
	((hp) != NULL && \
	((ddi_acc_hdl_t *)(hp))->ah_platform_private != NULL && \
	(((ddi_acc_impl_t *)((ddi_acc_hdl_t *)(hp))-> \
	ah_platform_private)-> \
	ahi_acc_attr &(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_CONFIG_SPACE)) \
	== DDI_ACCATTR_CONFIG_SPACE)
/*
 * Bus Operation functions
 *
 * Forward declarations for the bus_ops vector entries and the FMA
 * callback implemented later in this file.
 */
static int	npe_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
		    off_t, off_t, caddr_t *);
static int	npe_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
		    void *, void *);
static int	npe_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
		    ddi_intr_handle_impl_t *, void *);
static int	npe_fm_init(dev_info_t *, dev_info_t *, int,
		    ddi_iblock_cookie_t *);

/* Error callback registered with the FMA framework (see npe_attach()) */
static int	npe_fm_callback(dev_info_t *, ddi_fm_error_t *, const void *);
/*
 * Disable URs and Received MA for all PCIe devices.  Until x86 SW is changed
 * so that random drivers do not do PIO accesses on devices that they do not
 * own, these error bits must be disabled.  SERR must also be disabled if URs
 * have been masked (see npe_initchild(), which sets the SERR mask when
 * npe_aer_uce_mask includes PCIE_AER_UCE_UR).
 *
 * These masks are OR-ed into the system-wide AER masks in npe_initchild().
 */
uint32_t	npe_aer_uce_mask = PCIE_AER_UCE_UR;	/* uncorrectable */
uint32_t	npe_aer_ce_mask = 0;			/* correctable */
uint32_t	npe_aer_suce_mask = PCIE_AER_SUCE_RCVD_MA; /* secondary UC */
87 struct bus_ops npe_bus_ops
= {
104 0, /* (*bus_get_eventcookie)(); */
105 0, /* (*bus_add_eventcall)(); */
106 0, /* (*bus_remove_eventcall)(); */
107 0, /* (*bus_post_event)(); */
108 0, /* (*bus_intr_ctl)(); */
109 0, /* (*bus_config)(); */
110 0, /* (*bus_unconfig)(); */
111 npe_fm_init
, /* (*bus_fm_init)(); */
112 NULL
, /* (*bus_fm_fini)(); */
113 NULL
, /* (*bus_fm_access_enter)(); */
114 NULL
, /* (*bus_fm_access_exit)(); */
115 NULL
, /* (*bus_power)(); */
116 npe_intr_ops
, /* (*bus_intr_op)(); */
117 pcie_hp_common_ops
/* (*bus_hp_op)(); */
120 static int npe_open(dev_t
*, int, int, cred_t
*);
121 static int npe_close(dev_t
, int, int, cred_t
*);
122 static int npe_ioctl(dev_t
, int, intptr_t, int, cred_t
*, int *);
124 struct cb_ops npe_cb_ops
= {
126 npe_close
, /* close */
127 nodev
, /* strategy */
132 npe_ioctl
, /* ioctl */
137 pcie_prop_op
, /* cb_prop_op */
138 NULL
, /* streamtab */
139 D_NEW
| D_MP
| D_HOTPLUG
, /* Driver compatibility flag */
141 nodev
, /* int (*cb_aread)() */
142 nodev
/* int (*cb_awrite)() */
147 * Device Node Operation functions
149 static int npe_attach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
);
150 static int npe_detach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
);
151 static int npe_info(dev_info_t
*, ddi_info_cmd_t
, void *, void **);
153 struct dev_ops npe_ops
= {
154 DEVO_REV
, /* devo_rev */
157 nulldev
, /* identify */
159 npe_attach
, /* attach */
160 npe_detach
, /* detach */
162 &npe_cb_ops
, /* driver operations */
163 &npe_bus_ops
, /* bus operations */
165 ddi_quiesce_not_needed
, /* quiesce */
169 * Internal routines in support of particular npe_ctlops.
171 static int npe_removechild(dev_info_t
*child
);
172 static int npe_initchild(dev_info_t
*child
);
175 * External support routine
177 extern void npe_query_acpi_mcfg(dev_info_t
*dip
);
178 extern void npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl
);
179 extern int npe_disable_empty_bridges_workaround(dev_info_t
*child
);
180 extern void npe_nvidia_error_workaround(ddi_acc_handle_t cfg_hdl
);
181 extern void npe_intel_error_workaround(ddi_acc_handle_t cfg_hdl
);
182 extern boolean_t
npe_is_mmcfg_supported(dev_info_t
*dip
);
183 extern void npe_enable_htmsi_children(dev_info_t
*dip
);
184 extern int npe_save_htconfig_children(dev_info_t
*dip
);
185 extern int npe_restore_htconfig_children(dev_info_t
*dip
);
/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,				/* Type of module */
	"Host to PCIe nexus driver",		/* Name of module */
	&npe_ops,				/* driver ops */
};

/*
 * NOTE(review): the modlinkage initializer fields were dropped in
 * extraction; reconstructed as the conventional single-modldrv linkage.
 */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};
/* Save minimal state. */
void *npe_statep;

/*
 * _init(9E): initialize the per-instance soft-state list, then install
 * the module.  On mod_install() failure the soft state is torn down so
 * a failed load leaves nothing behind.
 */
int
_init(void)
{
	int e;

	/*
	 * Initialize per-pci bus soft state pointer.
	 */
	e = ddi_soft_state_init(&npe_statep, sizeof (pci_state_t), 1);
	/*
	 * NOTE(review): the early-return on soft-state-init failure was
	 * dropped in extraction; reconstructed below — confirm.
	 */
	if (e != 0)
		return (e);

	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&npe_statep);

	return (e);
}
/*
 * _fini(9E): remove the module; only release the soft-state list once
 * mod_remove() has succeeded (a busy module must keep its state).
 *
 * NOTE(review): the declaration of `rc` and the early-return on
 * mod_remove() failure were dropped in extraction; reconstructed.
 */
int
_fini(void)
{
	int rc;

	rc = mod_remove(&modlinkage);
	if (rc != 0)
		return (rc);

	ddi_soft_state_fini(&npe_statep);
	return (rc);
}
/*
 * _info(9E): report module information via mod_info(9F).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*
 * getinfo(9E) entry point: translate a dev_t (encoded in `arg`) into
 * either its instance number or its dev_info pointer.  The instance is
 * recovered from the minor number via PCI_MINOR_NUM_TO_INSTANCE.
 *
 * NOTE(review): the switch scaffolding (switch header, breaks, the NULL
 * soft-state check for DEVT2DEVINFO, and the default arm) was dropped in
 * extraction and has been reconstructed — confirm against the original.
 */
static int
npe_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	minor_t		minor = getminor((dev_t)arg);
	int		instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
	pci_state_t	*pcip = ddi_get_soft_state(npe_statep, instance);
	int		ret = DDI_SUCCESS;

	switch (cmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	case DDI_INFO_DEVT2DEVINFO:
		/* Can't hand back a dip if attach never stored one. */
		if (pcip == NULL) {
			ret = DDI_FAILURE;
			break;
		}
		*result = (void *)pcip->pci_dip;
		break;
	default:
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}
275 npe_attach(dev_info_t
*devi
, ddi_attach_cmd_t cmd
)
277 int instance
= ddi_get_instance(devi
);
278 pci_state_t
*pcip
= NULL
;
280 if (cmd
== DDI_RESUME
) {
282 * the system might still be able to resume even if this fails
284 (void) npe_restore_htconfig_children(devi
);
285 return (DDI_SUCCESS
);
289 * We must do this here in order to ensure that all top level devices
290 * get their HyperTransport MSI mapping regs programmed first.
291 * "Memory controller" and "hostbridge" class devices are leaf devices
292 * that may affect MSI translation functionality for devices
293 * connected to the same link/bus.
295 * This will also program HT MSI mapping registers on root buses
296 * devices (basically sitting on an HT bus) that are not dependent
297 * on the aforementioned HT devices for MSI translation.
299 npe_enable_htmsi_children(devi
);
301 if (ddi_prop_update_string(DDI_DEV_T_NONE
, devi
, "device_type",
302 "pciex") != DDI_PROP_SUCCESS
) {
303 cmn_err(CE_WARN
, "npe: 'device_type' prop create failed");
306 if (ddi_soft_state_zalloc(npe_statep
, instance
) == DDI_SUCCESS
)
307 pcip
= ddi_get_soft_state(npe_statep
, instance
);
310 return (DDI_FAILURE
);
312 pcip
->pci_dip
= devi
;
313 pcip
->pci_soft_state
= PCI_SOFT_STATE_CLOSED
;
315 if (pcie_init(devi
, NULL
) != DDI_SUCCESS
)
318 /* Second arg: initialize for pci_express root nexus */
319 if (pcitool_init(devi
, B_TRUE
) != DDI_SUCCESS
)
322 pcip
->pci_fmcap
= DDI_FM_EREPORT_CAPABLE
| DDI_FM_ERRCB_CAPABLE
|
323 DDI_FM_ACCCHK_CAPABLE
| DDI_FM_DMACHK_CAPABLE
;
324 ddi_fm_init(devi
, &pcip
->pci_fmcap
, &pcip
->pci_fm_ibc
);
326 if (pcip
->pci_fmcap
& DDI_FM_ERRCB_CAPABLE
) {
327 ddi_fm_handler_register(devi
, npe_fm_callback
, NULL
);
330 PCIE_DIP2PFD(devi
) = kmem_zalloc(sizeof (pf_data_t
), KM_SLEEP
);
331 pcie_rc_init_pfd(devi
, PCIE_DIP2PFD(devi
));
333 npe_query_acpi_mcfg(devi
);
334 ddi_report_dev(devi
);
335 pcie_fab_init_bus(devi
, PCIE_BUS_FINAL
);
337 return (DDI_SUCCESS
);
340 (void) pcie_uninit(devi
);
342 pcie_rc_fini_bus(devi
);
343 ddi_soft_state_free(npe_statep
, instance
);
345 return (DDI_FAILURE
);
350 npe_detach(dev_info_t
*devi
, ddi_detach_cmd_t cmd
)
352 int instance
= ddi_get_instance(devi
);
355 pcip
= ddi_get_soft_state(npe_statep
, ddi_get_instance(devi
));
359 pcie_fab_fini_bus(devi
, PCIE_BUS_INITIAL
);
361 /* Uninitialize pcitool support. */
362 pcitool_uninit(devi
);
364 if (pcie_uninit(devi
) != DDI_SUCCESS
)
365 return (DDI_FAILURE
);
367 if (pcip
->pci_fmcap
& DDI_FM_ERRCB_CAPABLE
)
368 ddi_fm_handler_unregister(devi
);
370 pcie_rc_fini_pfd(PCIE_DIP2PFD(devi
));
371 kmem_free(PCIE_DIP2PFD(devi
), sizeof (pf_data_t
));
374 ddi_soft_state_free(npe_statep
, instance
);
375 return (DDI_SUCCESS
);
379 * the system might still be able to suspend/resume even if
382 (void) npe_save_htconfig_children(devi
);
383 return (DDI_SUCCESS
);
385 return (DDI_FAILURE
);
390 * Configure the access handle for standard configuration space
391 * access (see pci_fm_acc_setup for code that initializes the
392 * access-function pointers).
395 npe_setup_std_pcicfg_acc(dev_info_t
*rdip
, ddi_map_req_t
*mp
,
396 ddi_acc_hdl_t
*hp
, off_t offset
, off_t len
)
400 if ((ret
= pci_fm_acc_setup(hp
, offset
, len
)) ==
402 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip
)) &&
403 mp
->map_handlep
->ah_acc
.devacc_attr_access
404 != DDI_DEFAULT_ACC
) {
405 ndi_fmc_insert(rdip
, ACC_HANDLE
,
406 (void *)mp
->map_handlep
, NULL
);
413 npe_bus_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
414 off_t offset
, off_t len
, caddr_t
*vaddrp
)
422 pci_regspec_t pci_reg
;
423 pci_regspec_t
*pci_rp
;
425 pci_acc_cfblk_t
*cfp
;
430 mr
= *mp
; /* Get private copy of request */
434 * check for register number
436 switch (mp
->map_type
) {
438 pci_reg
= *(pci_regspec_t
*)(mp
->map_obj
.rp
);
440 if (pci_common_get_reg_prop(rdip
, pci_rp
) != DDI_SUCCESS
)
441 return (DDI_FAILURE
);
444 rnumber
= mp
->map_obj
.rnumber
;
446 * get ALL "reg" properties for dip, select the one of
447 * of interest. In x86, "assigned-addresses" property
448 * is identical to the "reg" property, so there is no
449 * need to cross check the two to determine the physical
450 * address of the registers.
451 * This routine still performs some validity checks to
452 * make sure that everything is okay.
454 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, rdip
,
455 DDI_PROP_DONTPASS
, "reg", (int **)&pci_rp
,
456 (uint_t
*)&length
) != DDI_PROP_SUCCESS
)
457 return (DDI_FAILURE
);
460 * validate the register number.
462 length
/= (sizeof (pci_regspec_t
) / sizeof (int));
463 if (rnumber
>= length
) {
464 ddi_prop_free(pci_rp
);
465 return (DDI_FAILURE
);
469 * copy the required entry.
471 pci_reg
= pci_rp
[rnumber
];
474 * free the memory allocated by ddi_prop_lookup_int_array
476 ddi_prop_free(pci_rp
);
479 if (pci_common_get_reg_prop(rdip
, pci_rp
) != DDI_SUCCESS
)
480 return (DDI_FAILURE
);
481 mp
->map_type
= DDI_MT_REGSPEC
;
484 return (DDI_ME_INVAL
);
487 space
= pci_rp
->pci_phys_hi
& PCI_REG_ADDR_M
;
490 * check for unmap and unlock of address space
492 if ((mp
->map_op
== DDI_MO_UNMAP
) || (mp
->map_op
== DDI_MO_UNLOCK
)) {
495 reg
.regspec_bustype
= 1;
498 case PCI_ADDR_CONFIG
:
500 * If this is an unmap/unlock of a standard config
501 * space mapping (memory-mapped config space mappings
502 * would have the DDI_ACCATTR_CPU_VADDR bit set in the
503 * acc_attr), undo that setup here.
505 if (NPE_IS_HANDLE_FOR_STDCFG_ACC(mp
->map_handlep
)) {
507 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip
)) &&
508 mp
->map_handlep
->ah_acc
.devacc_attr_access
509 != DDI_DEFAULT_ACC
) {
510 ndi_fmc_remove(rdip
, ACC_HANDLE
,
511 (void *)mp
->map_handlep
);
513 return (DDI_SUCCESS
);
516 pci_rp
->pci_size_low
= PCIE_CONF_HDR_SIZE
;
521 * MEM64 requires special treatment on map, to check
522 * that the device is below 4G. On unmap, however,
523 * we can assume that everything is OK... the map
524 * must have succeeded.
528 reg
.regspec_bustype
= 0;
532 return (DDI_FAILURE
);
536 * Adjust offset and length
537 * A non-zero length means override the one in the regspec.
539 pci_rp
->pci_phys_low
+= (uint_t
)offset
;
541 pci_rp
->pci_size_low
= len
;
543 reg
.regspec_addr
= pci_rp
->pci_phys_low
;
544 reg
.regspec_size
= pci_rp
->pci_size_low
;
546 mp
->map_obj
.rp
= ®
;
547 retval
= ddi_map(dip
, mp
, (off_t
)0, (off_t
)0, vaddrp
);
548 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip
)) &&
549 mp
->map_handlep
->ah_acc
.devacc_attr_access
!=
551 ndi_fmc_remove(rdip
, ACC_HANDLE
,
552 (void *)mp
->map_handlep
);
558 /* check for user mapping request - not legal for Config */
559 if (mp
->map_op
== DDI_MO_MAP_HANDLE
&& space
== PCI_ADDR_CONFIG
) {
560 cmn_err(CE_NOTE
, "npe: Config mapping request from user\n");
561 return (DDI_FAILURE
);
566 * Note that pci_fm_acc_setup() is called to serve two purposes
567 * i) enable legacy PCI I/O style config space access
568 * ii) register with FMA
570 if (space
== PCI_ADDR_CONFIG
) {
572 /* Can't map config space without a handle */
573 hp
= (ddi_acc_hdl_t
*)mp
->map_handlep
;
575 return (DDI_FAILURE
);
577 /* record the device address for future reference */
578 cfp
= (pci_acc_cfblk_t
*)&hp
->ah_bus_private
;
579 cfp
->c_busnum
= PCI_REG_BUS_G(pci_rp
->pci_phys_hi
);
580 cfp
->c_devnum
= PCI_REG_DEV_G(pci_rp
->pci_phys_hi
);
581 cfp
->c_funcnum
= PCI_REG_FUNC_G(pci_rp
->pci_phys_hi
);
583 *vaddrp
= (caddr_t
)offset
;
585 /* Check if MMCFG is supported */
586 if (!npe_is_mmcfg_supported(rdip
)) {
587 return (npe_setup_std_pcicfg_acc(rdip
, mp
, hp
,
592 if (ddi_prop_lookup_int64_array(DDI_DEV_T_ANY
, rdip
, 0,
593 "ecfg", &ecfginfo
, &nelem
) == DDI_PROP_SUCCESS
) {
596 cfp
->c_busnum
< ecfginfo
[2] ||
597 cfp
->c_busnum
> ecfginfo
[3]) {
599 * Invalid property or Doesn't contain the
600 * requested bus; fall back to standard
601 * (I/O-based) config access.
603 ddi_prop_free(ecfginfo
);
604 return (npe_setup_std_pcicfg_acc(rdip
, mp
, hp
,
607 pci_rp
->pci_phys_low
= ecfginfo
[0];
609 ddi_prop_free(ecfginfo
);
611 pci_rp
->pci_phys_low
+= ((cfp
->c_busnum
<< 20) |
612 (cfp
->c_devnum
) << 15 |
613 (cfp
->c_funcnum
<< 12));
615 pci_rp
->pci_size_low
= PCIE_CONF_HDR_SIZE
;
619 * Couldn't find the MMCFG property -- fall back to
620 * standard config access
622 return (npe_setup_std_pcicfg_acc(rdip
, mp
, hp
,
627 length
= pci_rp
->pci_size_low
;
632 if ((offset
>= length
) || (len
> length
) || (offset
+ len
> length
))
633 return (DDI_FAILURE
);
636 * Adjust offset and length
637 * A non-zero length means override the one in the regspec.
639 pci_rp
->pci_phys_low
+= (uint_t
)offset
;
641 pci_rp
->pci_size_low
= len
;
644 * convert the pci regsec into the generic regspec used by the
645 * parent root nexus driver.
649 reg
.regspec_bustype
= 1;
651 case PCI_ADDR_CONFIG
:
654 * We can't handle 64-bit devices that are mapped above
655 * 4G or that are larger than 4G.
657 if (pci_rp
->pci_phys_mid
!= 0 || pci_rp
->pci_size_hi
!= 0)
658 return (DDI_FAILURE
);
660 * Other than that, we can treat them as 32-bit mappings
664 reg
.regspec_bustype
= 0;
667 return (DDI_FAILURE
);
670 reg
.regspec_addr
= pci_rp
->pci_phys_low
;
671 reg
.regspec_size
= pci_rp
->pci_size_low
;
673 mp
->map_obj
.rp
= ®
;
674 retval
= ddi_map(dip
, mp
, (off_t
)0, (off_t
)0, vaddrp
);
675 if (retval
== DDI_SUCCESS
) {
677 * For config space gets force use of cautious access routines.
678 * These will handle default and protected mode accesses too.
680 if (space
== PCI_ADDR_CONFIG
) {
681 ap
= (ddi_acc_impl_t
*)mp
->map_handlep
;
682 ap
->ahi_acc_attr
&= ~DDI_ACCATTR_DIRECT
;
683 ap
->ahi_acc_attr
|= DDI_ACCATTR_CONFIG_SPACE
;
684 ap
->ahi_get8
= i_ddi_caut_get8
;
685 ap
->ahi_get16
= i_ddi_caut_get16
;
686 ap
->ahi_get32
= i_ddi_caut_get32
;
687 ap
->ahi_get64
= i_ddi_caut_get64
;
688 ap
->ahi_rep_get8
= i_ddi_caut_rep_get8
;
689 ap
->ahi_rep_get16
= i_ddi_caut_rep_get16
;
690 ap
->ahi_rep_get32
= i_ddi_caut_rep_get32
;
691 ap
->ahi_rep_get64
= i_ddi_caut_rep_get64
;
693 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip
)) &&
694 mp
->map_handlep
->ah_acc
.devacc_attr_access
!=
696 ndi_fmc_insert(rdip
, ACC_HANDLE
,
697 (void *)mp
->map_handlep
, NULL
);
707 npe_ctlops(dev_info_t
*dip
, dev_info_t
*rdip
,
708 ddi_ctl_enum_t ctlop
, void *arg
, void *result
)
713 pci_regspec_t
*drv_regp
;
714 struct attachspec
*asp
;
715 struct detachspec
*dsp
;
716 pci_state_t
*pci_p
= ddi_get_soft_state(npe_statep
,
717 ddi_get_instance(dip
));
720 case DDI_CTLOPS_REPORTDEV
:
721 if (rdip
== (dev_info_t
*)0)
722 return (DDI_FAILURE
);
723 cmn_err(CE_CONT
, "?PCI Express-device: %s@%s, %s%d\n",
724 ddi_node_name(rdip
), ddi_get_name_addr(rdip
),
725 ddi_driver_name(rdip
), ddi_get_instance(rdip
));
726 return (DDI_SUCCESS
);
728 case DDI_CTLOPS_INITCHILD
:
729 return (npe_initchild((dev_info_t
*)arg
));
731 case DDI_CTLOPS_UNINITCHILD
:
732 return (npe_removechild((dev_info_t
*)arg
));
734 case DDI_CTLOPS_SIDDEV
:
735 return (DDI_SUCCESS
);
737 case DDI_CTLOPS_REGSIZE
:
738 case DDI_CTLOPS_NREGS
:
739 if (rdip
== (dev_info_t
*)0)
740 return (DDI_FAILURE
);
743 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY
, rdip
,
744 DDI_PROP_DONTPASS
, "reg", (int **)&drv_regp
,
745 ®len
) != DDI_PROP_SUCCESS
) {
746 return (DDI_FAILURE
);
749 totreg
= (reglen
* sizeof (int)) / sizeof (pci_regspec_t
);
750 if (ctlop
== DDI_CTLOPS_NREGS
)
751 *(int *)result
= totreg
;
752 else if (ctlop
== DDI_CTLOPS_REGSIZE
) {
755 ddi_prop_free(drv_regp
);
756 return (DDI_FAILURE
);
758 *(off_t
*)result
= drv_regp
[rn
].pci_size_low
;
760 ddi_prop_free(drv_regp
);
762 return (DDI_SUCCESS
);
764 case DDI_CTLOPS_POWER
:
766 power_req_t
*reqp
= (power_req_t
*)arg
;
768 * We currently understand reporting of PCI_PM_IDLESPEED
769 * capability. Everything else is passed up.
771 if ((reqp
->request_type
== PMR_REPORT_PMCAP
) &&
772 (reqp
->req
.report_pmcap_req
.cap
== PCI_PM_IDLESPEED
))
773 return (DDI_SUCCESS
);
778 case DDI_CTLOPS_PEEK
:
779 case DDI_CTLOPS_POKE
:
780 return (pci_common_peekpoke(dip
, rdip
, ctlop
, arg
, result
));
782 /* X86 systems support PME wakeup from suspended state */
783 case DDI_CTLOPS_ATTACH
:
784 if (!pcie_is_child(dip
, rdip
))
785 return (DDI_SUCCESS
);
787 asp
= (struct attachspec
*)arg
;
788 if ((asp
->when
== DDI_POST
) && (asp
->result
== DDI_SUCCESS
)) {
789 pf_init(rdip
, (void *)pci_p
->pci_fm_ibc
, asp
->cmd
);
790 (void) pcie_postattach_child(rdip
);
793 /* only do this for immediate children */
794 if (asp
->cmd
== DDI_RESUME
&& asp
->when
== DDI_PRE
&&
795 ddi_get_parent(rdip
) == dip
)
796 if (pci_pre_resume(rdip
) != DDI_SUCCESS
) {
797 /* Not good, better stop now. */
799 "Couldn't pre-resume device %p",
804 return (DDI_SUCCESS
);
806 case DDI_CTLOPS_DETACH
:
807 if (!pcie_is_child(dip
, rdip
))
808 return (DDI_SUCCESS
);
810 dsp
= (struct detachspec
*)arg
;
812 if (dsp
->when
== DDI_PRE
)
813 pf_fini(rdip
, dsp
->cmd
);
815 /* only do this for immediate children */
816 if (dsp
->cmd
== DDI_SUSPEND
&& dsp
->when
== DDI_POST
&&
817 ddi_get_parent(rdip
) == dip
)
818 if (pci_post_suspend(rdip
) != DDI_SUCCESS
)
819 return (DDI_FAILURE
);
821 return (DDI_SUCCESS
);
827 return (ddi_ctlops(dip
, rdip
, ctlop
, arg
, result
));
/*
 * bus_intr_op entry point: all interrupt operations for children are
 * delegated unchanged to the common x86 PCI implementation.
 */
static int
npe_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	return (pci_common_intr_ops(pdip, rdip, intr_op, hdlp, result));
}
844 npe_initchild(dev_info_t
*child
)
849 ddi_acc_handle_t cfg_hdl
;
852 * Do not bind drivers to empty bridges.
853 * Fail above, if the bridge is found to be hotplug capable
855 if (npe_disable_empty_bridges_workaround(child
) == 1)
856 return (DDI_FAILURE
);
858 if (pci_common_name_child(child
, name
, 80) != DDI_SUCCESS
)
859 return (DDI_FAILURE
);
861 ddi_set_name_addr(child
, name
);
864 * Pseudo nodes indicate a prototype node with per-instance
865 * properties to be merged into the real h/w device node.
866 * The interpretation of the unit-address is DD[,F]
867 * where DD is the device id and F is the function.
869 if (ndi_dev_is_persistent_node(child
) == 0) {
870 extern int pci_allow_pseudo_children
;
872 ddi_set_parent_data(child
, NULL
);
875 * Try to merge the properties from this prototype
876 * node into real h/w nodes.
878 if (ndi_merge_node(child
, pci_common_name_child
) ==
881 * Merged ok - return failure to remove the node.
883 ddi_set_name_addr(child
, NULL
);
884 return (DDI_FAILURE
);
887 /* workaround for DDIVS to run under PCI Express */
888 if (pci_allow_pseudo_children
) {
890 * If the "interrupts" property doesn't exist,
891 * this must be the ddivs no-intr case, and it returns
892 * DDI_SUCCESS instead of DDI_FAILURE.
894 if (ddi_prop_get_int(DDI_DEV_T_ANY
, child
,
895 DDI_PROP_DONTPASS
, "interrupts", -1) == -1)
896 return (DDI_SUCCESS
);
898 * Create the ddi_parent_private_data for a pseudo
901 pci_common_set_parent_private_data(child
);
902 return (DDI_SUCCESS
);
906 * The child was not merged into a h/w node,
907 * but there's not much we can do with it other
908 * than return failure to cause the node to be removed.
910 cmn_err(CE_WARN
, "!%s@%s: %s.conf properties not merged",
911 ddi_get_name(child
), ddi_get_name_addr(child
),
912 ddi_get_name(child
));
913 ddi_set_name_addr(child
, NULL
);
914 return (DDI_NOT_WELL_FORMED
);
917 if (ddi_prop_get_int(DDI_DEV_T_ANY
, child
, DDI_PROP_DONTPASS
,
918 "interrupts", -1) != -1)
919 pci_common_set_parent_private_data(child
);
921 ddi_set_parent_data(child
, NULL
);
923 /* Disable certain errors on PCIe drivers for x86 platforms */
924 regs
= pcie_get_aer_uce_mask() | npe_aer_uce_mask
;
925 pcie_set_aer_uce_mask(regs
);
926 regs
= pcie_get_aer_ce_mask() | npe_aer_ce_mask
;
927 pcie_set_aer_ce_mask(regs
);
928 regs
= pcie_get_aer_suce_mask() | npe_aer_suce_mask
;
929 pcie_set_aer_suce_mask(regs
);
932 * If URs are disabled, mask SERRs as well, otherwise the system will
933 * still be notified of URs
935 if (npe_aer_uce_mask
& PCIE_AER_UCE_UR
)
936 pcie_set_serr_mask(1);
938 if (pci_config_setup(child
, &cfg_hdl
) == DDI_SUCCESS
) {
939 npe_ck804_fix_aer_ptr(cfg_hdl
);
940 npe_nvidia_error_workaround(cfg_hdl
);
941 npe_intel_error_workaround(cfg_hdl
);
942 pci_config_teardown(&cfg_hdl
);
945 bus_p
= PCIE_DIP2BUS(child
);
947 uint16_t device_id
= (uint16_t)(bus_p
->bus_dev_ven_id
>> 16);
948 uint16_t vendor_id
= (uint16_t)(bus_p
->bus_dev_ven_id
& 0xFFFF);
949 uint16_t rev_id
= bus_p
->bus_rev_id
;
951 /* Disable AER for certain NVIDIA Chipsets */
952 if ((vendor_id
== NVIDIA_VENDOR_ID
) &&
953 (device_id
== NVIDIA_CK804_DEVICE_ID
) &&
954 (rev_id
< NVIDIA_CK804_AER_VALID_REVID
))
955 bus_p
->bus_aer_off
= 0;
957 pcie_init_dom(child
);
958 (void) pcie_initchild(child
);
961 return (DDI_SUCCESS
);
/*
 * DDI_CTLOPS_UNINITCHILD handler (called from npe_ctlops): undo what
 * npe_initchild() set up — uninitialize PCIe child state, clear the
 * unit-address, and strip minor nodes and properties so the node reverts
 * to prototype form.
 */
static int
npe_removechild(dev_info_t *dip)
{
	pcie_uninitchild(dip);

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}
983 npe_open(dev_t
*devp
, int flags
, int otyp
, cred_t
*credp
)
985 minor_t minor
= getminor(*devp
);
986 int instance
= PCI_MINOR_NUM_TO_INSTANCE(minor
);
987 pci_state_t
*pci_p
= ddi_get_soft_state(npe_statep
, instance
);
991 * Make sure the open is for the right file type.
993 if (otyp
!= OTYP_CHR
)
999 mutex_enter(&pci_p
->pci_mutex
);
1000 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor
)) {
1001 case PCI_TOOL_REG_MINOR_NUM
:
1002 case PCI_TOOL_INTR_MINOR_NUM
:
1005 /* Handle devctl ioctls */
1006 rv
= pcie_open(pci_p
->pci_dip
, devp
, flags
, otyp
, credp
);
1007 mutex_exit(&pci_p
->pci_mutex
);
1011 /* Handle pcitool ioctls */
1012 if (flags
& FEXCL
) {
1013 if (pci_p
->pci_soft_state
!= PCI_SOFT_STATE_CLOSED
) {
1014 mutex_exit(&pci_p
->pci_mutex
);
1015 cmn_err(CE_NOTE
, "npe_open: busy");
1018 pci_p
->pci_soft_state
= PCI_SOFT_STATE_OPEN_EXCL
;
1020 if (pci_p
->pci_soft_state
== PCI_SOFT_STATE_OPEN_EXCL
) {
1021 mutex_exit(&pci_p
->pci_mutex
);
1022 cmn_err(CE_NOTE
, "npe_open: busy");
1025 pci_p
->pci_soft_state
= PCI_SOFT_STATE_OPEN
;
1027 mutex_exit(&pci_p
->pci_mutex
);
1033 npe_close(dev_t dev
, int flags
, int otyp
, cred_t
*credp
)
1035 minor_t minor
= getminor(dev
);
1036 int instance
= PCI_MINOR_NUM_TO_INSTANCE(minor
);
1037 pci_state_t
*pci_p
= ddi_get_soft_state(npe_statep
, instance
);
1043 mutex_enter(&pci_p
->pci_mutex
);
1045 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor
)) {
1046 case PCI_TOOL_REG_MINOR_NUM
:
1047 case PCI_TOOL_INTR_MINOR_NUM
:
1050 /* Handle devctl ioctls */
1051 rv
= pcie_close(pci_p
->pci_dip
, dev
, flags
, otyp
, credp
);
1052 mutex_exit(&pci_p
->pci_mutex
);
1056 /* Handle pcitool ioctls */
1057 pci_p
->pci_soft_state
= PCI_SOFT_STATE_CLOSED
;
1058 mutex_exit(&pci_p
->pci_mutex
);
1063 npe_ioctl(dev_t dev
, int cmd
, intptr_t arg
, int mode
, cred_t
*credp
, int *rvalp
)
1065 minor_t minor
= getminor(dev
);
1066 int instance
= PCI_MINOR_NUM_TO_INSTANCE(minor
);
1067 pci_state_t
*pci_p
= ddi_get_soft_state(npe_statep
, instance
);
1073 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor
)) {
1074 case PCI_TOOL_REG_MINOR_NUM
:
1075 case PCI_TOOL_INTR_MINOR_NUM
:
1076 /* To handle pcitool related ioctls */
1077 ret
= pci_common_ioctl(pci_p
->pci_dip
, dev
, cmd
, arg
, mode
,
1081 /* To handle devctl and hotplug related ioctls */
1082 ret
= pcie_ioctl(pci_p
->pci_dip
, dev
, cmd
, arg
, mode
, credp
,
/*
 * bus_fm_init entry point: a child (`tdip`) is initializing its FMA
 * support.  Hand back this nexus's FM interrupt-block cookie (set up in
 * npe_attach() via ddi_fm_init()) and report the nexus's FM capability
 * level so the child can size its own support accordingly.
 */
static int
npe_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
    ddi_iblock_cookie_t *ibc)
{
	pci_state_t  *pcip = ddi_get_soft_state(npe_statep,
	    ddi_get_instance(dip));

	ASSERT(ibc != NULL);
	*ibc = pcip->pci_fm_ibc;

	return (pcip->pci_fmcap);
}
1106 npe_fm_callback(dev_info_t
*dip
, ddi_fm_error_t
*derr
, const void *no_used
)
1109 * On current x86 systems, npe's callback does not get called for failed
1110 * loads. If in the future this feature is used, the fault PA should be
1111 * logged in the derr->fme_bus_specific field. The appropriate PCIe
1112 * error handling code should be called and needs to be coordinated with
1113 * safe access handling.