2 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * This file contains all of the code that is specific to the
35 * InfiniPath PCIe chip.
38 #include <linux/interrupt.h>
39 #include <linux/pci.h>
40 #include <linux/delay.h>
43 #include "ipath_kernel.h"
44 #include "ipath_registers.h"
46 static void ipath_setup_pe_setextled(struct ipath_devdata
*, u64
, u64
);
49 * This file contains all the chip-specific register information and
50 * access functions for the QLogic InfiniPath PCI-Express chip.
52 * This lists the InfiniPath registers, in the actual chip layout.
53 * This structure should never be directly accessed.
/*
 * This lists the InfiniPath registers, in the actual chip layout.
 * This structure should never be directly accessed; it exists only so
 * that IPATH_KREG_OFFSET() can compute 64-bit-word register offsets
 * from field names.
 */
struct _infinipath_do_not_use_kernel_regs {
	unsigned long long Revision;
	unsigned long long Control;
	unsigned long long PageAlign;
	unsigned long long PortCnt;
	unsigned long long DebugPortSelect;
	unsigned long long Reserved0;
	unsigned long long SendRegBase;
	unsigned long long UserRegBase;
	unsigned long long CounterRegBase;
	unsigned long long Scratch;
	unsigned long long Reserved1;
	unsigned long long Reserved2;
	unsigned long long IntBlocked;
	unsigned long long IntMask;
	unsigned long long IntStatus;
	unsigned long long IntClear;
	unsigned long long ErrorMask;
	unsigned long long ErrorStatus;
	unsigned long long ErrorClear;
	unsigned long long HwErrMask;
	unsigned long long HwErrStatus;
	unsigned long long HwErrClear;
	unsigned long long HwDiagCtrl;
	unsigned long long MDIO;
	unsigned long long IBCStatus;
	unsigned long long IBCCtrl;
	unsigned long long ExtStatus;
	unsigned long long ExtCtrl;
	unsigned long long GPIOOut;
	unsigned long long GPIOMask;
	unsigned long long GPIOStatus;
	unsigned long long GPIOClear;
	unsigned long long RcvCtrl;
	unsigned long long RcvBTHQP;
	unsigned long long RcvHdrSize;
	unsigned long long RcvHdrCnt;
	unsigned long long RcvHdrEntSize;
	unsigned long long RcvTIDBase;
	unsigned long long RcvTIDCnt;
	unsigned long long RcvEgrBase;
	unsigned long long RcvEgrCnt;
	unsigned long long RcvBufBase;
	unsigned long long RcvBufSize;
	unsigned long long RxIntMemBase;
	unsigned long long RxIntMemSize;
	unsigned long long RcvPartitionKey;
	unsigned long long Reserved3;
	unsigned long long RcvPktLEDCnt;
	unsigned long long Reserved4[8];
	unsigned long long SendCtrl;
	unsigned long long SendPIOBufBase;
	unsigned long long SendPIOSize;
	unsigned long long SendPIOBufCnt;
	unsigned long long SendPIOAvailAddr;
	unsigned long long TxIntMemBase;
	unsigned long long TxIntMemSize;
	unsigned long long Reserved5;
	unsigned long long PCIeRBufTestReg0;
	unsigned long long PCIeRBufTestReg1;
	unsigned long long Reserved51[6];
	unsigned long long SendBufferError;
	unsigned long long SendBufferErrorCONT1;
	unsigned long long Reserved6SBE[6];
	unsigned long long RcvHdrAddr0;
	unsigned long long RcvHdrAddr1;
	unsigned long long RcvHdrAddr2;
	unsigned long long RcvHdrAddr3;
	unsigned long long RcvHdrAddr4;
	unsigned long long Reserved7RHA[11];
	unsigned long long RcvHdrTailAddr0;
	unsigned long long RcvHdrTailAddr1;
	unsigned long long RcvHdrTailAddr2;
	unsigned long long RcvHdrTailAddr3;
	unsigned long long RcvHdrTailAddr4;
	unsigned long long Reserved8RHTA[11];
	unsigned long long Reserved9SW[8];
	unsigned long long SerdesConfig0;
	unsigned long long SerdesConfig1;
	unsigned long long SerdesStatus;
	unsigned long long XGXSConfig;
	unsigned long long IBPLLCfg;
	unsigned long long Reserved10SW2[3];
	unsigned long long PCIEQ0SerdesConfig0;
	unsigned long long PCIEQ0SerdesConfig1;
	unsigned long long PCIEQ0SerdesStatus;
	unsigned long long Reserved11;
	unsigned long long PCIEQ1SerdesConfig0;
	unsigned long long PCIEQ1SerdesConfig1;
	unsigned long long PCIEQ1SerdesStatus;
	unsigned long long Reserved12;
};
/* Convert a register-struct field name to its offset in 64-bit words */
#define IPATH_KREG_OFFSET(field) (offsetof(struct \
	_infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
#define IPATH_CREG_OFFSET(field) (offsetof( \
	struct infinipath_counters, field) / sizeof(u64))
153 static const struct ipath_kregs ipath_pe_kregs
= {
154 .kr_control
= IPATH_KREG_OFFSET(Control
),
155 .kr_counterregbase
= IPATH_KREG_OFFSET(CounterRegBase
),
156 .kr_debugportselect
= IPATH_KREG_OFFSET(DebugPortSelect
),
157 .kr_errorclear
= IPATH_KREG_OFFSET(ErrorClear
),
158 .kr_errormask
= IPATH_KREG_OFFSET(ErrorMask
),
159 .kr_errorstatus
= IPATH_KREG_OFFSET(ErrorStatus
),
160 .kr_extctrl
= IPATH_KREG_OFFSET(ExtCtrl
),
161 .kr_extstatus
= IPATH_KREG_OFFSET(ExtStatus
),
162 .kr_gpio_clear
= IPATH_KREG_OFFSET(GPIOClear
),
163 .kr_gpio_mask
= IPATH_KREG_OFFSET(GPIOMask
),
164 .kr_gpio_out
= IPATH_KREG_OFFSET(GPIOOut
),
165 .kr_gpio_status
= IPATH_KREG_OFFSET(GPIOStatus
),
166 .kr_hwdiagctrl
= IPATH_KREG_OFFSET(HwDiagCtrl
),
167 .kr_hwerrclear
= IPATH_KREG_OFFSET(HwErrClear
),
168 .kr_hwerrmask
= IPATH_KREG_OFFSET(HwErrMask
),
169 .kr_hwerrstatus
= IPATH_KREG_OFFSET(HwErrStatus
),
170 .kr_ibcctrl
= IPATH_KREG_OFFSET(IBCCtrl
),
171 .kr_ibcstatus
= IPATH_KREG_OFFSET(IBCStatus
),
172 .kr_intblocked
= IPATH_KREG_OFFSET(IntBlocked
),
173 .kr_intclear
= IPATH_KREG_OFFSET(IntClear
),
174 .kr_intmask
= IPATH_KREG_OFFSET(IntMask
),
175 .kr_intstatus
= IPATH_KREG_OFFSET(IntStatus
),
176 .kr_mdio
= IPATH_KREG_OFFSET(MDIO
),
177 .kr_pagealign
= IPATH_KREG_OFFSET(PageAlign
),
178 .kr_partitionkey
= IPATH_KREG_OFFSET(RcvPartitionKey
),
179 .kr_portcnt
= IPATH_KREG_OFFSET(PortCnt
),
180 .kr_rcvbthqp
= IPATH_KREG_OFFSET(RcvBTHQP
),
181 .kr_rcvbufbase
= IPATH_KREG_OFFSET(RcvBufBase
),
182 .kr_rcvbufsize
= IPATH_KREG_OFFSET(RcvBufSize
),
183 .kr_rcvctrl
= IPATH_KREG_OFFSET(RcvCtrl
),
184 .kr_rcvegrbase
= IPATH_KREG_OFFSET(RcvEgrBase
),
185 .kr_rcvegrcnt
= IPATH_KREG_OFFSET(RcvEgrCnt
),
186 .kr_rcvhdrcnt
= IPATH_KREG_OFFSET(RcvHdrCnt
),
187 .kr_rcvhdrentsize
= IPATH_KREG_OFFSET(RcvHdrEntSize
),
188 .kr_rcvhdrsize
= IPATH_KREG_OFFSET(RcvHdrSize
),
189 .kr_rcvintmembase
= IPATH_KREG_OFFSET(RxIntMemBase
),
190 .kr_rcvintmemsize
= IPATH_KREG_OFFSET(RxIntMemSize
),
191 .kr_rcvtidbase
= IPATH_KREG_OFFSET(RcvTIDBase
),
192 .kr_rcvtidcnt
= IPATH_KREG_OFFSET(RcvTIDCnt
),
193 .kr_revision
= IPATH_KREG_OFFSET(Revision
),
194 .kr_scratch
= IPATH_KREG_OFFSET(Scratch
),
195 .kr_sendbuffererror
= IPATH_KREG_OFFSET(SendBufferError
),
196 .kr_sendctrl
= IPATH_KREG_OFFSET(SendCtrl
),
197 .kr_sendpioavailaddr
= IPATH_KREG_OFFSET(SendPIOAvailAddr
),
198 .kr_sendpiobufbase
= IPATH_KREG_OFFSET(SendPIOBufBase
),
199 .kr_sendpiobufcnt
= IPATH_KREG_OFFSET(SendPIOBufCnt
),
200 .kr_sendpiosize
= IPATH_KREG_OFFSET(SendPIOSize
),
201 .kr_sendregbase
= IPATH_KREG_OFFSET(SendRegBase
),
202 .kr_txintmembase
= IPATH_KREG_OFFSET(TxIntMemBase
),
203 .kr_txintmemsize
= IPATH_KREG_OFFSET(TxIntMemSize
),
204 .kr_userregbase
= IPATH_KREG_OFFSET(UserRegBase
),
205 .kr_serdesconfig0
= IPATH_KREG_OFFSET(SerdesConfig0
),
206 .kr_serdesconfig1
= IPATH_KREG_OFFSET(SerdesConfig1
),
207 .kr_serdesstatus
= IPATH_KREG_OFFSET(SerdesStatus
),
208 .kr_xgxsconfig
= IPATH_KREG_OFFSET(XGXSConfig
),
209 .kr_ibpllcfg
= IPATH_KREG_OFFSET(IBPLLCfg
),
212 * These should not be used directly via ipath_write_kreg64(),
213 * use them with ipath_write_kreg64_port(),
215 .kr_rcvhdraddr
= IPATH_KREG_OFFSET(RcvHdrAddr0
),
216 .kr_rcvhdrtailaddr
= IPATH_KREG_OFFSET(RcvHdrTailAddr0
),
218 /* The rcvpktled register controls one of the debug port signals, so
219 * a packet activity LED can be connected to it. */
220 .kr_rcvpktledcnt
= IPATH_KREG_OFFSET(RcvPktLEDCnt
),
221 .kr_pcierbuftestreg0
= IPATH_KREG_OFFSET(PCIeRBufTestReg0
),
222 .kr_pcierbuftestreg1
= IPATH_KREG_OFFSET(PCIeRBufTestReg1
),
223 .kr_pcieq0serdesconfig0
= IPATH_KREG_OFFSET(PCIEQ0SerdesConfig0
),
224 .kr_pcieq0serdesconfig1
= IPATH_KREG_OFFSET(PCIEQ0SerdesConfig1
),
225 .kr_pcieq0serdesstatus
= IPATH_KREG_OFFSET(PCIEQ0SerdesStatus
),
226 .kr_pcieq1serdesconfig0
= IPATH_KREG_OFFSET(PCIEQ1SerdesConfig0
),
227 .kr_pcieq1serdesconfig1
= IPATH_KREG_OFFSET(PCIEQ1SerdesConfig1
),
228 .kr_pcieq1serdesstatus
= IPATH_KREG_OFFSET(PCIEQ1SerdesStatus
)
231 static const struct ipath_cregs ipath_pe_cregs
= {
232 .cr_badformatcnt
= IPATH_CREG_OFFSET(RxBadFormatCnt
),
233 .cr_erricrccnt
= IPATH_CREG_OFFSET(RxICRCErrCnt
),
234 .cr_errlinkcnt
= IPATH_CREG_OFFSET(RxLinkProblemCnt
),
235 .cr_errlpcrccnt
= IPATH_CREG_OFFSET(RxLPCRCErrCnt
),
236 .cr_errpkey
= IPATH_CREG_OFFSET(RxPKeyMismatchCnt
),
237 .cr_errrcvflowctrlcnt
= IPATH_CREG_OFFSET(RxFlowCtrlErrCnt
),
238 .cr_err_rlencnt
= IPATH_CREG_OFFSET(RxLenErrCnt
),
239 .cr_errslencnt
= IPATH_CREG_OFFSET(TxLenErrCnt
),
240 .cr_errtidfull
= IPATH_CREG_OFFSET(RxTIDFullErrCnt
),
241 .cr_errtidvalid
= IPATH_CREG_OFFSET(RxTIDValidErrCnt
),
242 .cr_errvcrccnt
= IPATH_CREG_OFFSET(RxVCRCErrCnt
),
243 .cr_ibstatuschange
= IPATH_CREG_OFFSET(IBStatusChangeCnt
),
244 .cr_intcnt
= IPATH_CREG_OFFSET(LBIntCnt
),
245 .cr_invalidrlencnt
= IPATH_CREG_OFFSET(RxMaxMinLenErrCnt
),
246 .cr_invalidslencnt
= IPATH_CREG_OFFSET(TxMaxMinLenErrCnt
),
247 .cr_lbflowstallcnt
= IPATH_CREG_OFFSET(LBFlowStallCnt
),
248 .cr_pktrcvcnt
= IPATH_CREG_OFFSET(RxDataPktCnt
),
249 .cr_pktrcvflowctrlcnt
= IPATH_CREG_OFFSET(RxFlowPktCnt
),
250 .cr_pktsendcnt
= IPATH_CREG_OFFSET(TxDataPktCnt
),
251 .cr_pktsendflowcnt
= IPATH_CREG_OFFSET(TxFlowPktCnt
),
252 .cr_portovflcnt
= IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt
),
253 .cr_rcvebpcnt
= IPATH_CREG_OFFSET(RxEBPCnt
),
254 .cr_rcvovflcnt
= IPATH_CREG_OFFSET(RxBufOvflCnt
),
255 .cr_senddropped
= IPATH_CREG_OFFSET(TxDroppedPktCnt
),
256 .cr_sendstallcnt
= IPATH_CREG_OFFSET(TxFlowStallCnt
),
257 .cr_sendunderruncnt
= IPATH_CREG_OFFSET(TxUnderrunCnt
),
258 .cr_wordrcvcnt
= IPATH_CREG_OFFSET(RxDwordCnt
),
259 .cr_wordsendcnt
= IPATH_CREG_OFFSET(TxDwordCnt
),
260 .cr_unsupvlcnt
= IPATH_CREG_OFFSET(TxUnsupVLErrCnt
),
261 .cr_rxdroppktcnt
= IPATH_CREG_OFFSET(RxDroppedPktCnt
),
262 .cr_iblinkerrrecovcnt
= IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt
),
263 .cr_iblinkdowncnt
= IPATH_CREG_OFFSET(IBLinkDownedCnt
),
264 .cr_ibsymbolerrcnt
= IPATH_CREG_OFFSET(IBSymbolErrCnt
)
/* kr_intstatus, kr_intclear, kr_intmask bits */
#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)

/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK  0x000000000000003fULL
#define INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT 0
#define INFINIPATH_HWE_PCIEPOISONEDTLP      0x0000000010000000ULL
#define INFINIPATH_HWE_PCIECPLTIMEOUT       0x0000000020000000ULL
#define INFINIPATH_HWE_PCIEBUSPARITYXTLH    0x0000000040000000ULL
#define INFINIPATH_HWE_PCIEBUSPARITYXADM    0x0000000080000000ULL
#define INFINIPATH_HWE_PCIEBUSPARITYRADM    0x0000000100000000ULL
#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
#define INFINIPATH_HWE_PCIE1PLLFAILED       0x0400000000000000ULL
#define INFINIPATH_HWE_PCIE0PLLFAILED       0x0800000000000000ULL
#define INFINIPATH_HWE_SERDESPLLFAILED      0x1000000000000000ULL

/* kr_extstatus bits */
#define INFINIPATH_EXTS_FREQSEL 0x2
#define INFINIPATH_EXTS_SERDESSEL 0x4
#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000

/* GPIO pin numbers used for I2C serial flash access */
#define _IPATH_GPIO_SDA_NUM 1
#define _IPATH_GPIO_SCL_NUM 0

#define IPATH_GPIO_SDA (1ULL << \
	(_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
#define IPATH_GPIO_SCL (1ULL << \
	(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))

/*
 * Rev2 silicon allows suppressing check for ArmLaunch errors.
 * this can speed up short packet sends on systems that do
 * not guarantee write-order.
 */
#define INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR (1ULL<<63)
306 /* 6120 specific hardware errors... */
307 static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs
[] = {
308 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP
, "PCIe Poisoned TLP"),
309 INFINIPATH_HWE_MSG(PCIECPLTIMEOUT
, "PCIe completion timeout"),
311 * In practice, it's unlikely wthat we'll see PCIe PLL, or bus
312 * parity or memory parity error failures, because most likely we
313 * won't be able to talk to the core of the chip. Nonetheless, we
314 * might see them, if they are in parts of the PCIe core that aren't
317 INFINIPATH_HWE_MSG(PCIE1PLLFAILED
, "PCIePLL1"),
318 INFINIPATH_HWE_MSG(PCIE0PLLFAILED
, "PCIePLL0"),
319 INFINIPATH_HWE_MSG(PCIEBUSPARITYXTLH
, "PCIe XTLH core parity"),
320 INFINIPATH_HWE_MSG(PCIEBUSPARITYXADM
, "PCIe ADM TX core parity"),
321 INFINIPATH_HWE_MSG(PCIEBUSPARITYRADM
, "PCIe ADM RX core parity"),
322 INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR
, "Rx Dsync"),
323 INFINIPATH_HWE_MSG(SERDESPLLFAILED
, "SerDes PLL"),
/* Combined TXE PIO-buffer/PBC memory parity error bits, in hwerrstatus
 * bit positions */
#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
			 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
			<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)

static int ipath_pe_txe_recover(struct ipath_devdata *);
333 * ipath_pe_handle_hwerrors - display hardware errors.
334 * @dd: the infinipath device
335 * @msg: the output buffer
336 * @msgl: the size of the output buffer
338 * Use same msg buffer as regular errors to avoid excessive stack
339 * use. Most hardware errors are catastrophic, but for right now,
340 * we'll print them and continue. We reuse the same message buffer as
341 * ipath_handle_errors() to avoid excessive stack usage.
343 static void ipath_pe_handle_hwerrors(struct ipath_devdata
*dd
, char *msg
,
351 hwerrs
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_hwerrstatus
);
354 * better than printing confusing messages
355 * This seems to be related to clearing the crc error, or
356 * the pll error during init.
358 ipath_cdbg(VERBOSE
, "Called but no hardware errors set\n");
360 } else if (hwerrs
== ~0ULL) {
361 ipath_dev_err(dd
, "Read of hardware error status failed "
362 "(all bits set); ignoring\n");
365 ipath_stats
.sps_hwerrs
++;
367 /* Always clear the error status register, except MEMBISTFAIL,
368 * regardless of whether we continue or stop using the chip.
369 * We want that set so we know it failed, even across driver reload.
370 * We'll still ignore it in the hwerrmask. We do this partly for
371 * diagnostics, but also for support */
372 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_hwerrclear
,
373 hwerrs
&~INFINIPATH_HWE_MEMBISTFAILED
);
375 hwerrs
&= dd
->ipath_hwerrmask
;
378 * make sure we get this much out, unless told to be quiet,
379 * or it's occurred within the last 5 seconds
381 if ((hwerrs
& ~(dd
->ipath_lasthwerror
|
382 ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF
|
383 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC
)
384 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT
))) ||
385 (ipath_debug
& __IPATH_VERBDBG
))
386 dev_info(&dd
->pcidev
->dev
, "Hardware error: hwerr=0x%llx "
387 "(cleared)\n", (unsigned long long) hwerrs
);
388 dd
->ipath_lasthwerror
|= hwerrs
;
390 if (hwerrs
& ~dd
->ipath_hwe_bitsextant
)
391 ipath_dev_err(dd
, "hwerror interrupt with unknown errors "
392 "%llx set\n", (unsigned long long)
393 (hwerrs
& ~dd
->ipath_hwe_bitsextant
));
395 ctrl
= ipath_read_kreg32(dd
, dd
->ipath_kregs
->kr_control
);
396 if (ctrl
& INFINIPATH_C_FREEZEMODE
) {
398 * parity errors in send memory are recoverable,
399 * just cancel the send (if indicated in * sendbuffererror),
400 * count the occurrence, unfreeze (if no other handled
401 * hardware error bits are set), and continue. They can
402 * occur if a processor speculative read is done to the PIO
403 * buffer while we are sending a packet, for example.
405 if ((hwerrs
& TXE_PIO_PARITY
) && ipath_pe_txe_recover(dd
))
406 hwerrs
&= ~TXE_PIO_PARITY
;
409 * if any set that we aren't ignoring only make the
410 * complaint once, in case it's stuck or recurring,
411 * and we get here multiple times
412 * Force link down, so switch knows, and
413 * LEDs are turned off
415 if (dd
->ipath_flags
& IPATH_INITTED
) {
416 ipath_set_linkstate(dd
, IPATH_IB_LINKDOWN
);
417 ipath_setup_pe_setextled(dd
,
418 INFINIPATH_IBCS_L_STATE_DOWN
,
419 INFINIPATH_IBCS_LT_STATE_DISABLED
);
420 ipath_dev_err(dd
, "Fatal Hardware Error (freeze "
421 "mode), no longer usable, SN %.16s\n",
426 * Mark as having had an error for driver, and also
427 * for /sys and status word mapped to user programs.
428 * This marks unit as not usable, until reset
430 *dd
->ipath_statusp
&= ~IPATH_STATUS_IB_READY
;
431 *dd
->ipath_statusp
|= IPATH_STATUS_HWERROR
;
432 dd
->ipath_flags
&= ~IPATH_INITTED
;
434 ipath_dbg("Clearing freezemode on ignored hardware "
436 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_control
,
443 if (hwerrs
& INFINIPATH_HWE_MEMBISTFAILED
) {
444 strlcat(msg
, "[Memory BIST test failed, InfiniPath hardware unusable]",
446 /* ignore from now on, so disable until driver reloaded */
447 *dd
->ipath_statusp
|= IPATH_STATUS_HWERROR
;
448 dd
->ipath_hwerrmask
&= ~INFINIPATH_HWE_MEMBISTFAILED
;
449 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_hwerrmask
,
450 dd
->ipath_hwerrmask
);
453 ipath_format_hwerrors(hwerrs
,
454 ipath_6120_hwerror_msgs
,
455 sizeof(ipath_6120_hwerror_msgs
)/
456 sizeof(ipath_6120_hwerror_msgs
[0]),
459 if (hwerrs
& (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
460 << INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT
)) {
461 bits
= (u32
) ((hwerrs
>>
462 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT
) &
463 INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
);
464 snprintf(bitsmsg
, sizeof bitsmsg
,
465 "[PCIe Mem Parity Errs %x] ", bits
);
466 strlcat(msg
, bitsmsg
, msgl
);
469 #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \
470 INFINIPATH_HWE_COREPLL_RFSLIP )
472 if (hwerrs
& _IPATH_PLL_FAIL
) {
473 snprintf(bitsmsg
, sizeof bitsmsg
,
474 "[PLL failed (%llx), InfiniPath hardware unusable]",
475 (unsigned long long) hwerrs
& _IPATH_PLL_FAIL
);
476 strlcat(msg
, bitsmsg
, msgl
);
477 /* ignore from now on, so disable until driver reloaded */
478 dd
->ipath_hwerrmask
&= ~(hwerrs
& _IPATH_PLL_FAIL
);
479 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_hwerrmask
,
480 dd
->ipath_hwerrmask
);
483 if (hwerrs
& INFINIPATH_HWE_SERDESPLLFAILED
) {
485 * If it occurs, it is left masked since the external
486 * interface is unused
488 dd
->ipath_hwerrmask
&= ~INFINIPATH_HWE_SERDESPLLFAILED
;
489 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_hwerrmask
,
490 dd
->ipath_hwerrmask
);
494 ipath_dev_err(dd
, "%s hardware error\n", msg
);
495 if (isfatal
&& !ipath_diag_inuse
&& dd
->ipath_freezemsg
) {
497 * for /sys status file ; if no trailing } is copied, we'll
498 * know it was truncated.
500 snprintf(dd
->ipath_freezemsg
, dd
->ipath_freezelen
,
506 * ipath_pe_boardname - fill in the board name
507 * @dd: the infinipath device
508 * @name: the output buffer
509 * @namelen: the size of the output buffer
511 * info is based on the board revision register
513 static int ipath_pe_boardname(struct ipath_devdata
*dd
, char *name
,
517 u8 boardrev
= dd
->ipath_boardrev
;
522 n
= "InfiniPath_Emulation";
525 n
= "InfiniPath_QLE7140-Bringup";
528 n
= "InfiniPath_QLE7140";
531 n
= "InfiniPath_QMI7140";
534 n
= "InfiniPath_QEM7140";
537 n
= "InfiniPath_QMH7140";
540 n
= "InfiniPath_QLE7142";
544 "Don't yet know about board with ID %u\n",
546 snprintf(name
, namelen
, "Unknown_InfiniPath_PCIe_%u",
551 snprintf(name
, namelen
, "%s", n
);
553 if (dd
->ipath_majrev
!= 4 || !dd
->ipath_minrev
|| dd
->ipath_minrev
>2) {
554 ipath_dev_err(dd
, "Unsupported InfiniPath hardware revision %u.%u!\n",
555 dd
->ipath_majrev
, dd
->ipath_minrev
);
564 * ipath_pe_init_hwerrors - enable hardware errors
565 * @dd: the infinipath device
567 * now that we have finished initializing everything that might reasonably
568 * cause a hardware error, and cleared those errors bits as they occur,
569 * we can enable hardware errors in the mask (potentially enabling
570 * freeze mode), and enable hardware errors as errors (along with
571 * everything else) in errormask
573 static void ipath_pe_init_hwerrors(struct ipath_devdata
*dd
)
578 extsval
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_extstatus
);
580 if (!(extsval
& INFINIPATH_EXTS_MEMBIST_ENDTEST
))
581 ipath_dev_err(dd
, "MemBIST did not complete!\n");
582 if (extsval
& INFINIPATH_EXTS_MEMBIST_FOUND
)
583 ipath_dbg("MemBIST corrected\n");
585 val
= ~0ULL; /* barring bugs, all hwerrors become interrupts, */
587 if (!dd
->ipath_boardrev
) // no PLL for Emulator
588 val
&= ~INFINIPATH_HWE_SERDESPLLFAILED
;
590 if (dd
->ipath_minrev
< 2) {
591 /* workaround bug 9460 in internal interface bus parity
592 * checking. Fixed (HW bug 9490) in Rev2.
594 val
&= ~INFINIPATH_HWE_PCIEBUSPARITYRADM
;
596 dd
->ipath_hwerrmask
= val
;
600 * ipath_pe_bringup_serdes - bring up the serdes
601 * @dd: the infinipath device
603 static int ipath_pe_bringup_serdes(struct ipath_devdata
*dd
)
605 u64 val
, config1
, prev_val
;
608 ipath_dbg("Trying to bringup serdes\n");
610 if (ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_hwerrstatus
) &
611 INFINIPATH_HWE_SERDESPLLFAILED
) {
612 ipath_dbg("At start, serdes PLL failed bit set "
613 "in hwerrstatus, clearing and continuing\n");
614 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_hwerrclear
,
615 INFINIPATH_HWE_SERDESPLLFAILED
);
618 val
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_serdesconfig0
);
619 config1
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_serdesconfig1
);
621 ipath_cdbg(VERBOSE
, "SerDes status config0=%llx config1=%llx, "
622 "xgxsconfig %llx\n", (unsigned long long) val
,
623 (unsigned long long) config1
, (unsigned long long)
624 ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_xgxsconfig
));
627 * Force reset on, also set rxdetect enable. Must do before reading
628 * serdesstatus at least for simulation, or some of the bits in
629 * serdes status will come back as undefined and cause simulation
632 val
|= INFINIPATH_SERDC0_RESET_PLL
| INFINIPATH_SERDC0_RXDETECT_EN
633 | INFINIPATH_SERDC0_L1PWR_DN
;
634 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_serdesconfig0
, val
);
635 /* be sure chip saw it */
636 ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_scratch
);
637 udelay(5); /* need pll reset set at least for a bit */
639 * after PLL is reset, set the per-lane Resets and TxIdle and
640 * clear the PLL reset and rxdetect (to get falling edge).
641 * Leave L1PWR bits set (permanently)
643 val
&= ~(INFINIPATH_SERDC0_RXDETECT_EN
| INFINIPATH_SERDC0_RESET_PLL
644 | INFINIPATH_SERDC0_L1PWR_DN
);
645 val
|= INFINIPATH_SERDC0_RESET_MASK
| INFINIPATH_SERDC0_TXIDLE
;
646 ipath_cdbg(VERBOSE
, "Clearing pll reset and setting lane resets "
647 "and txidle (%llx)\n", (unsigned long long) val
);
648 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_serdesconfig0
, val
);
649 /* be sure chip saw it */
650 ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_scratch
);
651 /* need PLL reset clear for at least 11 usec before lane
652 * resets cleared; give it a few more to be sure */
654 val
&= ~(INFINIPATH_SERDC0_RESET_MASK
| INFINIPATH_SERDC0_TXIDLE
);
656 ipath_cdbg(VERBOSE
, "Clearing lane resets and txidle "
657 "(writing %llx)\n", (unsigned long long) val
);
658 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_serdesconfig0
, val
);
659 /* be sure chip saw it */
660 val
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_scratch
);
662 val
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_xgxsconfig
);
664 if (((val
>> INFINIPATH_XGXS_MDIOADDR_SHIFT
) &
665 INFINIPATH_XGXS_MDIOADDR_MASK
) != 3) {
667 ~(INFINIPATH_XGXS_MDIOADDR_MASK
<<
668 INFINIPATH_XGXS_MDIOADDR_SHIFT
);
670 val
|= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT
;
672 if (val
& INFINIPATH_XGXS_RESET
) {
673 val
&= ~INFINIPATH_XGXS_RESET
;
675 if (((val
>> INFINIPATH_XGXS_RX_POL_SHIFT
) &
676 INFINIPATH_XGXS_RX_POL_MASK
) != dd
->ipath_rx_pol_inv
) {
677 /* need to compensate for Tx inversion in partner */
678 val
&= ~(INFINIPATH_XGXS_RX_POL_MASK
<<
679 INFINIPATH_XGXS_RX_POL_SHIFT
);
680 val
|= dd
->ipath_rx_pol_inv
<<
681 INFINIPATH_XGXS_RX_POL_SHIFT
;
683 if (dd
->ipath_minrev
>= 2) {
684 /* Rev 2. can tolerate multiple writes to PBC, and
685 * allowing them can provide lower latency on some
686 * CPUs, but this feature is off by default, only
687 * turned on by setting D63 of XGXSconfig reg.
688 * May want to make this conditional more
689 * fine-grained in future. This is not exactly
690 * related to XGXS, but where the bit ended up.
692 val
|= INFINIPATH_XGXS_SUPPRESS_ARMLAUNCH_ERR
;
695 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_xgxsconfig
, val
);
697 val
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_serdesconfig0
);
699 /* clear current and de-emphasis bits */
700 config1
&= ~0x0ffffffff00ULL
;
701 /* set current to 20ma */
702 config1
|= 0x00000000000ULL
;
703 /* set de-emphasis to -5.68dB */
704 config1
|= 0x0cccc000000ULL
;
705 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_serdesconfig1
, config1
);
707 ipath_cdbg(VERBOSE
, "done: SerDes status config0=%llx "
708 "config1=%llx, sstatus=%llx xgxs=%llx\n",
709 (unsigned long long) val
, (unsigned long long) config1
,
711 ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_serdesstatus
),
713 ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_xgxsconfig
));
715 if (!ipath_waitfor_mdio_cmdready(dd
)) {
717 dd
, dd
->ipath_kregs
->kr_mdio
,
718 ipath_mdio_req(IPATH_MDIO_CMD_READ
, 31,
719 IPATH_MDIO_CTRL_XGXS_REG_8
, 0));
720 if (ipath_waitfor_complete(dd
, dd
->ipath_kregs
->kr_mdio
,
721 IPATH_MDIO_DATAVALID
, &val
))
722 ipath_dbg("Never got MDIO data for XGXS "
725 ipath_cdbg(VERBOSE
, "MDIO Read reg8, "
726 "'bank' 31 %x\n", (u32
) val
);
728 ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
734 * ipath_pe_quiet_serdes - set serdes to txidle
735 * @dd: the infinipath device
736 * Called when driver is being unloaded
738 static void ipath_pe_quiet_serdes(struct ipath_devdata
*dd
)
740 u64 val
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_serdesconfig0
);
742 val
|= INFINIPATH_SERDC0_TXIDLE
;
743 ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
744 (unsigned long long) val
);
745 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_serdesconfig0
, val
);
748 static int ipath_pe_intconfig(struct ipath_devdata
*dd
)
753 * If the chip supports added error indication via GPIO pins,
754 * enable interrupts on those bits so the interrupt routine
755 * can count the events. Also set flag so interrupt routine
756 * can know they are expected.
758 chiprev
= dd
->ipath_revision
>> INFINIPATH_R_CHIPREVMINOR_SHIFT
;
759 if ((chiprev
& INFINIPATH_R_CHIPREVMINOR_MASK
) > 1) {
760 /* Rev2+ reports extra errors via internal GPIO pins */
761 dd
->ipath_flags
|= IPATH_GPIO_ERRINTRS
;
762 dd
->ipath_gpio_mask
|= IPATH_GPIO_ERRINTR_MASK
;
763 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_gpio_mask
,
764 dd
->ipath_gpio_mask
);
770 * ipath_setup_pe_setextled - set the state of the two external LEDs
771 * @dd: the infinipath device
773 * @ltst: the LT state
775 * These LEDs indicate the physical and logical state of IB link.
776 * For this chip (at least with recommended board pinouts), LED1
777 * is Yellow (logical state) and LED2 is Green (physical state),
779 * Note: We try to match the Mellanox HCA LED behavior as best
780 * we can. Green indicates physical link state is OK (something is
781 * plugged in, and we can train).
782 * Amber indicates the link is logically up (ACTIVE).
783 * Mellanox further blinks the amber LED to indicate data packet
784 * activity, but we have no hardware support for that, so it would
785 * require waking up every 10-20 msecs and checking the counters
786 * on the chip, and then turning the LED off if appropriate. That's
787 * visible overhead, so not something we will do.
790 static void ipath_setup_pe_setextled(struct ipath_devdata
*dd
, u64 lst
,
794 unsigned long flags
= 0;
796 /* the diags use the LED to indicate diag info, so we leave
797 * the external LED alone when the diags are running */
798 if (ipath_diag_inuse
)
801 /* Allow override of LED display for, e.g. Locating system in rack */
802 if (dd
->ipath_led_override
) {
803 ltst
= (dd
->ipath_led_override
& IPATH_LED_PHYS
)
804 ? INFINIPATH_IBCS_LT_STATE_LINKUP
805 : INFINIPATH_IBCS_LT_STATE_DISABLED
;
806 lst
= (dd
->ipath_led_override
& IPATH_LED_LOG
)
807 ? INFINIPATH_IBCS_L_STATE_ACTIVE
808 : INFINIPATH_IBCS_L_STATE_DOWN
;
811 spin_lock_irqsave(&dd
->ipath_gpio_lock
, flags
);
812 extctl
= dd
->ipath_extctrl
& ~(INFINIPATH_EXTC_LED1PRIPORT_ON
|
813 INFINIPATH_EXTC_LED2PRIPORT_ON
);
815 if (ltst
& INFINIPATH_IBCS_LT_STATE_LINKUP
)
816 extctl
|= INFINIPATH_EXTC_LED2PRIPORT_ON
;
817 if (lst
== INFINIPATH_IBCS_L_STATE_ACTIVE
)
818 extctl
|= INFINIPATH_EXTC_LED1PRIPORT_ON
;
819 dd
->ipath_extctrl
= extctl
;
820 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_extctrl
, extctl
);
821 spin_unlock_irqrestore(&dd
->ipath_gpio_lock
, flags
);
825 * ipath_setup_pe_cleanup - clean up any per-chip chip-specific stuff
826 * @dd: the infinipath device
828 * This is called during driver unload.
829 * We do the pci_disable_msi here, not in generic code, because it
830 * isn't used for the HT chips. If we do end up needing pci_enable_msi
831 * at some point in the future for HT, we'll move the call back
832 * into the main init_one code.
834 static void ipath_setup_pe_cleanup(struct ipath_devdata
*dd
)
836 dd
->ipath_msi_lo
= 0; /* just in case unload fails */
837 pci_disable_msi(dd
->pcidev
);
841 * ipath_setup_pe_config - setup PCIe config related stuff
842 * @dd: the infinipath device
843 * @pdev: the PCI device
845 * The pci_enable_msi() call will fail on systems with MSI quirks
846 * such as those with AMD8131, even if the device of interest is not
847 * attached to that device, (in the 2.6.13 - 2.6.15 kernels, at least, fixed
849 * All that can be done is to edit the kernel source to remove the quirk
850 * check until that is fixed.
851 * We do not need to call enable_msi() for our HyperTransport chip,
852 * even though it uses MSI, and we want to avoid the quirk warning, so
853 * So we call enable_msi only for PCIe. If we do end up needing
854 * pci_enable_msi at some point in the future for HT, we'll move the
855 * call back into the main init_one code.
856 * We save the msi lo and hi values, so we can restore them after
857 * chip reset (the kernel PCI infrastructure doesn't yet handle that
860 static int ipath_setup_pe_config(struct ipath_devdata
*dd
,
861 struct pci_dev
*pdev
)
865 dd
->ipath_msi_lo
= 0; /* used as a flag during reset processing */
866 ret
= pci_enable_msi(dd
->pcidev
);
868 ipath_dev_err(dd
, "pci_enable_msi failed: %d, "
869 "interrupts may not work\n", ret
);
870 /* continue even if it fails, we may still be OK... */
871 dd
->ipath_irq
= pdev
->irq
;
873 if ((pos
= pci_find_capability(dd
->pcidev
, PCI_CAP_ID_MSI
))) {
875 pci_read_config_dword(dd
->pcidev
, pos
+ PCI_MSI_ADDRESS_LO
,
877 pci_read_config_dword(dd
->pcidev
, pos
+ PCI_MSI_ADDRESS_HI
,
879 pci_read_config_word(dd
->pcidev
, pos
+ PCI_MSI_FLAGS
,
881 /* now save the data (vector) info */
882 pci_read_config_word(dd
->pcidev
,
883 pos
+ ((control
& PCI_MSI_FLAGS_64BIT
)
885 &dd
->ipath_msi_data
);
886 ipath_cdbg(VERBOSE
, "Read msi data 0x%x from config offset "
887 "0x%x, control=0x%x\n", dd
->ipath_msi_data
,
888 pos
+ ((control
& PCI_MSI_FLAGS_64BIT
) ? 12 : 8),
890 /* we save the cachelinesize also, although it doesn't
892 pci_read_config_byte(dd
->pcidev
, PCI_CACHE_LINE_SIZE
,
893 &dd
->ipath_pci_cacheline
);
895 ipath_dev_err(dd
, "Can't find MSI capability, "
896 "can't save MSI settings for reset\n");
897 if ((pos
= pci_find_capability(dd
->pcidev
, PCI_CAP_ID_EXP
))) {
899 pci_read_config_word(dd
->pcidev
, pos
+ PCI_EXP_LNKSTA
,
904 ipath_dev_err(dd
, "PCIe width %u, "
905 "performance reduced\n", linkstat
);
908 ipath_dev_err(dd
, "Can't find PCI Express "
913 static void ipath_init_pe_variables(struct ipath_devdata
*dd
)
916 * bits for selecting i2c direction and values,
917 * used for I2C serial flash
919 dd
->ipath_gpio_sda_num
= _IPATH_GPIO_SDA_NUM
;
920 dd
->ipath_gpio_scl_num
= _IPATH_GPIO_SCL_NUM
;
921 dd
->ipath_gpio_sda
= IPATH_GPIO_SDA
;
922 dd
->ipath_gpio_scl
= IPATH_GPIO_SCL
;
924 /* variables for sanity checking interrupt and errors */
925 dd
->ipath_hwe_bitsextant
=
926 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK
<<
927 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT
) |
928 (INFINIPATH_HWE_TXEMEMPARITYERR_MASK
<<
929 INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT
) |
930 (INFINIPATH_HWE_PCIEMEMPARITYERR_MASK
<<
931 INFINIPATH_HWE_PCIEMEMPARITYERR_SHIFT
) |
932 INFINIPATH_HWE_PCIE1PLLFAILED
|
933 INFINIPATH_HWE_PCIE0PLLFAILED
|
934 INFINIPATH_HWE_PCIEPOISONEDTLP
|
935 INFINIPATH_HWE_PCIECPLTIMEOUT
|
936 INFINIPATH_HWE_PCIEBUSPARITYXTLH
|
937 INFINIPATH_HWE_PCIEBUSPARITYXADM
|
938 INFINIPATH_HWE_PCIEBUSPARITYRADM
|
939 INFINIPATH_HWE_MEMBISTFAILED
|
940 INFINIPATH_HWE_COREPLL_FBSLIP
|
941 INFINIPATH_HWE_COREPLL_RFSLIP
|
942 INFINIPATH_HWE_SERDESPLLFAILED
|
943 INFINIPATH_HWE_IBCBUSTOSPCPARITYERR
|
944 INFINIPATH_HWE_IBCBUSFRSPCPARITYERR
;
945 dd
->ipath_i_bitsextant
=
946 (INFINIPATH_I_RCVURG_MASK
<< INFINIPATH_I_RCVURG_SHIFT
) |
947 (INFINIPATH_I_RCVAVAIL_MASK
<<
948 INFINIPATH_I_RCVAVAIL_SHIFT
) |
949 INFINIPATH_I_ERROR
| INFINIPATH_I_SPIOSENT
|
950 INFINIPATH_I_SPIOBUFAVAIL
| INFINIPATH_I_GPIO
;
951 dd
->ipath_e_bitsextant
=
952 INFINIPATH_E_RFORMATERR
| INFINIPATH_E_RVCRC
|
953 INFINIPATH_E_RICRC
| INFINIPATH_E_RMINPKTLEN
|
954 INFINIPATH_E_RMAXPKTLEN
| INFINIPATH_E_RLONGPKTLEN
|
955 INFINIPATH_E_RSHORTPKTLEN
| INFINIPATH_E_RUNEXPCHAR
|
956 INFINIPATH_E_RUNSUPVL
| INFINIPATH_E_REBP
|
957 INFINIPATH_E_RIBFLOW
| INFINIPATH_E_RBADVERSION
|
958 INFINIPATH_E_RRCVEGRFULL
| INFINIPATH_E_RRCVHDRFULL
|
959 INFINIPATH_E_RBADTID
| INFINIPATH_E_RHDRLEN
|
960 INFINIPATH_E_RHDR
| INFINIPATH_E_RIBLOSTLINK
|
961 INFINIPATH_E_SMINPKTLEN
| INFINIPATH_E_SMAXPKTLEN
|
962 INFINIPATH_E_SUNDERRUN
| INFINIPATH_E_SPKTLEN
|
963 INFINIPATH_E_SDROPPEDSMPPKT
| INFINIPATH_E_SDROPPEDDATAPKT
|
964 INFINIPATH_E_SPIOARMLAUNCH
| INFINIPATH_E_SUNEXPERRPKTNUM
|
965 INFINIPATH_E_SUNSUPVL
| INFINIPATH_E_IBSTATUSCHANGED
|
966 INFINIPATH_E_INVALIDADDR
| INFINIPATH_E_RESET
|
967 INFINIPATH_E_HARDWARE
;
969 dd
->ipath_i_rcvavail_mask
= INFINIPATH_I_RCVAVAIL_MASK
;
970 dd
->ipath_i_rcvurg_mask
= INFINIPATH_I_RCVURG_MASK
;
973 /* setup the MSI stuff again after a reset. I'd like to just call
974 * pci_enable_msi() and request_irq() again, but when I do that,
975 * the MSI enable bit doesn't get set in the command word, and
976 * we switch to to a different interrupt vector, which is confusing,
977 * so I instead just do it all inline. Perhaps somehow can tie this
978 * into the PCIe hotplug support at some point
979 * Note, because I'm doing it all here, I don't call pci_disable_msi()
980 * or free_irq() at the start of ipath_setup_pe_reset().
982 static int ipath_reinit_msi(struct ipath_devdata
*dd
)
988 if (!dd
->ipath_msi_lo
) {
989 dev_info(&dd
->pcidev
->dev
, "Can't restore MSI config, "
990 "initial setup failed?\n");
995 if (!(pos
= pci_find_capability(dd
->pcidev
, PCI_CAP_ID_MSI
))) {
996 ipath_dev_err(dd
, "Can't find MSI capability, "
997 "can't restore MSI settings\n");
1001 ipath_cdbg(VERBOSE
, "Writing msi_lo 0x%x to config offset 0x%x\n",
1002 dd
->ipath_msi_lo
, pos
+ PCI_MSI_ADDRESS_LO
);
1003 pci_write_config_dword(dd
->pcidev
, pos
+ PCI_MSI_ADDRESS_LO
,
1005 ipath_cdbg(VERBOSE
, "Writing msi_lo 0x%x to config offset 0x%x\n",
1006 dd
->ipath_msi_hi
, pos
+ PCI_MSI_ADDRESS_HI
);
1007 pci_write_config_dword(dd
->pcidev
, pos
+ PCI_MSI_ADDRESS_HI
,
1009 pci_read_config_word(dd
->pcidev
, pos
+ PCI_MSI_FLAGS
, &control
);
1010 if (!(control
& PCI_MSI_FLAGS_ENABLE
)) {
1011 ipath_cdbg(VERBOSE
, "MSI control at off %x was %x, "
1012 "setting MSI enable (%x)\n", pos
+ PCI_MSI_FLAGS
,
1013 control
, control
| PCI_MSI_FLAGS_ENABLE
);
1014 control
|= PCI_MSI_FLAGS_ENABLE
;
1015 pci_write_config_word(dd
->pcidev
, pos
+ PCI_MSI_FLAGS
,
1018 /* now rewrite the data (vector) info */
1019 pci_write_config_word(dd
->pcidev
, pos
+
1020 ((control
& PCI_MSI_FLAGS_64BIT
) ? 12 : 8),
1021 dd
->ipath_msi_data
);
1022 /* we restore the cachelinesize also, although it doesn't really
1024 pci_write_config_byte(dd
->pcidev
, PCI_CACHE_LINE_SIZE
,
1025 dd
->ipath_pci_cacheline
);
1026 /* and now set the pci master bit again */
1027 pci_set_master(dd
->pcidev
);
1034 /* This routine sleeps, so it can only be called from user context, not
1035 * from interrupt context. If we need interrupt context, we can split
1036 * it into two routines.
1038 static int ipath_setup_pe_reset(struct ipath_devdata
*dd
)
1044 /* Use ERROR so it shows up in logs, etc. */
1045 ipath_dev_err(dd
, "Resetting InfiniPath unit %u\n", dd
->ipath_unit
);
1046 /* keep chip from being accessed in a few places */
1047 dd
->ipath_flags
&= ~(IPATH_INITTED
|IPATH_PRESENT
);
1048 val
= dd
->ipath_control
| INFINIPATH_C_RESET
;
1049 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_control
, val
);
1052 for (i
= 1; i
<= 5; i
++) {
1054 /* allow MBIST, etc. to complete; longer on each retry.
1055 * We sometimes get machine checks from bus timeout if no
1056 * response, so for now, make it *really* long.
1058 msleep(1000 + (1 + i
) * 2000);
1060 pci_write_config_dword(dd
->pcidev
, PCI_BASE_ADDRESS_0
,
1061 dd
->ipath_pcibar0
)))
1062 ipath_dev_err(dd
, "rewrite of BAR0 failed: %d\n",
1065 pci_write_config_dword(dd
->pcidev
, PCI_BASE_ADDRESS_1
,
1066 dd
->ipath_pcibar1
)))
1067 ipath_dev_err(dd
, "rewrite of BAR1 failed: %d\n",
1069 /* now re-enable memory access */
1070 if ((r
= pci_enable_device(dd
->pcidev
)))
1071 ipath_dev_err(dd
, "pci_enable_device failed after "
1073 /* whether it worked or not, mark as present, again */
1074 dd
->ipath_flags
|= IPATH_PRESENT
;
1075 val
= ipath_read_kreg64(dd
, dd
->ipath_kregs
->kr_revision
);
1076 if (val
== dd
->ipath_revision
) {
1077 ipath_cdbg(VERBOSE
, "Got matching revision "
1078 "register %llx on try %d\n",
1079 (unsigned long long) val
, i
);
1080 ret
= ipath_reinit_msi(dd
);
1083 /* Probably getting -1 back */
1084 ipath_dbg("Didn't get expected revision register, "
1085 "got %llx, try %d\n", (unsigned long long) val
,
1088 ret
= 0; /* failed */
1095 * ipath_pe_put_tid - write a TID in chip
1096 * @dd: the infinipath device
1097 * @tidptr: pointer to the expected TID (in chip) to udpate
1098 * @tidtype: 0 for eager, 1 for expected
1099 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1101 * This exists as a separate routine to allow for special locking etc.
1102 * It's used for both the full cleanup on exit, as well as the normal
1103 * setup and teardown.
1105 static void ipath_pe_put_tid(struct ipath_devdata
*dd
, u64 __iomem
*tidptr
,
1106 u32 type
, unsigned long pa
)
1108 u32 __iomem
*tidp32
= (u32 __iomem
*)tidptr
;
1109 unsigned long flags
= 0; /* keep gcc quiet */
1111 if (pa
!= dd
->ipath_tidinvalid
) {
1112 if (pa
& ((1U << 11) - 1)) {
1113 dev_info(&dd
->pcidev
->dev
, "BUG: physaddr %lx "
1114 "not 4KB aligned!\n", pa
);
1118 /* paranoia check */
1121 "BUG: Physical page address 0x%lx "
1122 "has bits set in 31-29\n", pa
);
1125 pa
|= dd
->ipath_tidtemplate
;
1126 else /* for now, always full 4KB page */
1130 /* workaround chip bug 9437 by writing each TID twice
1131 * and holding a spinlock around the writes, so they don't
1132 * intermix with other TID (eager or expected) writes
1133 * Unfortunately, this call can be done from interrupt level
1134 * for the port 0 eager TIDs, so we have to use irqsave
1136 spin_lock_irqsave(&dd
->ipath_tid_lock
, flags
);
1137 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_scratch
, 0xfeeddeaf);
1138 if (dd
->ipath_kregbase
)
1140 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_scratch
, 0xdeadbeef);
1142 spin_unlock_irqrestore(&dd
->ipath_tid_lock
, flags
);
1145 * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
1146 * @dd: the infinipath device
1147 * @tidptr: pointer to the expected TID (in chip) to udpate
1148 * @tidtype: 0 for eager, 1 for expected
1149 * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
1151 * This exists as a separate routine to allow for selection of the
1152 * appropriate "flavor". The static calls in cleanup just use the
1153 * revision-agnostic form, as they are not performance critical.
1155 static void ipath_pe_put_tid_2(struct ipath_devdata
*dd
, u64 __iomem
*tidptr
,
1156 u32 type
, unsigned long pa
)
1158 u32 __iomem
*tidp32
= (u32 __iomem
*)tidptr
;
1160 if (pa
!= dd
->ipath_tidinvalid
) {
1161 if (pa
& ((1U << 11) - 1)) {
1162 dev_info(&dd
->pcidev
->dev
, "BUG: physaddr %lx "
1163 "not 2KB aligned!\n", pa
);
1167 /* paranoia check */
1170 "BUG: Physical page address 0x%lx "
1171 "has bits set in 31-29\n", pa
);
1174 pa
|= dd
->ipath_tidtemplate
;
1175 else /* for now, always full 4KB page */
1178 if (dd
->ipath_kregbase
)
1185 * ipath_pe_clear_tid - clear all TID entries for a port, expected and eager
1186 * @dd: the infinipath device
1189 * clear all TID entries for a port, expected and eager.
1190 * Used from ipath_close(). On this chip, TIDs are only 32 bits,
1191 * not 64, but they are still on 64 bit boundaries, so tidbase
1192 * is declared as u64 * for the pointer math, even though we write 32 bits
1194 static void ipath_pe_clear_tids(struct ipath_devdata
*dd
, unsigned port
)
1196 u64 __iomem
*tidbase
;
1197 unsigned long tidinv
;
1200 if (!dd
->ipath_kregbase
)
1203 ipath_cdbg(VERBOSE
, "Invalidate TIDs for port %u\n", port
);
1205 tidinv
= dd
->ipath_tidinvalid
;
1206 tidbase
= (u64 __iomem
*)
1207 ((char __iomem
*)(dd
->ipath_kregbase
) +
1208 dd
->ipath_rcvtidbase
+
1209 port
* dd
->ipath_rcvtidcnt
* sizeof(*tidbase
));
1211 for (i
= 0; i
< dd
->ipath_rcvtidcnt
; i
++)
1212 ipath_pe_put_tid(dd
, &tidbase
[i
], 0, tidinv
);
1214 tidbase
= (u64 __iomem
*)
1215 ((char __iomem
*)(dd
->ipath_kregbase
) +
1216 dd
->ipath_rcvegrbase
+
1217 port
* dd
->ipath_rcvegrcnt
* sizeof(*tidbase
));
1219 for (i
= 0; i
< dd
->ipath_rcvegrcnt
; i
++)
1220 ipath_pe_put_tid(dd
, &tidbase
[i
], 1, tidinv
);
1224 * ipath_pe_tidtemplate - setup constants for TID updates
1225 * @dd: the infinipath device
1227 * We setup stuff that we use a lot, to avoid calculating each time
1229 static void ipath_pe_tidtemplate(struct ipath_devdata
*dd
)
1231 u32 egrsize
= dd
->ipath_rcvegrbufsize
;
1233 /* For now, we always allocate 4KB buffers (at init) so we can
1234 * receive max size packets. We may want a module parameter to
1235 * specify 2KB or 4KB and/or make be per port instead of per device
1236 * for those who want to reduce memory footprint. Note that the
1237 * ipath_rcvhdrentsize size must be large enough to hold the largest
1238 * IB header (currently 96 bytes) that we expect to handle (plus of
1239 * course the 2 dwords of RHF).
1241 if (egrsize
== 2048)
1242 dd
->ipath_tidtemplate
= 1U << 29;
1243 else if (egrsize
== 4096)
1244 dd
->ipath_tidtemplate
= 2U << 29;
1247 dev_info(&dd
->pcidev
->dev
, "BUG: unsupported egrbufsize "
1248 "%u, using %u\n", dd
->ipath_rcvegrbufsize
,
1250 dd
->ipath_tidtemplate
= 2U << 29;
1252 dd
->ipath_tidinvalid
= 0;
1255 static int ipath_pe_early_init(struct ipath_devdata
*dd
)
1257 dd
->ipath_flags
|= IPATH_4BYTE_TID
;
1260 * For openfabrics, we need to be able to handle an IB header of
1261 * 24 dwords. HT chip has arbitrary sized receive buffers, so we
1262 * made them the same size as the PIO buffers. This chip does not
1263 * handle arbitrary size buffers, so we need the header large enough
1264 * to handle largest IB header, but still have room for a 2KB MTU
1265 * standard IB packet.
1267 dd
->ipath_rcvhdrentsize
= 24;
1268 dd
->ipath_rcvhdrsize
= IPATH_DFLT_RCVHDRSIZE
;
1271 * To truly support a 4KB MTU (for usermode), we need to
1272 * bump this to a larger value. For now, we use them for
1275 dd
->ipath_rcvegrbufsize
= 2048;
1277 * the min() check here is currently a nop, but it may not always
1278 * be, depending on just how we do ipath_rcvegrbufsize
1280 dd
->ipath_ibmaxlen
= min(dd
->ipath_piosize2k
,
1281 dd
->ipath_rcvegrbufsize
+
1282 (dd
->ipath_rcvhdrentsize
<< 2));
1283 dd
->ipath_init_ibmaxlen
= dd
->ipath_ibmaxlen
;
1286 * We can request a receive interrupt for 1 or
1287 * more packets from current offset. For now, we set this
1288 * up for a single packet.
1290 dd
->ipath_rhdrhead_intr_off
= 1ULL<<32;
1292 ipath_get_eeprom_info(dd
);
/* Weak default: platforms with write-combining ordering issues (see
 * ipath_pe_get_base_info/ipath_pe_txe_recover) override this; the
 * generic version reports ordered WC stores. */
int __attribute__((weak)) ipath_unordered_wc(void)
{
	return 0;
}
1303 * ipath_init_pe_get_base_info - set chip-specific flags for user code
1304 * @pd: the infinipath port
1305 * @kbase: ipath_base_info pointer
1307 * We set the PCIE flag because the lower bandwidth on PCIe vs
1308 * HyperTransport can affect some user packet algorithms.
1310 static int ipath_pe_get_base_info(struct ipath_portdata
*pd
, void *kbase
)
1312 struct ipath_base_info
*kinfo
= kbase
;
1313 struct ipath_devdata
*dd
;
1315 if (ipath_unordered_wc()) {
1316 kinfo
->spi_runtime_flags
|= IPATH_RUNTIME_FORCE_WC_ORDER
;
1317 ipath_cdbg(PROC
, "Intel processor, forcing WC order\n");
1320 ipath_cdbg(PROC
, "Not Intel processor, WC ordered\n");
1327 if (dd
!= NULL
&& dd
->ipath_minrev
>= 2) {
1328 ipath_cdbg(PROC
, "IBA6120 Rev2, allow multiple PBC write\n");
1329 kinfo
->spi_runtime_flags
|= IPATH_RUNTIME_PBC_REWRITE
;
1330 ipath_cdbg(PROC
, "IBA6120 Rev2, allow loose DMA alignment\n");
1331 kinfo
->spi_runtime_flags
|= IPATH_RUNTIME_LOOSE_DMA_ALIGN
;
1335 kinfo
->spi_runtime_flags
|= IPATH_RUNTIME_PCIE
;
1339 static void ipath_pe_free_irq(struct ipath_devdata
*dd
)
1341 free_irq(dd
->ipath_irq
, dd
);
1346 * On platforms using this chip, and not having ordered WC stores, we
1347 * can get TXE parity errors due to speculative reads to the PIO buffers,
1348 * and this, due to a chip bug can result in (many) false parity error
1349 * reports. So it's a debug print on those, and an info print on systems
1350 * where the speculative reads don't occur.
1351 * Because we can get lots of false errors, we have no upper limit
1352 * on recovery attempts on those platforms.
1354 static int ipath_pe_txe_recover(struct ipath_devdata
*dd
)
1356 if (ipath_unordered_wc())
1357 ipath_dbg("Recovering from TXE PIO parity error\n");
1359 int cnt
= ++ipath_stats
.sps_txeparity
;
1360 if (cnt
>= IPATH_MAX_PARITY_ATTEMPTS
) {
1361 if (cnt
== IPATH_MAX_PARITY_ATTEMPTS
)
1363 "Too many attempts to recover from "
1364 "TXE parity, giving up\n");
1367 dev_info(&dd
->pcidev
->dev
,
1368 "Recovering from TXE PIO parity error\n");
1370 ipath_disarm_senderrbufs(dd
, 1);
1375 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
1376 * @dd: the infinipath device
1378 * This is global, and is called directly at init to set up the
1379 * chip-specific function pointers for later use.
1381 void ipath_init_iba6120_funcs(struct ipath_devdata
*dd
)
1383 dd
->ipath_f_intrsetup
= ipath_pe_intconfig
;
1384 dd
->ipath_f_bus
= ipath_setup_pe_config
;
1385 dd
->ipath_f_reset
= ipath_setup_pe_reset
;
1386 dd
->ipath_f_get_boardname
= ipath_pe_boardname
;
1387 dd
->ipath_f_init_hwerrors
= ipath_pe_init_hwerrors
;
1388 dd
->ipath_f_early_init
= ipath_pe_early_init
;
1389 dd
->ipath_f_handle_hwerrors
= ipath_pe_handle_hwerrors
;
1390 dd
->ipath_f_quiet_serdes
= ipath_pe_quiet_serdes
;
1391 dd
->ipath_f_bringup_serdes
= ipath_pe_bringup_serdes
;
1392 dd
->ipath_f_clear_tids
= ipath_pe_clear_tids
;
1393 if (dd
->ipath_minrev
>= 2)
1394 dd
->ipath_f_put_tid
= ipath_pe_put_tid_2
;
1396 dd
->ipath_f_put_tid
= ipath_pe_put_tid
;
1397 dd
->ipath_f_cleanup
= ipath_setup_pe_cleanup
;
1398 dd
->ipath_f_setextled
= ipath_setup_pe_setextled
;
1399 dd
->ipath_f_get_base_info
= ipath_pe_get_base_info
;
1400 dd
->ipath_f_free_irq
= ipath_pe_free_irq
;
1402 /* initialize chip-specific variables */
1403 dd
->ipath_f_tidtemplate
= ipath_pe_tidtemplate
;
1406 * setup the register offsets, since they are different for each
1409 dd
->ipath_kregs
= &ipath_pe_kregs
;
1410 dd
->ipath_cregs
= &ipath_pe_cregs
;
1412 ipath_init_pe_variables(dd
);