/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include "qib.h"

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_qib_version[] = QIB_IDSTR "\n";

DEFINE_SPINLOCK(qib_devs_lock);
LIST_HEAD(qib_dev_list);
DEFINE_MUTEX(qib_mutex);	/* general driver use */

unsigned qib_ibmtu;
module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");

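/*
 * Illustrative usage (the ib_qib module name is assumed here):
 * loading with "modprobe ib_qib ibmtu=5" makes qib_set_mtu() below
 * reject MTUs above 4096 bytes, since a nonzero value is treated as
 * an IB MTU enum (1=256 ... 5=4096).
 */
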
unsigned qib_compat_ddr_negotiate = 1;
module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(compat_ddr_negotiate,
		 "Attempt pre-IBTA 1.2 DDR speed negotiation");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("QLogic <support@qlogic.com>");
MODULE_DESCRIPTION("QLogic IB driver");

/*
 * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
 * PIO send buffers.  This is well beyond anything currently
 * defined in the InfiniBand spec.
 */
#define QIB_PIO_MAXIBHDR 128

/*
 * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define QIB_MAX_PKT_RECV 64

struct qlogic_ib_stats qib_stats;

const char *qib_get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int qib_count_active_units(void)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&qib_devs_lock, flags);
	list_for_each_entry(dd, &qib_dev_list, list) {
		if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE))) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&qib_devs_lock, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int qib_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct qib_devdata *dd;
	unsigned long flags;
	int pidx;
	struct qib_pportdata *ppd;

	spin_lock_irqsave(&qib_devs_lock, flags);

	list_for_each_entry(dd, &qib_dev_list, list) {
		nunits++;
		if ((dd->flags & QIB_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
					 QIBL_LINKARMED | QIBL_LINKACTIVE)))
				nup++;
		}
	}

	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;

	return nunits;
}

/**
 * qib_wait_linkstate - wait for an IB link state change to occur
 * @ppd: the qlogic_ib per-port data
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route.  Currently used only by
 * qib_set_linkstate.  Returns 0 if the state is reached, otherwise
 * -ETIMEDOUT.  The state can have multiple bits set, to match any of
 * several transitions.
 */
int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->lflags_lock, flags);
	if (ppd->state_wanted) {
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		ret = -EBUSY;
		goto bail;
	}
	ppd->state_wanted = state;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	wait_event_interruptible_timeout(ppd->state_wait,
					 (ppd->lflags & state),
					 msecs_to_jiffies(msecs));
	spin_lock_irqsave(&ppd->lflags_lock, flags);
	ppd->state_wanted = 0;
	spin_unlock_irqrestore(&ppd->lflags_lock, flags);

	if (!(ppd->lflags & state))
		ret = -ETIMEDOUT;
	else
		ret = 0;
bail:
	return ret;
}

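/*
 * Note that "state" above is a bitmask: passing, say,
 * QIBL_LINKARMED | QIBL_LINKACTIVE returns 0 as soon as either
 * flag shows up in ppd->lflags.
 */
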
int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
{
	u32 lstate;
	int ret;
	struct qib_devdata *dd = ppd->dd;
	unsigned long flags;

	switch (newstate) {
	case QIB_IB_LINKDOWN_ONLY:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_SLEEP:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKDOWN_DISABLE:
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
		/* don't wait */
		ret = 0;
		goto bail;

	case QIB_IB_LINKARM:
		if (ppd->lflags & QIBL_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		/*
		 * Since the port can be ACTIVE when we ask for ARMED,
		 * clear QIBL_LINKV so we can wait for a transition.
		 * If the link isn't ARMED, then something else happened
		 * and there is no point waiting for ARMED.
		 */
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKV;
		break;

	case QIB_IB_LINKACTIVE:
		if (ppd->lflags & QIBL_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(ppd->lflags & QIBL_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
				 IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
		lstate = QIBL_LINKACTIVE;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_wait_linkstate(ppd, lstate, 10);

bail:
	return ret;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
{
	const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
	const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);

	return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
}

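/*
 * Worked example with hypothetical sizes: 32 buffers per chunk
 * (rcvegrbufs_perchunk_shift == 5) and 4KB buffers
 * (rcvegrbufsize_shift == 12) map etail == 70 to chunk 70 >> 5 == 2,
 * idx 70 & 31 == 6, i.e. an offset of 6 << 12 == 24KB into
 * rcvegrbuf[2].
 */
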
/*
 * Returns 1 if error was a CRC, else 0.
 * Needed for some chips' synthesized error counters.
 */
static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
			  u32 ctxt, u32 eflags, u32 l, u32 etail,
			  __le32 *rhf_addr, struct qib_message_header *rhdr)
{
	u32 ret = 0;

	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
		ret = 1;
	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
		struct qib_other_headers *ohdr = NULL;
		struct qib_ibport *ibp = &ppd->ibport_data;
		struct qib_qp *qp = NULL;
		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
		u32 qp_num;
		u32 opcode;
		u32 psn;
		int diff;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		if (lid < QIB_MULTICAST_LID_BASE) {
			lid &= ~((1 << ppd->lmc) - 1);
			if (unlikely(lid != ppd->lid))
				goto drop;
		}

		/* Check for GRH */
		if (lnh == QIB_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == QIB_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		} else
			goto drop;

		/* Get opcode and PSN from packet */
		opcode = be32_to_cpu(ohdr->bth[0]);
		opcode >>= 24;
		psn = be32_to_cpu(ohdr->bth[2]);

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
		if (qp_num != QIB_MULTICAST_QPN) {
			int ruc_res;

			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock(&qp->r_lock);

			/* Check for valid receive state. */
			if (!(ib_qib_state_ops[qp->state] &
			      QIB_PROCESS_RECV_OK)) {
				ibp->n_pkt_drops++;
				goto unlock;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				ruc_res =
					qib_ruc_check_hdr(
						ibp, hdr,
						lnh == QIB_LRH_GRH,
						qp,
						be32_to_cpu(ohdr->bth[0]));
				if (ruc_res)
					goto unlock;

				/* Only deal with RDMA Writes for now */
				if (opcode <
				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
					diff = qib_cmp24(psn, qp->r_psn);
					if (!qp->r_nak_state && diff >= 0) {
						ibp->n_rc_seqnak++;
						qp->r_nak_state =
							IB_NAK_PSN_ERROR;
						/* Use the expected PSN. */
						qp->r_ack_psn = qp->r_psn;
						/*
						 * Wait to send the sequence
						 * NAK until all packets
						 * in the receive queue have
						 * been processed.
						 * Otherwise, we end up
						 * propagating congestion.
						 */
						if (list_empty(&qp->rspwait)) {
							qp->r_flags |=
								QIB_R_RSP_NAK;
							atomic_inc(
								&qp->refcount);
							list_add_tail(
							    &qp->rspwait,
							    &rcd->qp_wait_list);
						}
					} /* Out of sequence NAK */
				} /* QP Request NAKs */
				break;
			case IB_QPT_SMI:
			case IB_QPT_GSI:
			case IB_QPT_UD:
			case IB_QPT_UC:
			default:
				/* For now don't handle any other QP types */
				break;
			}

unlock:
			spin_unlock(&qp->r_lock);
			/*
			 * Notify qib_destroy_qp() if it is waiting
			 * for us to finish.
			 */
			if (atomic_dec_and_test(&qp->refcount))
				wake_up(&qp->wait);
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

drop:
	return ret;
}

/**
 * qib_kreceive - receive a packet
 * @rcd: the qlogic_ib context
 * @llic: gets count of good packets needed to clear lli,
 *        (used with chips that need to track crcs for lli)
 *
 * Called from interrupt handler for errors or receive interrupt.
 * Returns number of CRC error packets, needed by some chips for
 * local link integrity tracking.  crcs are adjusted down by following
 * good packets, if any, and count of good packets is also tracked.
 */
u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	__le32 *rhf_addr;
	void *ebuf;
	const u32 rsize = dd->rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct qib_message_header *hdr;
	u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
	int last;
	u64 lval;
	struct qib_qp *qp, *nqp;

	l = rcd->head;
	rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
	if (dd->flags & QIB_NODMA_RTAIL) {
		u32 seq = qib_hdrget_seq(rhf_addr);

		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = qib_get_rcvhdrtail(rcd);
		if (l == hdrqtail)
			goto bail;
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	for (last = 0, i = 1; !last; i += !last) {
		hdr = dd->f_get_msgheader(dd, rhf_addr);
		eflags = qib_hdrget_err_flags(rhf_addr);
		etype = qib_hdrget_rcv_type(rhf_addr);
		/* total length */
		tlen = qib_hdrget_length_in_bytes(rhf_addr);
		ebuf = NULL;
		if ((dd->flags & QIB_NODMA_RTAIL) ?
		    qib_hdrget_use_egr_buf(rhf_addr) :
		    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
			etail = qib_hdrget_index(rhf_addr);
			updegr = 1;
			if (tlen > sizeof(*hdr) ||
			    etype >= RCVHQ_RCV_TYPE_NON_KD)
				ebuf = qib_get_egrbuf(rcd, etail);
		}
		if (!eflags) {
			u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;

			if (lrh_len != tlen) {
				qib_stats.sps_lenerrs++;
				goto move_along;
			}
		}
		if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
		    ebuf == NULL &&
		    tlen > (dd->rcvhdrentsize - 2 + 1 -
				qib_hdrget_offset(rhf_addr)) << 2) {
			goto move_along;
		}

		/*
		 * Both tiderr and qibhdrerr are set for all plain IB
		 * packets; only qibhdrerr should be set.
		 */
		if (unlikely(eflags))
			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
					       etail, rhf_addr, hdr);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			qib_ib_rcv(rcd, hdr, ebuf, tlen);
			if (crcs)
				crcs--;
			else if (llic && *llic)
				--*llic;
		}
move_along:
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (i == QIB_MAX_PKT_RECV)
			last = 1;

		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
		if (dd->flags & QIB_NODMA_RTAIL) {
			u32 seq = qib_hdrget_seq(rhf_addr);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
		} else if (l == hdrqtail)
			last = 1;
		/*
		 * Update head regs etc., every 16 packets, if not last pkt,
		 * to help prevent rcvhdrq overflows, when many packets
		 * are processed and queue is nearly full.
		 * Don't request an interrupt for intermediate updates.
		 */
		lval = l;
		if (!last && !(i & 0xf)) {
			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
			updegr = 0;
		}
	}
	/*
	 * Notify qib_destroy_qp() if it is waiting
	 * for lookaside_qp to finish.
	 */
	if (rcd->lookaside_qp) {
		if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
			wake_up(&rcd->lookaside_qp->wait);
		rcd->lookaside_qp = NULL;
	}

	rcd->head = l;
	rcd->pkt_count += i;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & QIB_R_RSP_NAK) {
			qp->r_flags &= ~QIB_R_RSP_NAK;
			qib_send_rc_ack(qp);
		}
		if (qp->r_flags & QIB_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~QIB_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_qib_state_ops[qp->state] &
				QIB_PROCESS_OR_FLUSH_SEND)
				qib_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}

bail:
	/* Report number of packets consumed */
	if (npkts)
		*npkts = i;

	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
	return crcs;
}

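/*
 * Note on the QIB_NODMA_RTAIL path above: rather than reading a
 * DMA'ed tail pointer, each rcvhdrq entry carries a sequence number
 * that cycles through 1..13; when the value read from the next entry
 * does not match rcd->seq_cnt, no further packet has arrived.
 */
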
/**
 * qib_set_mtu - set the MTU
 * @ppd: the perport data
 * @arg: the new MTU
 *
 * We can handle "any" incoming size; the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link INIT state...
 */
int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
{
	u32 piosize;
	int ret, chk;

	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ret = -EINVAL;
		goto bail;
	}
	chk = ib_mtu_enum_to_int(qib_ibmtu);
	if (chk > 0 && arg > chk) {
		ret = -EINVAL;
		goto bail;
	}

	piosize = ppd->ibmaxlen;
	ppd->ibmtu = arg;

	if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != ppd->init_ibmaxlen) {
			if (arg > piosize && arg <= ppd->init_ibmaxlen)
				piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
			ppd->ibmaxlen = piosize;
		}
	} else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
		piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
		ppd->ibmaxlen = piosize;
	}

	ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);

	ret = 0;

bail:
	return ret;
}

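/*
 * Illustrative call: qib_set_mtu(ppd, 2048) restricts the outgoing
 * IB MTU to 2048 bytes; any argument other than 256, 512, 1024, 2048
 * or 4096, or one above the cap set by the ibmtu module parameter,
 * fails with -EINVAL.
 */
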
int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
{
	struct qib_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;

	dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
			 lid | (~((1U << lmc) - 1)) << 16);

	qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
		    dd->unit, ppd->port, lid);

	return 0;
}

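/*
 * Worked example: with lmc == 2, ~((1U << lmc) - 1) is 0xfffc in the
 * low 16 bits, so the upper halfword passed to f_set_ib_cfg() is a
 * match mask covering the 2^lmc == 4 consecutive LIDs lid..lid+3.
 */
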
/*
 * The following deals with the "obviously simple" task of overriding
 * the state of the LEDs, which normally indicate link physical and
 * logical status.  The complications arise in dealing with different
 * hardware mappings and the board-dependent routine being called from
 * interrupts.  And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

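/*
 * Illustrative encoding (what each nybble does to a given LED is
 * board-dependent): val = (32 << LED_OVER_FREQ_SHIFT) | 0x0f blinks
 * between phase 0 == 0xf and phase 1 == 0x0, each phase lasting
 * (HZ << 4) / 32 == HZ/2 jiffies; see qib_set_led_override() below.
 */
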
static void qib_run_led_override(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & QIB_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	dd->f_setextled(ppd, 1);
	/*
	 * Don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify the logic.
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
{
	struct qib_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & QIB_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&ppd->led_override_timer);
		ppd->led_override_timer.function = qib_run_led_override;
		ppd->led_override_timer.data = (unsigned long) ppd;
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}

/**
 * qib_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int qib_reset_device(int unit)
{
	int ret, i;
	struct qib_devdata *dd = qib_lookup(unit);
	struct qib_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
		qib_devinfo(dd->pcidev, "Invalid unit number %u or "
			    "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
		dd->f_setextled(ppd, 0);
		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);
	}

	ret = dd->f_reset(dd);
	if (ret == 1)
		ret = qib_init(dd, 1);
	else
		ret = -EAGAIN;
	if (ret)
		qib_dev_err(dd, "Reinitialize unit %u after "
			    "reset failed with %d\n", unit, ret);
	else
		qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
			    "resetting\n", unit);

bail:
	return ret;
}