/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/utsname.h>
#include <linux/rculist.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
unsigned int ib_ipath_lkey_table_size = 12;
module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
                   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
                 "LKEY table size in bits (2^n, 1 <= n <= 23)");
static unsigned int ib_ipath_max_pds = 0xFFFF;
module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_pds,
                 "Maximum number of protection domains to support");
static unsigned int ib_ipath_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
unsigned int ib_ipath_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqes,
                 "Maximum number of completion queue entries to support");
unsigned int ib_ipath_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
                   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
unsigned int ib_ipath_max_qps = 16384;
module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
unsigned int ib_ipath_max_sges = 0x60;
module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
unsigned int ib_ipath_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
                   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
                 "Maximum number of multicast groups to support");
unsigned int ib_ipath_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
                   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
                 "Maximum number of attached QPs to support");
unsigned int ib_ipath_max_srqs = 1024;
module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
unsigned int ib_ipath_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
                   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
                   uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
static unsigned int ib_ipath_disable_sma;
module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; ipath_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = IPATH_POST_RECV_OK,
        [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
        [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
            IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
            IPATH_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
            IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
        [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
            IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
        [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
            IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
};
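/*
 * Illustrative use of the table above (added note; this mirrors the checks
 * made later in this file rather than introducing new behavior): a verb is
 * permitted only when the corresponding bit is set for the current state,
 * e.g.
 *
 *   if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))
 *           return -EINVAL;       (reject the post-send)
 */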
struct ipath_ucontext {
        struct ib_ucontext ibucontext;
};

static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
                                                  *ibucontext)
{
        return container_of(ibucontext, struct ipath_ucontext, ibucontext);
}
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
        [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
        [IB_WR_SEND] = IB_WC_SEND,
        [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
        [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
        [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
        [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

static __be64 sys_image_guid;
/**
 * ipath_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 */
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(sge->vaddr, data, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}
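/*
 * Added note on the SGE-advance idiom used above and repeated below: an
 * SGE backed by a memory region is walked in two levels --
 * sge->mr->map[sge->m]->segs[sge->n] -- where 'n' indexes segments within
 * one map page and 'm' indexes map pages.  When 'n' wraps past
 * IPATH_SEGSZ, 'm' advances and 'n' resets to 0, and vaddr/length are
 * reloaded from the newly selected segment.
 */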
/**
 * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
}
/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the ipath_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sg_list = ss->sg_list;
        struct ipath_sge sge = ss->sge;
        u8 num_sge = ss->num_sge;
        u32 ndesc = 1;  /* count the header */

        while (length) {
                u32 len = sge.length;

                if (len > length)
                        len = length;
                if (len > sge.sge_length)
                        len = sge.sge_length;
                BUG_ON(len == 0);
                if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
                    (len != length && (len & (sizeof(u32) - 1)))) {
                        ndesc = 0;
                        break;
                }
                ndesc++;
                sge.vaddr += len;
                sge.length -= len;
                sge.sge_length -= len;
                if (sge.sge_length == 0) {
                        if (--num_sge)
                                sge = *sg_list++;
                } else if (sge.length == 0 && sge.mr != NULL) {
                        if (++sge.n >= IPATH_SEGSZ) {
                                if (++sge.m >= sge.mr->mapsz)
                                        break;
                                sge.n = 0;
                        }
                        sge.vaddr =
                                sge.mr->map[sge.m]->segs[sge.n].vaddr;
                        sge.length =
                                sge.mr->map[sge.m]->segs[sge.n].length;
                }
                length -= len;
        }
        return ndesc;
}
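/*
 * Added note: the alignment test above rejects any segment whose start
 * address is not dword aligned, and any non-final segment whose length is
 * not a dword multiple, since the send DMA engine works in 32-bit units.
 * When 0 is returned, the caller falls back to copying the payload into a
 * contiguous bounce buffer (see ipath_verbs_send_dma() below).
 */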
/*
 * Copy from the SGEs to the data buffer.
 */
static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
                                u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                memcpy(data, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                data += len;
                length -= len;
        }
}
/**
 * ipath_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
        struct ipath_swqe *wqe;
        u32 next;
        int i;
        int j;
        int acc;
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Check that state is OK to post send. */
        if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
                goto bail_inval;

        /* IB spec says that num_sge == 0 is OK. */
        if (wr->num_sge > qp->s_max_sge)
                goto bail_inval;

        /*
         * Don't allow RDMA reads or atomic operations on UC or
         * undefined operations.
         * Make sure buffer is large enough to hold the result for atomics.
         */
        if (qp->ibqp.qp_type == IB_QPT_UC) {
                if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
                        goto bail_inval;
        } else if (qp->ibqp.qp_type == IB_QPT_UD) {
                /* Check UD opcode */
                if (wr->opcode != IB_WR_SEND &&
                    wr->opcode != IB_WR_SEND_WITH_IMM)
                        goto bail_inval;
                /* Check UD destination address PD */
                if (qp->ibqp.pd != wr->wr.ud.ah->pd)
                        goto bail_inval;
        } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
                goto bail_inval;
        else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
                 (wr->num_sge == 0 ||
                  wr->sg_list[0].length < sizeof(u64) ||
                  wr->sg_list[0].addr & (sizeof(u64) - 1)))
                goto bail_inval;
        else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
                goto bail_inval;

        next = qp->s_head + 1;
        if (next >= qp->s_size)
                next = 0;
        if (next == qp->s_last) {
                ret = -ENOMEM;
                goto bail;
        }

        wqe = get_swqe_ptr(qp, qp->s_head);
        wqe->wr = *wr;
        wqe->length = 0;
        if (wr->num_sge) {
                acc = wr->opcode >= IB_WR_RDMA_READ ?
                        IB_ACCESS_LOCAL_WRITE : 0;
                for (i = 0, j = 0; i < wr->num_sge; i++) {
                        u32 length = wr->sg_list[i].length;
                        int ok;

                        if (length == 0)
                                continue;
                        ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
                                           &wr->sg_list[i], acc);
                        if (!ok)
                                goto bail_inval;
                        wqe->length += length;
                        j++;
                }
                wqe->wr.num_sge = j;
        }

        /* Check for invalid packet size. */
        if (qp->ibqp.qp_type == IB_QPT_UC ||
            qp->ibqp.qp_type == IB_QPT_RC) {
                if (wqe->length > 0x80000000U)
                        goto bail_inval;
        } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
                goto bail_inval;
        wqe->ssn = qp->s_ssn++;
        qp->s_head = next;

        ret = 0;
        goto bail;

bail_inval:
        ret = -EINVAL;
bail:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}
/**
 * ipath_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                           struct ib_send_wr **bad_wr)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        int err = 0;

        for (; wr; wr = wr->next) {
                err = ipath_post_one_send(qp, wr);
                if (err) {
                        *bad_wr = wr;
                        goto bail;
                }
        }

        /* Try to do the send work in the caller's context. */
        ipath_do_send((unsigned long) qp);

bail:
        return err;
}
/**
 * ipath_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_rwq *wq = qp->r_rq.wq;
        unsigned long flags;
        int ret;

        /* Check that state is OK to post receive. */
        if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                ret = -EINVAL;
                goto bail;
        }

        for (; wr; wr = wr->next) {
                struct ipath_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        ret = -EINVAL;
                        goto bail;
                }

                spin_lock_irqsave(&qp->r_rq.lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        *bad_wr = wr;
                        ret = -ENOMEM;
                        goto bail;
                }

                wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }
        ret = 0;

bail:
        return ret;
}
/**
 * ipath_qp_rcv - processing an incoming packet on a QP
 * @dev: the device the packet came on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void ipath_qp_rcv(struct ipath_ibdev *dev,
                         struct ipath_ib_header *hdr, int has_grh,
                         void *data, u32 tlen, struct ipath_qp *qp)
{
        /* Check for valid receive state. */
        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                dev->n_pkt_drops++;
                return;
        }

        switch (qp->ibqp.qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (ib_ipath_disable_sma)
                        break;
                /* FALLTHROUGH */
        case IB_QPT_UD:
                ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_RC:
                ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        case IB_QPT_UC:
                ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
                break;

        default:
                break;
        }
}
/**
 * ipath_ib_rcv - process an incoming packet
 * @dev: the device pointer
 * @rhdr: the header of the packet
 * @data: the packet data
 * @tlen: the packet length
 *
 * This is called from ipath_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
                  u32 tlen)
{
        struct ipath_ib_header *hdr = rhdr;
        struct ipath_other_headers *ohdr;
        struct ipath_qp *qp;
        u32 qp_num;
        int lnh;
        u8 opcode;
        u16 lid;

        if (unlikely(dev == NULL))
                goto bail;

        if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
                dev->rcv_errors++;
                goto bail;
        }

        /* Check for a valid destination LID (see ch. 7.11.1). */
        lid = be16_to_cpu(hdr->lrh[1]);
        if (lid < IPATH_MULTICAST_LID_BASE) {
                lid &= ~((1 << dev->dd->ipath_lmc) - 1);
                if (unlikely(lid != dev->dd->ipath_lid)) {
                        dev->rcv_errors++;
                        goto bail;
                }
        }

        /* Check for GRH */
        lnh = be16_to_cpu(hdr->lrh[0]) & 3;
        if (lnh == IPATH_LRH_BTH)
                ohdr = &hdr->u.oth;
        else if (lnh == IPATH_LRH_GRH)
                ohdr = &hdr->u.l.oth;
        else {
                dev->rcv_errors++;
                goto bail;
        }

        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        dev->opstats[opcode].n_bytes += tlen;
        dev->opstats[opcode].n_packets++;

        /* Get the destination QP number. */
        qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
        if (qp_num == IPATH_MULTICAST_QPN) {
                struct ipath_mcast *mcast;
                struct ipath_mcast_qp *p;

                if (lnh != IPATH_LRH_GRH) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
                if (mcast == NULL) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                dev->n_multicast_rcv++;
                list_for_each_entry_rcu(p, &mcast->qp_list, list)
                        ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
                /*
                 * Notify ipath_multicast_detach() if it is waiting for us
                 * to finish.
                 */
                if (atomic_dec_return(&mcast->refcount) <= 1)
                        wake_up(&mcast->wait);
        } else {
                qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
                if (qp) {
                        dev->n_unicast_rcv++;
                        ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
                                     tlen, qp);
                        /*
                         * Notify ipath_destroy_qp() if it is waiting
                         * for us to finish.
                         */
                        if (atomic_dec_and_test(&qp->refcount))
                                wake_up(&qp->wait);
                } else
                        dev->n_pkt_drops++;
        }

bail:;
}
/**
 * ipath_ib_timer - verbs timer
 * @dev: the device pointer
 *
 * This is called from ipath_do_rcv_timer() at interrupt level to check for
 * QPs which need retransmits and to collect performance numbers.
 */
static void ipath_ib_timer(struct ipath_ibdev *dev)
{
        struct ipath_qp *resend = NULL;
        struct ipath_qp *rnr = NULL;
        struct list_head *last;
        struct ipath_qp *qp;
        unsigned long flags;

        if (dev == NULL)
                return;

        spin_lock_irqsave(&dev->pending_lock, flags);
        /* Start filling the next pending queue. */
        if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
                dev->pending_index = 0;
        /* Save any requests still in the new queue, they have timed out. */
        last = &dev->pending[dev->pending_index];
        while (!list_empty(last)) {
                qp = list_entry(last->next, struct ipath_qp, timerwait);
                list_del_init(&qp->timerwait);
                qp->timer_next = resend;
                resend = qp;
                atomic_inc(&qp->refcount);
        }
        last = &dev->rnrwait;
        if (!list_empty(last)) {
                qp = list_entry(last->next, struct ipath_qp, timerwait);
                if (--qp->s_rnr_timeout == 0) {
                        do {
                                list_del_init(&qp->timerwait);
                                qp->timer_next = rnr;
                                rnr = qp;
                                atomic_inc(&qp->refcount);
                                if (list_empty(last))
                                        break;
                                qp = list_entry(last->next, struct ipath_qp,
                                                timerwait);
                        } while (qp->s_rnr_timeout == 0);
                }
        }
        /*
         * We should only be in the started state if pma_sample_start != 0
         */
        if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
            --dev->pma_sample_start == 0) {
                dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
                ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
                                        &dev->ipath_rword,
                                        &dev->ipath_spkts,
                                        &dev->ipath_rpkts,
                                        &dev->ipath_xmit_wait);
        }
        if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
                if (dev->pma_sample_interval == 0) {
                        u64 ta, tb, tc, td, te;

                        dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
                        ipath_snapshot_counters(dev->dd, &ta, &tb,
                                                &tc, &td, &te);

                        dev->ipath_sword = ta - dev->ipath_sword;
                        dev->ipath_rword = tb - dev->ipath_rword;
                        dev->ipath_spkts = tc - dev->ipath_spkts;
                        dev->ipath_rpkts = td - dev->ipath_rpkts;
                        dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
                } else
                        dev->pma_sample_interval--;
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        /* XXX What if timer fires again while this is running? */
        while (resend != NULL) {
                qp = resend;
                resend = qp->timer_next;

                spin_lock_irqsave(&qp->s_lock, flags);
                if (qp->s_last != qp->s_tail &&
                    ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
                        dev->n_timeouts++;
                        ipath_restart_rc(qp, qp->s_last_psn + 1);
                }
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify ipath_destroy_qp() if it is waiting. */
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
        while (rnr != NULL) {
                qp = rnr;
                rnr = qp->timer_next;

                spin_lock_irqsave(&qp->s_lock, flags);
                if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify ipath_destroy_qp() if it is waiting. */
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }
}
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
        struct ipath_sge *sge = &ss->sge;

        sge->vaddr += length;
        sge->length -= length;
        sge->sge_length -= length;
        if (sge->sge_length == 0) {
                if (--ss->num_sge)
                        *sge = *ss->sg_list++;
        } else if (sge->length == 0 && sge->mr != NULL) {
                if (++sge->n >= IPATH_SEGSZ) {
                        if (++sge->m >= sge->mr->mapsz)
                                return;
                        sge->n = 0;
                }
                sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
                sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
        }
}
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
        return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
        return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
        data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
        data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
        return data;
}
#endif
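/*
 * Worked example (added for clarity, little-endian case): with
 * data = 0xAABBCCDD, clear_upper_bytes(data, 2, 0) first shifts left by
 * (4 - 2) * 8 = 16 bits, giving 0xCCDD0000, then right by
 * (4 - 2 - 0) * 8 = 16 bits, giving 0x0000CCDD -- i.e. the low 'n' bytes
 * are kept, positioned 'off' bytes up, with the remaining bytes cleared.
 */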
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
                    u32 length, unsigned flush_wc)
{
        u32 extra = 0;
        u32 data = 0;
        u32 last;

        while (1) {
                u32 len = ss->sge.length;
                u32 off;

                if (len > length)
                        len = length;
                if (len > ss->sge.sge_length)
                        len = ss->sge.sge_length;
                BUG_ON(len == 0);
                /* If the source address is not aligned, try to align it. */
                off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
                if (off) {
                        u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
                                            ~(sizeof(u32) - 1));
                        u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
                        u32 y;

                        y = sizeof(u32) - off;
                        if (len > y)
                                len = y;
                        if (len + extra >= sizeof(u32)) {
                                data |= set_upper_bits(v, extra *
                                                       BITS_PER_BYTE);
                                len = sizeof(u32) - extra;
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                __raw_writel(data, piobuf);
                                piobuf++;
                                extra = 0;
                                data = 0;
                        } else {
                                /* Clear unused upper bytes */
                                data |= clear_upper_bytes(v, len, extra);
                                if (len == length) {
                                        last = data;
                                        break;
                                }
                                extra += len;
                        }
                } else if (extra) {
                        /* Source address is aligned. */
                        u32 *addr = (u32 *) ss->sge.vaddr;
                        int shift = extra * BITS_PER_BYTE;
                        int ushift = 32 - shift;
                        u32 l = len;

                        while (l >= sizeof(u32)) {
                                u32 v = *addr;

                                data |= set_upper_bits(v, shift);
                                __raw_writel(data, piobuf);
                                data = get_upper_bits(v, ushift);
                                piobuf++;
                                addr++;
                                l -= sizeof(u32);
                        }
                        /*
                         * We still have 'extra' number of bytes leftover.
                         */
                        if (l) {
                                u32 v = *addr;

                                if (l + extra >= sizeof(u32)) {
                                        data |= set_upper_bits(v, shift);
                                        len -= l + extra - sizeof(u32);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        __raw_writel(data, piobuf);
                                        piobuf++;
                                        extra = 0;
                                        data = 0;
                                } else {
                                        /* Clear unused upper bytes */
                                        data |= clear_upper_bytes(v, l,
                                                                  extra);
                                        if (len == length) {
                                                last = data;
                                                break;
                                        }
                                        extra += l;
                                }
                        } else if (len == length) {
                                last = data;
                                break;
                        }
                } else if (len == length) {
                        /*
                         * Need to round up for the last dword in the
                         * packet.
                         */
                        u32 w = (len + 3) >> 2;

                        __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
                        piobuf += w - 1;
                        last = ((u32 *) ss->sge.vaddr)[w - 1];
                        break;
                } else {
                        u32 w = len >> 2;

                        __iowrite32_copy(piobuf, ss->sge.vaddr, w);
                        piobuf += w;

                        extra = len & (sizeof(u32) - 1);
                        if (extra) {
                                u32 v = ((u32 *) ss->sge.vaddr)[w];

                                /* Clear unused upper bytes */
                                data = clear_upper_bytes(v, extra, 0);
                        }
                }
                update_sge(ss, len);
                length -= len;
        }
        /* Update address before sending packet. */
        update_sge(ss, length);
        if (flush_wc) {
                /* must flush early everything before trigger word */
                ipath_flush_wc();
                __raw_writel(last, piobuf);
                /* be sure trigger word is written */
                ipath_flush_wc();
        } else
                __raw_writel(last, piobuf);
}
/*
 * Convert IB rate to delay multiplier.
 */
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 8;
        case IB_RATE_5_GBPS:   return 4;
        case IB_RATE_10_GBPS:  return 2;
        case IB_RATE_20_GBPS:  return 1;
        default:               return 0;
        }
}

/*
 * Convert delay multiplier to IB rate
 */
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
{
        switch (mult) {
        case 8:  return IB_RATE_2_5_GBPS;
        case 4:  return IB_RATE_5_GBPS;
        case 2:  return IB_RATE_10_GBPS;
        case 1:  return IB_RATE_20_GBPS;
        default: return IB_RATE_PORT_CURRENT;
        }
}
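/*
 * Example (added note): IB_RATE_2_5_GBPS (1x SDR) maps to multiplier 8
 * because the fastest rate this driver handles (4x DDR, 20 Gbps) moves a
 * given payload in one eighth of the time; ipath_mult_to_ib_rate() is the
 * exact inverse for the four supported rates, with anything else reported
 * as IB_RATE_PORT_CURRENT.
 */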
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
{
        struct ipath_verbs_txreq *tx = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        if (!list_empty(&dev->txreq_free)) {
                struct list_head *l = dev->txreq_free.next;

                list_del(l);
                tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);
        return tx;
}

static inline void put_txreq(struct ipath_ibdev *dev,
                             struct ipath_verbs_txreq *tx)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        list_add(&tx->txreq.list, &dev->txreq_free);
        spin_unlock_irqrestore(&dev->pending_lock, flags);
}
static void sdma_complete(void *cookie, int status)
{
        struct ipath_verbs_txreq *tx = cookie;
        struct ipath_qp *qp = tx->qp;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        unsigned long flags;
        enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
                IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;

        if (atomic_dec_and_test(&qp->s_dma_busy)) {
                spin_lock_irqsave(&qp->s_lock, flags);
                if (tx->wqe)
                        ipath_send_complete(qp, tx->wqe, ibs);
                if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
                     qp->s_last != qp->s_head) ||
                    (qp->s_flags & IPATH_S_WAIT_DMA))
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
                wake_up(&qp->wait_dma);
        } else if (tx->wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                ipath_send_complete(qp, tx->wqe, ibs);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }

        if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
                kfree(tx->txreq.map_addr);
        put_txreq(dev, tx);

        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}
static void decrement_dma_busy(struct ipath_qp *qp)
{
        unsigned long flags;

        if (atomic_dec_and_test(&qp->s_dma_busy)) {
                spin_lock_irqsave(&qp->s_lock, flags);
                if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
                     qp->s_last != qp->s_head) ||
                    (qp->s_flags & IPATH_S_WAIT_DMA))
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
                wake_up(&qp->wait_dma);
        }
}
/*
 * Compute the number of clock cycles of delay before sending the next packet.
 * The multipliers reflect the number of clocks for the fastest rate so
 * one tick at 4xDDR is 8 ticks at 1xSDR.
 * If the destination port will take longer to receive a packet than
 * the outgoing link can send it, we need to delay sending the next packet
 * by the difference in time it takes the receiver to receive and the sender
 * to send this packet.
 * Note that this delay is always correct for UC and RC but not always
 * optimal for UD. For UD, the destination HCA can be different for each
 * packet, in which case, we could send packets to a different destination
 * while "waiting" for the delay. The overhead for doing this without
 * HW support is more than just paying the cost of delaying some packets
 * unnecessarily.
 */
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
{
        return (rcv_mult > snd_mult) ?
                (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}
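/*
 * Worked example (added note): for plen = 512 dwords, a sender at 4x DDR
 * (snd_mult = 1) and a receiver at 1x SDR (rcv_mult = 8), the delay is
 * (512 * (8 - 1) + 1) >> 1 = 1792 ticks; when the receiver is at least as
 * fast as the sender (rcv_mult <= snd_mult), the delay is 0.
 */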
static int ipath_verbs_send_dma(struct ipath_qp *qp,
                                struct ipath_ib_header *hdr, u32 hdrwords,
                                struct ipath_sge_state *ss, u32 len,
                                u32 plen, u32 dwords)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_devdata *dd = dev->dd;
        struct ipath_verbs_txreq *tx;
        u32 *piobuf;
        u32 control;
        u32 ndesc;
        int ret;

        tx = qp->s_tx;
        if (tx) {
                qp->s_tx = NULL;
                /* resend previously constructed packet */
                atomic_inc(&qp->s_dma_busy);
                ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
                if (ret) {
                        qp->s_tx = tx;
                        decrement_dma_busy(qp);
                }
                goto bail;
        }

        tx = get_txreq(dev);
        if (!tx) {
                ret = -EBUSY;
                goto bail;
        }

        /*
         * Get the saved delay count we computed for the previous packet
         * and save the delay count for this packet to be used next time
         * we get here.
         */
        control = qp->s_pkt_delay;
        qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

        tx->qp = qp;
        atomic_inc(&qp->refcount);
        tx->wqe = qp->s_wqe;
        tx->txreq.callback = sdma_complete;
        tx->txreq.callback_cookie = tx;
        tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
                IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
        if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
                tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;

        /* VL15 packets bypass credit check */
        if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
                control |= 1ULL << 31;
                tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
        }

        if (len) {
                /*
                 * Don't try to DMA if it takes more descriptors than
                 * are available.
                 */
                ndesc = ipath_count_sge(ss, len);
                if (ndesc >= dd->ipath_sdma_descq_cnt)
                        ndesc = 0;
        } else
                ndesc = 1;
        if (ndesc) {
                tx->hdr.pbc[0] = cpu_to_le32(plen);
                tx->hdr.pbc[1] = cpu_to_le32(control);
                memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
                tx->txreq.sg_count = ndesc;
                tx->map_len = (hdrwords + 2) << 2;
                tx->txreq.map_addr = &tx->hdr;
                atomic_inc(&qp->s_dma_busy);
                ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
                if (ret) {
                        /* save ss and length in dwords */
                        tx->ss = ss;
                        tx->len = dwords;
                        qp->s_tx = tx;
                        decrement_dma_busy(qp);
                }
                goto bail;
        }

        /* Allocate a buffer and copy the header and payload to it. */
        tx->map_len = (plen + 1) << 2;
        piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto err_tx;
        }
        tx->txreq.map_addr = piobuf;
        tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
        tx->txreq.sg_count = 1;

        *piobuf++ = (__force u32) cpu_to_le32(plen);
        *piobuf++ = (__force u32) cpu_to_le32(control);
        memcpy(piobuf, hdr, hdrwords << 2);
        ipath_copy_from_sge(piobuf + hdrwords, ss, len);

        atomic_inc(&qp->s_dma_busy);
        ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
        /*
         * If we couldn't queue the DMA request, save the info
         * and try again later rather than destroying the
         * buffer and undoing the side effects of the copy.
         */
        if (ret) {
                tx->ss = NULL;
                tx->len = 0;
                qp->s_tx = tx;
                decrement_dma_busy(qp);
        }
        goto bail;

err_tx:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
        put_txreq(dev, tx);
bail:
        return ret;
}
static int ipath_verbs_send_pio(struct ipath_qp *qp,
                                struct ipath_ib_header *ibhdr, u32 hdrwords,
                                struct ipath_sge_state *ss, u32 len,
                                u32 plen, u32 dwords)
{
        struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
        u32 *hdr = (u32 *) ibhdr;
        u32 __iomem *piobuf;
        unsigned flush_wc;
        u32 control;
        int ret;
        unsigned long flags;

        piobuf = ipath_getpiobuf(dd, plen, NULL);
        if (unlikely(piobuf == NULL)) {
                ret = -EBUSY;
                goto bail;
        }

        /*
         * Get the saved delay count we computed for the previous packet
         * and save the delay count for this packet to be used next time
         * we get here.
         */
        control = qp->s_pkt_delay;
        qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);

        /* VL15 packets bypass credit check */
        if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
                control |= 1ULL << 31;

        /*
         * Write the length to the control qword plus any needed flags.
         * We have to flush after the PBC for correctness on some cpus
         * or WC buffer can be written out of order.
         */
        writeq(((u64) control << 32) | plen, piobuf);
        piobuf += 2;

        flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
        if (len == 0) {
                /*
                 * If there is just the header portion, must flush before
                 * writing last word of header for correctness, and after
                 * the last header word (trigger word).
                 */
                if (flush_wc) {
                        ipath_flush_wc();
                        __iowrite32_copy(piobuf, hdr, hdrwords - 1);
                        ipath_flush_wc();
                        __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
                        ipath_flush_wc();
                } else
                        __iowrite32_copy(piobuf, hdr, hdrwords);
                goto done;
        }

        if (flush_wc)
                ipath_flush_wc();
        __iowrite32_copy(piobuf, hdr, hdrwords);
        piobuf += hdrwords;

        /* The common case is aligned and contained in one segment. */
        if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
                   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
                u32 *addr = (u32 *) ss->sge.vaddr;

                /* Update address before sending packet. */
                update_sge(ss, len);
                if (flush_wc) {
                        __iowrite32_copy(piobuf, addr, dwords - 1);
                        /* must flush early everything before trigger word */
                        ipath_flush_wc();
                        __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
                        /* be sure trigger word is written */
                        ipath_flush_wc();
                } else
                        __iowrite32_copy(piobuf, addr, dwords);
                goto done;
        }
        copy_io(piobuf, ss, len, flush_wc);
done:
        if (qp->s_wqe) {
                spin_lock_irqsave(&qp->s_lock, flags);
                ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        }
        ret = 0;
bail:
        return ret;
}
/**
 * ipath_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 */
int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
                     u32 hdrwords, struct ipath_sge_state *ss, u32 len)
{
        struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
        u32 plen;
        int ret;
        u32 dwords = (len + 3) >> 2;

        /*
         * Calculate the send buffer trigger address.
         * The +1 counts for the pbc control dword following the pbc length.
         */
        plen = hdrwords + dwords + 1;

        /*
         * VL15 packets (IB_QPT_SMI) will always use PIO, so we
         * can defer SDMA restart until link goes ACTIVE without
         * worrying about just how we got there.
         */
        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
                ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
                                           plen, dwords);
        else
                ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
                                           plen, dwords);

        return ret;
}
int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
                            u64 *rwords, u64 *spkts, u64 *rpkts,
                            u64 *xmit_wait)
{
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc. */
                ret = -EINVAL;
                goto bail;
        }
        *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
        *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
        *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
        *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
        *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

        ret = 0;

bail:
        return ret;
}
/**
 * ipath_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_get_counters(struct ipath_devdata *dd,
                       struct ipath_verbs_counters *cntrs)
{
        struct ipath_cregs const *crp = dd->ipath_cregs;
        int ret;

        if (!(dd->ipath_flags & IPATH_INITTED)) {
                /* no hardware, freeze, etc. */
                ret = -EINVAL;
                goto bail;
        }
        cntrs->symbol_error_counter =
                ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
        cntrs->link_error_recovery_counter =
                ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
        /*
         * The link downed counter counts when the other side downs the
         * connection. We add in the number of times we downed the link
         * due to local link integrity errors to compensate.
         */
        cntrs->link_downed_counter =
                ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
        cntrs->port_rcv_errors =
                ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
                ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
                ipath_snap_cntr(dd, crp->cr_portovflcnt) +
                ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
                ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
                ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
                ipath_snap_cntr(dd, crp->cr_erricrccnt) +
                ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
                ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
                ipath_snap_cntr(dd, crp->cr_badformatcnt) +
                dd->ipath_rxfc_unsupvl_errs;
        if (crp->cr_rxotherlocalphyerrcnt)
                cntrs->port_rcv_errors +=
                        ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
        if (crp->cr_rxvlerrcnt)
                cntrs->port_rcv_errors +=
                        ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
        cntrs->port_rcv_remphys_errors =
                ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
        cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
        cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
        cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
        cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
        cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
        cntrs->local_link_integrity_errors =
                crp->cr_locallinkintegrityerrcnt ?
                ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
                ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
                 dd->ipath_lli_errs : dd->ipath_lli_errors);
        cntrs->excessive_buffer_overrun_errors =
                crp->cr_excessbufferovflcnt ?
                ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
                dd->ipath_overrun_thresh_errs;
        cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
                ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;

        ret = 0;

bail:
        return ret;
}
/**
 * ipath_ib_piobufavail - callback when a PIO buffer is available
 * @dev: the device pointer
 *
 * This is called from ipath_intr() at interrupt level when a PIO buffer is
 * available after ipath_verbs_send() returned an error that no buffers were
 * available. Return 1 if we consumed all the PIO buffers and we still have
 * QPs waiting for buffers (for now, just restart the send tasklet and
 * return zero).
 */
int ipath_ib_piobufavail(struct ipath_ibdev *dev)
{
        struct list_head *list;
        struct ipath_qp *qplist;
        struct ipath_qp *qp;
        unsigned long flags;

        if (dev == NULL)
                goto bail;

        list = &dev->piowait;
        qplist = NULL;

        spin_lock_irqsave(&dev->pending_lock, flags);
        while (!list_empty(list)) {
                qp = list_entry(list->next, struct ipath_qp, piowait);
                list_del_init(&qp->piowait);
                qp->pio_next = qplist;
                qplist = qp;
                atomic_inc(&qp->refcount);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        while (qplist != NULL) {
                qp = qplist;
                qplist = qp->pio_next;

                spin_lock_irqsave(&qp->s_lock, flags);
                if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
                        ipath_schedule_send(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);

                /* Notify ipath_destroy_qp() if it is waiting. */
                if (atomic_dec_and_test(&qp->refcount))
                        wake_up(&qp->wait);
        }

bail:
        return 0;
}
static int ipath_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ipath_ibdev *dev = to_idev(ibdev);

        memset(props, 0, sizeof(*props));

        props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
                IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
        props->page_size_cap = PAGE_SIZE;
        props->vendor_id =
                IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
        props->vendor_part_id = dev->dd->ipath_deviceid;
        props->hw_ver = dev->dd->ipath_pcirev;

        props->sys_image_guid = dev->sys_image_guid;

        props->max_mr_size = ~0ull;
        props->max_qp = ib_ipath_max_qps;
        props->max_qp_wr = ib_ipath_max_qp_wrs;
        props->max_sge = ib_ipath_max_sges;
        props->max_cq = ib_ipath_max_cqs;
        props->max_ah = ib_ipath_max_ahs;
        props->max_cqe = ib_ipath_max_cqes;
        props->max_mr = dev->lk_table.max;
        props->max_fmr = dev->lk_table.max;
        props->max_map_per_fmr = 32767;
        props->max_pd = ib_ipath_max_pds;
        props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
        props->max_qp_init_rd_atom = 255;
        /* props->max_res_rd_atom */
        props->max_srq = ib_ipath_max_srqs;
        props->max_srq_wr = ib_ipath_max_srq_wrs;
        props->max_srq_sge = ib_ipath_max_srq_sges;
        /* props->local_ca_ack_delay */
        props->atomic_cap = IB_ATOMIC_GLOB;
        props->max_pkeys = ipath_get_npkeys(dev->dd);
        props->max_mcast_grp = ib_ipath_max_mcast_grps;
        props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                props->max_mcast_grp;

        return 0;
}
const u8 ipath_cvt_physportstate[32] = {
        [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
        [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
        [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
        [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
        [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
        [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
        [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
                IB_PHYSPORTSTATE_CFG_TRAIN,
        [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
                IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
        [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
        [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
};

u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
{
        return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}
static int ipath_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ipath_ibdev *dev = to_idev(ibdev);
        struct ipath_devdata *dd = dev->dd;
        enum ib_mtu mtu;
        u16 lid = dd->ipath_lid;
        u64 ibcstat;

        memset(props, 0, sizeof(*props));
        props->lid = lid ? lid : __constant_be16_to_cpu(IB_LID_PERMISSIVE);
        props->lmc = dd->ipath_lmc;
        props->sm_lid = dev->sm_lid;
        props->sm_sl = dev->sm_sl;
        ibcstat = dd->ipath_lastibcstat;
        /* map LinkState to IB portinfo values. */
        props->state = ipath_ib_linkstate(dd, ibcstat) + 1;

        /* See phys_state_show() */
        props->phys_state = /* MEA: assumes shift == 0 */
                ipath_cvt_physportstate[dd->ipath_lastibcstat &
                                        dd->ibcs_lts_mask];
        props->port_cap_flags = dev->port_cap_flags;
        props->gid_tbl_len = 1;
        props->max_msg_sz = 0x80000000;
        props->pkey_tbl_len = ipath_get_npkeys(dd);
        props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
                dev->z_pkey_violations;
        props->qkey_viol_cntr = dev->qkey_violations;
        props->active_width = dd->ipath_link_width_active;
        /* See rate_show() */
        props->active_speed = dd->ipath_link_speed_active;
        props->max_vl_num = 1;          /* VLCap = VL0 */
        props->init_type_reply = 0;

        props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
        switch (dd->ipath_ibmtu) {
        case 4096:
                mtu = IB_MTU_4096;
                break;
        case 2048:
                mtu = IB_MTU_2048;
                break;
        case 1024:
                mtu = IB_MTU_1024;
                break;
        case 512:
                mtu = IB_MTU_512;
                break;
        case 256:
                mtu = IB_MTU_256;
                break;
        default:
                mtu = IB_MTU_2048;
        }
        props->active_mtu = mtu;
        props->subnet_timeout = dev->subnet_timeout;

        return 0;
}
static int ipath_modify_device(struct ib_device *device,
                               int device_modify_mask,
                               struct ib_device_modify *device_modify)
{
        int ret;

        if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
                                   IB_DEVICE_MODIFY_NODE_DESC)) {
                ret = -EOPNOTSUPP;
                goto bail;
        }

        if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
                memcpy(device->node_desc, device_modify->node_desc, 64);

        if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
                to_idev(device)->sys_image_guid =
                        cpu_to_be64(device_modify->sys_image_guid);

        ret = 0;

bail:
        return ret;
}
static int ipath_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct ipath_ibdev *dev = to_idev(ibdev);

        dev->port_cap_flags |= props->set_port_cap_mask;
        dev->port_cap_flags &= ~props->clr_port_cap_mask;
        if (port_modify_mask & IB_PORT_SHUTDOWN)
                ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
        if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
                dev->qkey_violations = 0;
        return 0;
}
static int ipath_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ipath_ibdev *dev = to_idev(ibdev);
        int ret;

        if (index >= 1) {
                ret = -EINVAL;
                goto bail;
        }
        gid->global.subnet_prefix = dev->gid_prefix;
        gid->global.interface_id = dev->dd->ipath_guid;

        ret = 0;

bail:
        return ret;
}
static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibdev);
        struct ipath_pd *pd;
        struct ib_pd *ret;

        /*
         * This is actually totally arbitrary.  Some correctness tests
         * assume there's a maximum number of PDs that can be allocated.
         * We don't actually have this limit, but we fail the test if
         * we allow allocations of more than we report for this value.
         */

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        spin_lock(&dev->n_pds_lock);
        if (dev->n_pds_allocated == ib_ipath_max_pds) {
                spin_unlock(&dev->n_pds_lock);
                kfree(pd);
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        dev->n_pds_allocated++;
        spin_unlock(&dev->n_pds_lock);

        /* ib_alloc_pd() will initialize pd->ibpd. */
        pd->user = udata != NULL;

        ret = &pd->ibpd;

bail:
        return ret;
}
static int ipath_dealloc_pd(struct ib_pd *ibpd)
{
        struct ipath_pd *pd = to_ipd(ibpd);
        struct ipath_ibdev *dev = to_idev(ibpd->device);

        spin_lock(&dev->n_pds_lock);
        dev->n_pds_allocated--;
        spin_unlock(&dev->n_pds_lock);

        kfree(pd);

        return 0;
}
/**
 * ipath_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        struct ipath_ah *ah;
        struct ib_ah *ret;
        struct ipath_ibdev *dev = to_idev(pd->device);
        unsigned long flags;

        /* A multicast address requires a GRH (see ch. 8.4.1). */
        if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
            ah_attr->dlid != IPATH_PERMISSIVE_LID &&
            !(ah_attr->ah_flags & IB_AH_GRH)) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        if (ah_attr->dlid == 0) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        if (ah_attr->port_num < 1 ||
            ah_attr->port_num > pd->device->phys_port_cnt) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        spin_lock_irqsave(&dev->n_ahs_lock, flags);
        if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
                spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
                kfree(ah);
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        dev->n_ahs_allocated++;
        spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

        /* ib_create_ah() will initialize ah->ibah. */
        ah->attr = *ah_attr;
        ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);

        ret = &ah->ibah;

bail:
        return ret;
}
/**
 * ipath_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int ipath_destroy_ah(struct ib_ah *ibah)
{
        struct ipath_ibdev *dev = to_idev(ibah->device);
        struct ipath_ah *ah = to_iah(ibah);
        unsigned long flags;

        spin_lock_irqsave(&dev->n_ahs_lock, flags);
        dev->n_ahs_allocated--;
        spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

        kfree(ah);

        return 0;
}
static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
        struct ipath_ah *ah = to_iah(ibah);

        *ah_attr = ah->attr;
        ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);

        return 0;
}
/**
 * ipath_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_get_npkeys(struct ipath_devdata *dd)
{
        return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}
/**
 * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
{
        unsigned ret;

        if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
                ret = 0;
        else
                ret = dd->ipath_pd[0]->port_pkeys[index];

        return ret;
}
static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                            u16 *pkey)
{
        struct ipath_ibdev *dev = to_idev(ibdev);
        int ret;

        if (index >= ipath_get_npkeys(dev->dd)) {
                ret = -EINVAL;
                goto bail;
        }
        *pkey = ipath_get_pkey(dev->dd, index);
        ret = 0;

bail:
        return ret;
}
/**
 * ipath_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the InfiniPath driver
 */
static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct ipath_ucontext *context;
        struct ib_ucontext *ret;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        ret = &context->ibucontext;

bail:
        return ret;
}

static int ipath_dealloc_ucontext(struct ib_ucontext *context)
{
        kfree(to_iucontext(context));
        return 0;
}
static int ipath_verbs_register_sysfs(struct ib_device *dev);

static void __verbs_timer(unsigned long arg)
{
        struct ipath_devdata *dd = (struct ipath_devdata *) arg;

        /* Handle verbs layer timeouts. */
        ipath_ib_timer(dd->verbs_dev);

        mod_timer(&dd->verbs_timer, jiffies + 1);
}
static int enable_timer(struct ipath_devdata *dd)
{
        /*
         * Early chips had a design flaw where the chip and kernel idea
         * of the tail register don't always agree, and therefore we won't
         * get an interrupt on the next packet received.
         * If the board supports per packet receive interrupts, use it.
         * Otherwise, the timer function periodically checks for packets
         * to cover this case.
         * Either way, the timer is needed for verbs layer related
         * processing.
         */
        if (dd->ipath_flags & IPATH_GPIO_INTR) {
                ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
                                 0x2074076542310ULL);
                /* Enable GPIO bit 2 interrupt */
                dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
                ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
                                 dd->ipath_gpio_mask);
        }

        init_timer(&dd->verbs_timer);
        dd->verbs_timer.function = __verbs_timer;
        dd->verbs_timer.data = (unsigned long)dd;
        dd->verbs_timer.expires = jiffies + 1;
        add_timer(&dd->verbs_timer);

        return 0;
}
static int disable_timer(struct ipath_devdata *dd)
{
        /* Disable GPIO bit 2 interrupt */
        if (dd->ipath_flags & IPATH_GPIO_INTR) {
                dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
                ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
                                 dd->ipath_gpio_mask);
                /*
                 * We might want to undo changes to debugportselect,
                 * but how?
                 */
        }

        del_timer_sync(&dd->verbs_timer);

        return 0;
}
/**
 * ipath_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return the allocated ipath_ibdev pointer or NULL on error.
 */
int ipath_register_ib_device(struct ipath_devdata *dd)
{
        struct ipath_verbs_counters cntrs;
        struct ipath_ibdev *idev;
        struct ib_device *dev;
        struct ipath_verbs_txreq *tx;
        unsigned i;
        int ret;

        idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
        if (idev == NULL) {
                ret = -ENOMEM;
                goto bail;
        }

        dev = &idev->ibdev;

        if (dd->ipath_sdma_descq_cnt) {
                tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
                             GFP_KERNEL);
                if (tx == NULL) {
                        ret = -ENOMEM;
                        goto err_tx;
                }
        } else
                tx = NULL;
        idev->txreq_bufs = tx;

        /* Only need to initialize non-zero fields. */
        spin_lock_init(&idev->n_pds_lock);
        spin_lock_init(&idev->n_ahs_lock);
        spin_lock_init(&idev->n_cqs_lock);
        spin_lock_init(&idev->n_qps_lock);
        spin_lock_init(&idev->n_srqs_lock);
        spin_lock_init(&idev->n_mcast_grps_lock);

        spin_lock_init(&idev->qp_table.lock);
        spin_lock_init(&idev->lk_table.lock);
        idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
        /* Set the prefix to the default value (see ch. 4.1.1) */
        idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);

        ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
        if (ret)
                goto err_qp;

        /*
         * The top ib_ipath_lkey_table_size bits are used to index the
         * table.  The lower 8 bits can be owned by the user (copied from
         * the LKEY).  The remaining bits act as a generation number or tag.
         */
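        /*
         * Example layout (added note, not in the original source): with the
         * default lkey_table_size of 12, an LKEY breaks down as
         *
         *   bits [31:20]  table index (2^12 = 4096 entries)
         *   bits [19:8]   generation/tag
         *   bits [7:0]    user-owned bits copied from the requested LKEY
         */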
        idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
        idev->lk_table.table = kzalloc(idev->lk_table.max *
                                       sizeof(*idev->lk_table.table),
                                       GFP_KERNEL);
        if (idev->lk_table.table == NULL) {
                ret = -ENOMEM;
                goto err_lk;
        }
        INIT_LIST_HEAD(&idev->pending_mmaps);
        spin_lock_init(&idev->pending_lock);
        idev->mmap_offset = PAGE_SIZE;
        spin_lock_init(&idev->mmap_offset_lock);
        INIT_LIST_HEAD(&idev->pending[0]);
        INIT_LIST_HEAD(&idev->pending[1]);
        INIT_LIST_HEAD(&idev->pending[2]);
        INIT_LIST_HEAD(&idev->piowait);
        INIT_LIST_HEAD(&idev->rnrwait);
        INIT_LIST_HEAD(&idev->txreq_free);
        idev->pending_index = 0;
        idev->port_cap_flags =
                IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
        if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
                idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
        idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
        idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
        idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
        idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
        idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

        /* Snapshot current HW counters to "clear" them. */
        ipath_get_counters(dd, &cntrs);
        idev->z_symbol_error_counter = cntrs.symbol_error_counter;
        idev->z_link_error_recovery_counter =
                cntrs.link_error_recovery_counter;
        idev->z_link_downed_counter = cntrs.link_downed_counter;
        idev->z_port_rcv_errors = cntrs.port_rcv_errors;
        idev->z_port_rcv_remphys_errors =
                cntrs.port_rcv_remphys_errors;
        idev->z_port_xmit_discards = cntrs.port_xmit_discards;
        idev->z_port_xmit_data = cntrs.port_xmit_data;
        idev->z_port_rcv_data = cntrs.port_rcv_data;
        idev->z_port_xmit_packets = cntrs.port_xmit_packets;
        idev->z_port_rcv_packets = cntrs.port_rcv_packets;
        idev->z_local_link_integrity_errors =
                cntrs.local_link_integrity_errors;
        idev->z_excessive_buffer_overrun_errors =
                cntrs.excessive_buffer_overrun_errors;
        idev->z_vl15_dropped = cntrs.vl15_dropped;

        for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
                list_add(&tx->txreq.list, &idev->txreq_free);

        /*
         * The system image GUID is supposed to be the same for all
         * IB HCAs in a single system but since there can be other
         * device types in the system, we can't be sure this is unique.
         */
        if (!sys_image_guid)
                sys_image_guid = dd->ipath_guid;
        idev->sys_image_guid = sys_image_guid;
        idev->ib_unit = dd->ipath_unit;
        idev->dd = dd;

        strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
        dev->owner = THIS_MODULE;
        dev->node_guid = dd->ipath_guid;
        dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
        dev->uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
                (1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
                (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_POST_SEND)           |
                (1ull << IB_USER_VERBS_CMD_POST_RECV)           |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
        dev->node_type = RDMA_NODE_IB_CA;
        dev->phys_port_cnt = 1;
        dev->num_comp_vectors = 1;
        dev->dma_device = &dd->pcidev->dev;
        dev->query_device = ipath_query_device;
        dev->modify_device = ipath_modify_device;
        dev->query_port = ipath_query_port;
        dev->modify_port = ipath_modify_port;
        dev->query_pkey = ipath_query_pkey;
        dev->query_gid = ipath_query_gid;
        dev->alloc_ucontext = ipath_alloc_ucontext;
        dev->dealloc_ucontext = ipath_dealloc_ucontext;
        dev->alloc_pd = ipath_alloc_pd;
        dev->dealloc_pd = ipath_dealloc_pd;
        dev->create_ah = ipath_create_ah;
        dev->destroy_ah = ipath_destroy_ah;
        dev->query_ah = ipath_query_ah;
        dev->create_srq = ipath_create_srq;
        dev->modify_srq = ipath_modify_srq;
        dev->query_srq = ipath_query_srq;
        dev->destroy_srq = ipath_destroy_srq;
        dev->create_qp = ipath_create_qp;
        dev->modify_qp = ipath_modify_qp;
        dev->query_qp = ipath_query_qp;
        dev->destroy_qp = ipath_destroy_qp;
        dev->post_send = ipath_post_send;
        dev->post_recv = ipath_post_receive;
        dev->post_srq_recv = ipath_post_srq_receive;
        dev->create_cq = ipath_create_cq;
        dev->destroy_cq = ipath_destroy_cq;
        dev->resize_cq = ipath_resize_cq;
        dev->poll_cq = ipath_poll_cq;
        dev->req_notify_cq = ipath_req_notify_cq;
        dev->get_dma_mr = ipath_get_dma_mr;
        dev->reg_phys_mr = ipath_reg_phys_mr;
        dev->reg_user_mr = ipath_reg_user_mr;
        dev->dereg_mr = ipath_dereg_mr;
        dev->alloc_fmr = ipath_alloc_fmr;
        dev->map_phys_fmr = ipath_map_phys_fmr;
        dev->unmap_fmr = ipath_unmap_fmr;
        dev->dealloc_fmr = ipath_dealloc_fmr;
        dev->attach_mcast = ipath_multicast_attach;
        dev->detach_mcast = ipath_multicast_detach;
        dev->process_mad = ipath_process_mad;
        dev->mmap = ipath_mmap;
        dev->dma_ops = &ipath_dma_mapping_ops;

        snprintf(dev->node_desc, sizeof(dev->node_desc),
                 IPATH_IDSTR " %s", init_utsname()->nodename);

        ret = ib_register_device(dev);
        if (ret)
                goto err_reg;

        if (ipath_verbs_register_sysfs(dev))
                goto err_class;

        enable_timer(dd);

        goto bail;

err_class:
        ib_unregister_device(dev);
err_reg:
        kfree(idev->lk_table.table);
err_lk:
        kfree(idev->qp_table.table);
err_qp:
        kfree(idev->txreq_bufs);
err_tx:
        ib_dealloc_device(dev);
        ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
        idev = NULL;

bail:
        dd->verbs_dev = idev;

        return ret;
}
void ipath_unregister_ib_device(struct ipath_ibdev *dev)
{
        struct ib_device *ibdev = &dev->ibdev;
        u32 qps_inuse;

        ib_unregister_device(ibdev);

        disable_timer(dev->dd);

        if (!list_empty(&dev->pending[0]) ||
            !list_empty(&dev->pending[1]) ||
            !list_empty(&dev->pending[2]))
                ipath_dev_err(dev->dd, "pending list not empty!\n");
        if (!list_empty(&dev->piowait))
                ipath_dev_err(dev->dd, "piowait list not empty!\n");
        if (!list_empty(&dev->rnrwait))
                ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
        if (!ipath_mcast_tree_empty())
                ipath_dev_err(dev->dd, "multicast table memory leak!\n");
        /*
         * Note that ipath_unregister_ib_device() can be called before all
         * the QPs are destroyed!
         */
        qps_inuse = ipath_free_all_qps(&dev->qp_table);
        if (qps_inuse)
                ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
                              qps_inuse);
        kfree(dev->qp_table.table);
        kfree(dev->lk_table.table);
        kfree(dev->txreq_bufs);
        ib_dealloc_device(ibdev);
}
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct ipath_ibdev *dev =
                container_of(device, struct ipath_ibdev, ibdev.dev);

        return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct ipath_ibdev *dev =
                container_of(device, struct ipath_ibdev, ibdev.dev);
        int ret;

        ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
        if (ret < 0)
                goto bail;
        strcat(buf, "\n");
        ret = strlen(buf);

bail:
        return ret;
}
static ssize_t show_stats(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct ipath_ibdev *dev =
                container_of(device, struct ipath_ibdev, ibdev.dev);
        int i;
        int len;

        len = sprintf(buf,
                      "RC resends  %d\n"
                      "RC no QACK  %d\n"
                      "RC ACKs     %d\n"
                      "RC SEQ NAKs %d\n"
                      "RC RDMA seq %d\n"
                      "RC RNR NAKs %d\n"
                      "RC OTH NAKs %d\n"
                      "RC timeouts %d\n"
                      "RC RDMA dup %d\n"
                      "piobuf wait %d\n"
                      "unaligned   %d\n"
                      "PKT drops   %d\n"
                      "WQE errs    %d\n",
                      dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
                      dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
                      dev->n_other_naks, dev->n_timeouts,
                      dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
                      dev->n_pkt_drops, dev->n_wqe_errs);
        for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
                const struct ipath_opcode_stats *si = &dev->opstats[i];

                if (!si->n_packets && !si->n_bytes)
                        continue;
                len += sprintf(buf + len, "%02x %llu/%llu\n", i,
                               (unsigned long long) si->n_packets,
                               (unsigned long long) si->n_bytes);
        }
        return len;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);

static struct device_attribute *ipath_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_hca_type,
        &dev_attr_board_id,
        &dev_attr_stats
};
static int ipath_verbs_register_sysfs(struct ib_device *dev)
{
        int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
                if (device_create_file(&dev->dev,
                                       ipath_class_attributes[i])) {
                        ret = 1;
                        goto bail;
                }

        ret = 0;

bail:
        return ret;
}