/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
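/*
 * Validate a receive WQE's scatter/gather list and initialize the SGE
 * state for an incoming packet: init_sge() returns 1 on success with
 * *lengthp set to the total receive buffer length, or 0 after queuing
 * an IB_WC_LOC_PROT_ERR completion when an LKEY check fails.
 */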
static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		    u32 *lengthp, struct ipath_sge_state *ss)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	int user = to_ipd(qp->ibqp.pd)->user;
	int i, j, ret;
	struct ib_wc wc;

	*lengthp = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if ((user && wqe->sg_list[i].lkey == 0) ||
		    !ipath_lkey_ok(&dev->lk_table,
				   j ? &ss->sg_list[j - 1] : &ss->sge,
				   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		*lengthp += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ret = 1;
	goto bail;
bad_lkey:
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	/* Signal solicited completion event. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
/**
 * ipath_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @ss: the SGE state of the data to send
 * @length: the length of the data to send
 * @wr: the work request
 * @wc: the work completion entry
 *
 * This is called from ipath_post_ud_send() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling ipath_ud_rcv()
 * while this is being called.
 */
static void ipath_ud_loopback(struct ipath_qp *sqp,
			      struct ipath_sge_state *ss,
			      u32 length, struct ib_send_wr *wr,
			      struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_sge_state rsge;
	struct ipath_sge *sge;
	struct ipath_rwq *wq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	u32 rlen;

	qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
	if (!qp)
		return;
	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (unlikely(qp->ibqp.qp_num &&
		     ((int) wr->wr.ud.remote_qkey < 0
		      ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
		/* XXX OK to lose a count once in a while. */
		dev->qkey_violations++;
		dev->n_pkt_drops++;
		goto done;
	}
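	/*
	 * Worked example of the rule above: a WR qkey of 0x80000000 or
	 * above casts to a negative int, so the QP context's own qkey is
	 * compared (always matching); a small qkey such as 0x1234 is
	 * compared against qp->qkey directly.
	 */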
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc->byte_len = length + sizeof(struct ib_grh);

	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->imm_data = wr->imm_data;
	} else {
		wc->wc_flags = 0;
		wc->imm_data = 0;
	}
	if (wr->num_sge > 1) {
		rsge.sg_list = kmalloc((wr->num_sge - 1) *
				       sizeof(struct ipath_sge),
				       GFP_ATOMIC);
		if (!rsge.sg_list)
			goto done;
	} else
		rsge.sg_list = NULL;
	/*
	 * Get the next work request entry to find where to put the data.
	 * Note that it is safe to drop the lock after changing rq->tail
	 * since ipath_post_receive() won't fill the empty slot.
	 */
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}
	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	while (1) {
		if (unlikely(tail == wq->head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			dev->n_pkt_drops++;
			goto bail_sge;
		}
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
		if (init_sge(qp, wqe, &rlen, &rsge))
			break;
		wq->tail = tail;
	}
	/* Silently drop packets which are too big. */
	if (wc->byte_len > rlen) {
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto bail_sge;
	}
	wq->tail = tail;
	wc->wr_id = wqe->wr_id;
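	/*
	 * If this QP receives from an SRQ with an armed limit, the block
	 * below recomputes the number of WQEs left after consuming this
	 * one and, when the count drops below the limit, disarms it and
	 * delivers the asynchronous IB_EVENT_SRQ_LIMIT_REACHED event.
	 */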
	if (handler) {
		u32 n;

		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
		} else
			spin_unlock_irqrestore(&rq->lock, flags);
	} else
		spin_unlock_irqrestore(&rq->lock, flags);
	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
	if (ah_attr->ah_flags & IB_AH_GRH) {
		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
		wc->wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&rsge, sizeof(struct ib_grh));
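	/*
	 * Copy the send SGE list into the receive buffer.  Each pass moves
	 * the largest run that fits both the current SGE and the remaining
	 * length, then advances the SGE, stepping to the next memory region
	 * segment (IPATH_SEGSZ entries per map) once one is exhausted.
	 */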
	sge = &ss->sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		ipath_copy_sge(&rsge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->vendor_err = 0;
	wc->qp_num = qp->ibqp.qp_num;
	wc->src_qp = sqp->ibqp.qp_num;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc->pkey_index = 0;
	wc->slid = dev->dd->ipath_lid |
		(ah_attr->src_path_bits &
		 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
	wc->sl = ah_attr->sl;
	wc->dlid_path_bits =
		ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
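	/*
	 * The low three bits of mkeyprot_resv_lmc hold the port LMC, and
	 * (1 << lmc) - 1 masks off everything but a LID's path bits: with
	 * LMC = 2 and DLID 0x1235, the saved path bits are 0x1235 & 3 = 1.
	 */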
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
		       wr->send_flags & IB_SEND_SOLICITED);
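	/*
	 * Common exit: free any temporary SGE list, then drop the QP
	 * reference taken by ipath_lookup_qpn() and wake anyone waiting
	 * for the QP to quiesce.
	 */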
bail_sge:
	kfree(rsge.sg_list);
done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * ipath_post_ud_send - post a UD send on QP
 * @qp: the QP
 * @wr: the work request
 *
 * Note that we actually send the data as it is posted instead of putting
 * the request into a ring buffer.  If we wanted to use a ring buffer,
 * we would need to save a reference to the destination address in the SWQE.
 */
int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct ipath_sge_state ss;
	struct ipath_sge *sg_list = NULL;
	struct ib_wc wc;
	u32 hwords;
	u32 nwords;
	u32 len;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int i;
	int ret;
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
		ret = 0;
		goto bail;
	}

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -EINVAL;
		goto bail;
	}
	if (wr->num_sge > 1) {
		sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
				  GFP_ATOMIC);
		if (!sg_list) {
			ret = -ENOMEM;
			goto bail;
		}
	}

	/* Check the buffer to send. */
	ss.sg_list = sg_list;
	ss.sge.mr = NULL;
	ss.sge.vaddr = NULL;
	ss.sge.length = 0;
	ss.sge.sge_length = 0;
	ss.num_sge = 0;
	len = 0;
	for (i = 0; i < wr->num_sge; i++) {
		/* Check LKEY */
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ?
				   sg_list + ss.num_sge - 1 : &ss.sge,
				   &wr->sg_list[i], 0)) {
			ret = -EINVAL;
			goto bail;
		}
		len += wr->sg_list[i].length;
		ss.num_sge++;
	}
	/* Check for invalid packet size. */
	if (len > dev->dd->ipath_ibmtu) {
		ret = -EINVAL;
		goto bail;
	}
	extra_bytes = (4 - len) & 3;
	nwords = (len + extra_bytes) >> 2;
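	/*
	 * Round the payload up to a 4-byte boundary.  For example, with
	 * len = 13: extra_bytes = (4 - 13) & 3 = 3 pad bytes and
	 * nwords = (13 + 3) >> 2 = 4 32-bit words on the wire.
	 */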
	/* Construct the header. */
	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
	if (ah_attr->dlid == 0) {
		ret = -EINVAL;
		goto bail;
	}
	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
			dev->n_multicast_xmit++;
		else
			dev->n_unicast_xmit++;
	} else {
		dev->n_unicast_xmit++;
		lid = ah_attr->dlid &
			~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		if (unlikely(lid == dev->dd->ipath_lid)) {
			/*
			 * Pass in an uninitialized ib_wc to save stack
			 * space.
			 */
			ipath_ud_loopback(qp, &ss, len, wr, &wc);
			goto done;
		}
	}
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		hwords = 17;
		lrh0 = IPATH_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		qp->s_hdr.u.l.grh.version_tclass_flow =
			cpu_to_be32((6 << 28) |
				    (ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		qp->s_hdr.u.l.grh.paylen =
			cpu_to_be16(((wr->opcode ==
				      IB_WR_SEND_WITH_IMM ? 6 : 5) +
				     nwords + SIZE_OF_CRC) << 2);
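		/*
		 * paylen counts everything after the GRH in bytes: BTH
		 * (3 words) + DETH (2 words) + one optional immediate
		 * word, plus the padded payload and the ICRC, times 4.
		 */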
		/* next_hdr is defined by C8-7 in ch. 8.4.1 */
		qp->s_hdr.u.l.grh.next_hdr = 0x1B;
		qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
		/* The SGID is 32-bit aligned. */
		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
			dev->gid_prefix;
		qp->s_hdr.u.l.grh.sgid.global.interface_id =
			dev->dd->ipath_guid;
		qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs; the spec does not specify what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		hwords = 7;
		lrh0 = IPATH_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wr->imm_data;
		wc.imm_data = wr->imm_data;
		hwords += 1;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else if (wr->opcode == IB_WR_SEND) {
		wc.imm_data = 0;
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	} else {
		ret = -EINVAL;
		goto bail;
	}
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000;	/* Set VL (see ch. 13.5.3.1) */
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
	lid = dev->dd->ipath_lid;
	if (lid) {
		lid |= ah_attr->src_path_bits &
			((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		bth0 |= 1 << 23;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
		ipath_get_pkey(dev->dd, qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
		cpu_to_be32(wr->wr.ud.remote_qpn);
	/* XXX Could lose a PSN count but not worth locking */
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wr->wr.ud.remote_qkey < 0 ?
					 qp->qkey : wr->wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
			     len, &ss))
		dev->n_no_piobuf++;
done:
	/* Queue the completion status entry. */
	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
	    (wr->send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wr->wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.vendor_err = 0;
		wc.opcode = IB_WC_SEND;
		wc.byte_len = len;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = 0;
		wc.wc_flags = 0;
		/* XXX initialize other fields? */
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
	}

	ret = 0;

bail:
	kfree(sg_list);
	return ret;
}
/**
 * ipath_ud_rcv - receive an incoming UD packet
 * @dev: the device the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;
	int header_in_data;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;	/* LRH + BTH + DETH */
		qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
		src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8;	/* LRH + GRH + BTH + DETH */
		/*
		 * The header with GRH is 68 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 12
		 * bytes of the IB header are in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			qkey = be32_to_cpu(((__be32 *) data)[1]);
			src_qp = be32_to_cpu(((__be32 *) data)[2]);
		} else {
			qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
			src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		}
	}
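	/*
	 * When header_in_data is set, those last 12 header bytes begin the
	 * data buffer: word 0 is the final BTH word (the PSN), so the DETH
	 * qkey and source QP fall at words 1 and 2, matching the indices
	 * used above.
	 */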
	src_qp &= IPATH_QPN_MASK;
	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
			dev->n_pkt_drops++;
			goto bail;
		}
		if (unlikely(qkey != qp->qkey)) {
			/* XXX OK to lose a count once in a while. */
			dev->qkey_violations++;
			dev->n_pkt_drops++;
			goto bail;
		}
	} else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
		   hdr->lrh[3] == IB_LID_PERMISSIVE) {
		struct ib_smp *smp = (struct ib_smp *) data;

		if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			dev->n_pkt_drops++;
			goto bail;
		}
	}
	/* Get the number of bytes the message was padded by. */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4))) {
		/* Drop incomplete packets. */
		dev->n_pkt_drops++;
		goto bail;
	}
	tlen -= hdrsize + pad + 4;
	/* Drop invalid MAD packets (see 13.5.3.1). */
	if (unlikely((qp->ibqp.qp_num == 0 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
		     (qp->ibqp.qp_num == 1 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
		dev->n_pkt_drops++;
		goto bail;
	}
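	/*
	 * (be16_to_cpu(hdr->lrh[0]) >> 12) extracts the LRH's 4-bit VL
	 * field: SMPs for QP0 must arrive on VL15 and GSI MADs for QP1
	 * must not, and either way a MAD is exactly 256 payload bytes.
	 */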
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);
	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else
			wc.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		hdrsize += sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		dev->n_pkt_drops++;
		goto bail;
	}
	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_reuse_sge)
		qp->r_reuse_sge = 0;
	else if (!ipath_get_rwqe(qp, 0)) {
		/*
		 * Count VL15 packets dropped due to no receive buffer.
		 * Otherwise, count them as buffer overruns, since the HW
		 * can usually receive packets even when no QP has a
		 * posted receive buffer.
		 */
		if (qp->ibqp.qp_num == 0)
			dev->n_vl15_dropped++;
		else
			dev->rcv_errors++;
		goto bail;
	}
	/* Silently drop packets which are too big. */
	if (wc.byte_len > qp->r_len) {
		qp->r_reuse_sge = 1;
		dev->n_pkt_drops++;
		goto bail;
	}
	if (has_grh) {
		ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			       sizeof(struct ib_grh));
		wc.wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
	ipath_copy_sge(&qp->r_sge, data,
		       wc.byte_len - sizeof(struct ib_grh));
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = src_qp;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
664 * Save the LMC lower bits if the destination LID is a unicast LID.
666 wc
.dlid_path_bits
= dlid
>= IPATH_MULTICAST_LID_BASE
? 0 :
667 dlid
& ((1 << (dev
->mkeyprot_resv_lmc
& 7)) - 1);
668 /* Signal completion event if the solicited bit is set. */
669 ipath_cq_enter(to_icq(qp
->ibqp
.recv_cq
), &wc
,
671 __constant_cpu_to_be32(1 << 23)) != 0);