/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
#include "ips_common.h"
/**
 * ipath_ud_loopback - handle send on loopback QPs
 * @sqp: the QP
 * @ss: the SGE state
 * @length: the length of the data to send
 * @wr: the work request
 * @wc: the work completion entry
 *
 * This is called from ipath_post_ud_send() to forward a WQE addressed
 * to the same HCA.
 */
static void ipath_ud_loopback(struct ipath_qp *sqp,
			      struct ipath_sge_state *ss,
			      u32 length, struct ib_send_wr *wr,
			      struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_sge_state rsge;
	struct ipath_sge *sge;
	struct ipath_rwqe *wqe;
	qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
	if (!qp)
		return;

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (unlikely(qp->ibqp.qp_num &&
		     ((int) wr->wr.ud.remote_qkey < 0
		      ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
		/* XXX OK to lose a count once in a while. */
		dev->qkey_violations++;
		dev->n_pkt_drops++;
		goto done;
	}
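	/*
	 * Worked example of the qkey selection above: a WR qkey of
	 * 0x80000000 has the high order bit set, so the (int) cast makes
	 * it negative and the QP's own qkey is used instead; a WR qkey
	 * of 0x00001234 is used as-is and must equal qp->qkey.
	 */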
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc->byte_len = length + sizeof(struct ib_grh);

	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->imm_data = wr->imm_data;
	} else {
		wc->wc_flags = 0;
		wc->imm_data = 0;
	}
	/*
	 * Get the next work request entry to find where to put the data.
	 * Note that it is safe to drop the lock after changing rq->tail
	 * since ipath_post_receive() won't fill the empty slot.
	 */
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		rq = &srq->rq;
	} else {
		srq = NULL;
		rq = &qp->r_rq;
	}
	spin_lock_irqsave(&rq->lock, flags);
	if (rq->tail == rq->head) {
		/* Receive queue is empty; drop the packet. */
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto done;
	}
	/* Silently drop packets which are too big. */
	wqe = get_rwqe_ptr(rq, rq->tail);
	if (wc->byte_len > wqe->length) {
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto done;
	}
	wc->wr_id = wqe->wr_id;
	rsge.sge = wqe->sg_list[0];
	rsge.sg_list = wqe->sg_list + 1;
	rsge.num_sge = wqe->num_sge;
	if (++rq->tail >= rq->size)
		rq->tail = 0;
	if (srq && srq->ibsrq.event_handler) {
		u32 n;

		if (rq->head < rq->tail)
			n = rq->size + rq->head - rq->tail;
		else
			n = rq->head - rq->tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			srq->ibsrq.event_handler(&ev,
						 srq->ibsrq.srq_context);
		} else
			spin_unlock_irqrestore(&rq->lock, flags);
	} else
		spin_unlock_irqrestore(&rq->lock, flags);
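	/*
	 * Worked example of the occupancy computation above: with a
	 * ring of rq->size = 8, rq->head = 2 and rq->tail = 6, head has
	 * wrapped, so n = 8 + 2 - 6 = 4 receive WQEs remain; an armed
	 * SRQ limit of 5 would fire the limit event here.
	 */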
	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
	if (ah_attr->ah_flags & IB_AH_GRH) {
		ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
		wc->wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&rsge, sizeof(struct ib_grh));
	sge = &ss->sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		ipath_copy_sge(&rsge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
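	/*
	 * Note on the loop above: an ipath MR stores its segments in two
	 * levels, mr->map[m]->segs[n], with IPATH_SEGSZ segments per map
	 * entry, so (m, n) is advanced like a two-level index whenever
	 * the current segment runs out before the SGE does.
	 */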
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->vendor_err = 0;
	wc->qp_num = qp->ibqp.qp_num;
	wc->src_qp = sqp->ibqp.qp_num;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc->pkey_index = 0;
	wc->slid = ipath_layer_get_lid(dev->dd) |
		(ah_attr->src_path_bits &
		 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
	wc->sl = ah_attr->sl;
	wc->dlid_path_bits =
		ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
		       wr->send_flags & IB_SEND_SOLICITED);
done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * ipath_post_ud_send - post a UD send on QP
 * @qp: the QP
 * @wr: the work request
 *
 * Note that we actually send the data as it is posted instead of putting
 * the request into a ring buffer.  If we wanted to use a ring buffer,
 * we would need to save a reference to the destination address in the SWQE.
 */
int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct ipath_sge_state ss;
	struct ipath_sge *sg_list;
	struct ib_wc wc;
	u32 hwords;
	u32 nwords;
	u32 len;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int i;
	int ret;
	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
		ret = 0;
		goto bail;
	}

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -EINVAL;
		goto bail;
	}

	if (wr->num_sge > 1) {
		sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
				  GFP_ATOMIC);
		if (!sg_list) {
			ret = -ENOMEM;
			goto bail;
		}
	} else
		sg_list = NULL;
	/* Check the buffer to send. */
	ss.sg_list = sg_list;
	ss.sge.mr = NULL;
	ss.sge.vaddr = NULL;
	ss.sge.length = 0;
	ss.sge.sge_length = 0;
	ss.num_sge = 0;
	len = 0;
	for (i = 0; i < wr->num_sge; i++) {
		/* Check LKEY */
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			ret = -EINVAL;
			goto bail;
		}

		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ?
				   sg_list + ss.num_sge - 1 : &ss.sge,
				   &wr->sg_list[i], 0)) {
			ret = -EINVAL;
			goto bail;
		}
		len += wr->sg_list[i].length;
		ss.num_sge++;
	}
	/* Pad the payload out to a multiple of 4 bytes. */
	extra_bytes = (4 - len) & 3;
	nwords = (len + extra_bytes) >> 2;
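	/*
	 * For example, len = 13 gives extra_bytes = (4 - 13) & 3 = 3 and
	 * nwords = (13 + 3) >> 2 = 4, i.e. a whole number of 32-bit
	 * words on the wire.
	 */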
	/* Construct the header. */
	ah_attr = &to_iah(wr->wr.ud.ah)->attr;
	if (ah_attr->dlid == 0) {
		ret = -EINVAL;
		goto bail;
	}
	if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != IPS_PERMISSIVE_LID)
			dev->n_multicast_xmit++;
		else
			dev->n_unicast_xmit++;
	} else {
		dev->n_unicast_xmit++;
		lid = ah_attr->dlid &
			~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
			/*
			 * Pass in an uninitialized ib_wc to save stack
			 * space.
			 */
			ipath_ud_loopback(qp, &ss, len, wr, &wc);
			goto done;
		}
	}
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		hwords = 17;
		lrh0 = IPS_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		qp->s_hdr.u.l.grh.version_tclass_flow =
			cpu_to_be32((6 << 28) |
				    (ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		qp->s_hdr.u.l.grh.paylen =
			cpu_to_be16(((wr->opcode ==
				      IB_WR_SEND_WITH_IMM ? 6 : 5) +
				     nwords + SIZE_OF_CRC) << 2);
		/* next_hdr is defined by C8-7 in ch. 8.4.1 */
		qp->s_hdr.u.l.grh.next_hdr = 0x1B;
		qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
		/* The SGID is 32-bit aligned. */
		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
			dev->gid_prefix;
		qp->s_hdr.u.l.grh.sgid.global.interface_id =
			ipath_layer_get_guid(dev->dd);
		qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		hwords = 7;
		lrh0 = IPS_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
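	/*
	 * The header word counts above break down as LRH (8 bytes = 2
	 * words) + GRH (40 bytes = 10 words) + BTH (12 bytes = 3 words)
	 * + DETH (8 bytes = 2 words) = 17 words, or 7 words without the
	 * GRH; these match the hdrsize byte counts in ipath_ud_rcv().
	 */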
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wr->imm_data;
		wc.imm_data = wr->imm_data;
		hwords += 1;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else if (wr->opcode == IB_WR_SEND) {
		wc.imm_data = 0;
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	} else {
		ret = -EINVAL;
		goto bail;
	}
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000;	/* Set VL (see ch. 13.5.3.1) */
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
	lid = ipath_layer_get_lid(dev->dd);
	if (lid) {
		lid |= ah_attr->src_path_bits &
			((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
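	/*
	 * Example of the source path bits above: with an LMC of 2 the
	 * low 2 bits of the LID are path bits, so a base LID of 0x1000
	 * and src_path_bits of 1 yields an SLID of 0x1001.
	 */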
	if (wr->send_flags & IB_SEND_SOLICITED)
		bth0 |= 1 << 23;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
		ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
		ah_attr->dlid != IPS_PERMISSIVE_LID ?
		__constant_cpu_to_be32(IPS_MULTICAST_QPN) :
		cpu_to_be32(wr->wr.ud.remote_qpn);
	/* XXX Could lose a PSN count but not worth locking */
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int) wr->wr.ud.remote_qkey < 0 ?
					 qp->qkey : wr->wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
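	/*
	 * Layout of bth0 as built above (per the IBA BTH definition):
	 * opcode in bits 31:24, solicited event in bit 23, pad count in
	 * bits 21:20 and the P_Key in bits 15:0.
	 */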
	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
			     len, &ss))
		dev->n_no_piobuf++;

done:
	/* Queue the completion status entry. */
	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
	    (wr->send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wr->wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.vendor_err = 0;
		wc.opcode = IB_WC_SEND;
		wc.byte_len = len;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = 0;
		wc.wc_flags = 0;
		/* XXX initialize other fields? */
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
	}
	kfree(sg_list);

	ret = 0;

bail:
	return ret;
}
/**
 * ipath_ud_rcv - receive an incoming UD packet
 * @dev: the device the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	unsigned long flags;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	u16 dlid;
	int header_in_data;
	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;	/* LRH + BTH + DETH */
		qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
		src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
		/*
		 * The header with GRH is 68 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 12
		 * bytes of the IB header are in the data buffer.
		 */
		header_in_data =
			ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
		if (header_in_data) {
			qkey = be32_to_cpu(((__be32 *) data)[1]);
			src_qp = be32_to_cpu(((__be32 *) data)[2]);
			data += 12;
		} else {
			qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
			src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
		}
	}
	src_qp &= IPS_QPN_MASK;
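	/*
	 * Worked example of the split header case above: LRH + GRH +
	 * BTH + DETH is 8 + 40 + 12 + 8 = 68 bytes but the eager buffer
	 * only holds 56, so the last BTH word and both DETH words land
	 * at the start of the data buffer; that is why deth[0] (the
	 * qkey) is read from ((__be32 *) data)[1] and deth[1] (the
	 * source QP) from ((__be32 *) data)[2].
	 */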
	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
			dev->n_pkt_drops++;
			goto bail;
		}
		if (unlikely(qkey != qp->qkey)) {
			/* XXX OK to lose a count once in a while. */
			dev->qkey_violations++;
			dev->n_pkt_drops++;
			goto bail;
		}
	} else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
		   hdr->lrh[3] == IB_LID_PERMISSIVE) {
		struct ib_smp *smp = (struct ib_smp *) data;

		if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			dev->n_pkt_drops++;
			goto bail;
		}
	}
	/* Get the number of bytes the message was padded by. */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4))) {
		/* Drop incomplete packets. */
		dev->n_pkt_drops++;
		goto bail;
	}
	tlen -= hdrsize + pad + 4;

	/* Drop invalid MAD packets (see 13.5.3.1). */
	if (unlikely((qp->ibqp.qp_num == 0 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
		     (qp->ibqp.qp_num == 1 &&
		      (tlen != 256 ||
		       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
		dev->n_pkt_drops++;
		goto bail;
	}
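	/*
	 * At this point tlen has had the header, padding and 4-byte
	 * ICRC subtracted, so a valid raw MAD is exactly 256 bytes;
	 * be16_to_cpu(hdr->lrh[0]) >> 12 extracts the VL field, and
	 * SMPs (QP0) must arrive on VL15 while GSI packets (QP1) must
	 * not.
	 */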
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else
			wc.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		hdrsize += sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		dev->n_pkt_drops++;
		goto bail;
	}
	/*
	 * Get the next work request entry to find where to put the data.
	 * Note that it is safe to drop the lock after changing rq->tail
	 * since ipath_post_receive() won't fill the empty slot.
	 */
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		rq = &srq->rq;
	} else {
		srq = NULL;
		rq = &qp->r_rq;
	}
	spin_lock_irqsave(&rq->lock, flags);
	if (rq->tail == rq->head) {
		/* Receive queue is empty; drop the packet. */
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto bail;
	}
	/* Silently drop packets which are too big. */
	wqe = get_rwqe_ptr(rq, rq->tail);
	if (wc.byte_len > wqe->length) {
		spin_unlock_irqrestore(&rq->lock, flags);
		dev->n_pkt_drops++;
		goto bail;
	}
	wc.wr_id = wqe->wr_id;
	qp->r_sge.sge = wqe->sg_list[0];
	qp->r_sge.sg_list = wqe->sg_list + 1;
	qp->r_sge.num_sge = wqe->num_sge;
	if (++rq->tail >= rq->size)
		rq->tail = 0;
	if (srq && srq->ibsrq.event_handler) {
		u32 n;

		if (rq->head < rq->tail)
			n = rq->size + rq->head - rq->tail;
		else
			n = rq->head - rq->tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			srq->ibsrq.event_handler(&ev,
						 srq->ibsrq.srq_context);
		} else
			spin_unlock_irqrestore(&rq->lock, flags);
	} else
		spin_unlock_irqrestore(&rq->lock, flags);
	if (has_grh) {
		ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			       sizeof(struct ib_grh));
		wc.wc_flags |= IB_WC_GRH;
	} else
		ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
	ipath_copy_sge(&qp->r_sge, data,
		       wc.byte_len - sizeof(struct ib_grh));
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = src_qp;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       (ohdr->bth[0] &
			__constant_cpu_to_be32(1 << 23)) != 0);

bail:;
}