/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipath_verbs.h"
#include "ipath_kernel.h"
38 * Convert the AETH RNR timeout code into the number of milliseconds.
40 const u32 ib_ipath_rnr_table
[32] = {
76 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
79 * XXX Use a simple list for now. We might need a priority
80 * queue if we have lots of QPs waiting for RNR timeouts
81 * but that should be rare.
83 void ipath_insert_rnr_queue(struct ipath_qp
*qp
)
85 struct ipath_ibdev
*dev
= to_idev(qp
->ibqp
.device
);
88 spin_lock_irqsave(&dev
->pending_lock
, flags
);
89 if (list_empty(&dev
->rnrwait
))
90 list_add(&qp
->timerwait
, &dev
->rnrwait
);
92 struct list_head
*l
= &dev
->rnrwait
;
93 struct ipath_qp
*nqp
= list_entry(l
->next
, struct ipath_qp
,
96 while (qp
->s_rnr_timeout
>= nqp
->s_rnr_timeout
) {
97 qp
->s_rnr_timeout
-= nqp
->s_rnr_timeout
;
99 if (l
->next
== &dev
->rnrwait
)
101 nqp
= list_entry(l
->next
, struct ipath_qp
,
104 list_add(&qp
->timerwait
, l
);
106 spin_unlock_irqrestore(&dev
->pending_lock
, flags
);
109 static int init_sge(struct ipath_qp
*qp
, struct ipath_rwqe
*wqe
)
111 struct ipath_ibdev
*dev
= to_idev(qp
->ibqp
.device
);
112 int user
= to_ipd(qp
->ibqp
.pd
)->user
;
117 for (i
= j
= 0; i
< wqe
->num_sge
; i
++) {
118 if (wqe
->sg_list
[i
].length
== 0)
121 if ((user
&& wqe
->sg_list
[i
].lkey
== 0) ||
122 !ipath_lkey_ok(&dev
->lk_table
,
123 &qp
->r_sg_list
[j
], &wqe
->sg_list
[i
],
124 IB_ACCESS_LOCAL_WRITE
))
126 qp
->r_len
+= wqe
->sg_list
[i
].length
;
129 qp
->r_sge
.sge
= qp
->r_sg_list
[0];
130 qp
->r_sge
.sg_list
= qp
->r_sg_list
+ 1;
131 qp
->r_sge
.num_sge
= j
;
136 wc
.wr_id
= wqe
->wr_id
;
137 wc
.status
= IB_WC_LOC_PROT_ERR
;
138 wc
.opcode
= IB_WC_RECV
;
142 wc
.qp_num
= qp
->ibqp
.qp_num
;
148 wc
.dlid_path_bits
= 0;
150 /* Signal solicited completion event. */
151 ipath_cq_enter(to_icq(qp
->ibqp
.recv_cq
), &wc
, 1);
158 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
160 * @wr_id_only: update wr_id only, not SGEs
162 * Return 0 if no RWQE is available, otherwise return 1.
164 * Can be called from interrupt level.
166 int ipath_get_rwqe(struct ipath_qp
*qp
, int wr_id_only
)
170 struct ipath_rwq
*wq
;
171 struct ipath_srq
*srq
;
172 struct ipath_rwqe
*wqe
;
173 void (*handler
)(struct ib_event
*, void *);
178 srq
= to_isrq(qp
->ibqp
.srq
);
179 handler
= srq
->ibsrq
.event_handler
;
187 spin_lock_irqsave(&rq
->lock
, flags
);
190 /* Validate tail before using it since it is user writable. */
191 if (tail
>= rq
->size
)
194 if (unlikely(tail
== wq
->head
)) {
195 spin_unlock_irqrestore(&rq
->lock
, flags
);
199 wqe
= get_rwqe_ptr(rq
, tail
);
200 if (++tail
>= rq
->size
)
202 } while (!wr_id_only
&& !init_sge(qp
, wqe
));
203 qp
->r_wr_id
= wqe
->wr_id
;
211 * validate head pointer value and compute
212 * the number of remaining WQEs.
218 n
+= rq
->size
- tail
;
221 if (n
< srq
->limit
) {
225 spin_unlock_irqrestore(&rq
->lock
, flags
);
226 ev
.device
= qp
->ibqp
.device
;
227 ev
.element
.srq
= qp
->ibqp
.srq
;
228 ev
.event
= IB_EVENT_SRQ_LIMIT_REACHED
;
229 handler(&ev
, srq
->ibsrq
.srq_context
);
233 spin_unlock_irqrestore(&rq
->lock
, flags
);
240 * ipath_ruc_loopback - handle UC and RC lookback requests
241 * @sqp: the loopback QP
243 * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
244 * forward a WQE addressed to the same HCA.
245 * Note that although we are single threaded due to the tasklet, we still
246 * have to protect against post_send(). We don't have to worry about
247 * receive interrupts since this is a connected protocol and all packets
248 * will pass through here.
250 static void ipath_ruc_loopback(struct ipath_qp
*sqp
)
252 struct ipath_ibdev
*dev
= to_idev(sqp
->ibqp
.device
);
254 struct ipath_swqe
*wqe
;
255 struct ipath_sge
*sge
;
260 qp
= ipath_lookup_qpn(&dev
->qp_table
, sqp
->remote_qpn
);
267 spin_lock_irqsave(&sqp
->s_lock
, flags
);
269 if (!(ib_ipath_state_ops
[sqp
->state
] & IPATH_PROCESS_SEND_OK
)) {
270 spin_unlock_irqrestore(&sqp
->s_lock
, flags
);
274 /* Get the next send request. */
275 if (sqp
->s_last
== sqp
->s_head
) {
276 /* Send work queue is empty. */
277 spin_unlock_irqrestore(&sqp
->s_lock
, flags
);
282 * We can rely on the entry not changing without the s_lock
283 * being held until we update s_last.
285 wqe
= get_swqe_ptr(sqp
, sqp
->s_last
);
286 spin_unlock_irqrestore(&sqp
->s_lock
, flags
);
291 sqp
->s_sge
.sge
= wqe
->sg_list
[0];
292 sqp
->s_sge
.sg_list
= wqe
->sg_list
+ 1;
293 sqp
->s_sge
.num_sge
= wqe
->wr
.num_sge
;
294 sqp
->s_len
= wqe
->length
;
295 switch (wqe
->wr
.opcode
) {
296 case IB_WR_SEND_WITH_IMM
:
297 wc
.wc_flags
= IB_WC_WITH_IMM
;
298 wc
.imm_data
= wqe
->wr
.imm_data
;
301 if (!ipath_get_rwqe(qp
, 0)) {
304 if (qp
->ibqp
.qp_type
== IB_QPT_UC
)
306 if (sqp
->s_rnr_retry
== 0) {
307 wc
.status
= IB_WC_RNR_RETRY_EXC_ERR
;
310 if (sqp
->s_rnr_retry_cnt
< 7)
314 ib_ipath_rnr_table
[sqp
->r_min_rnr_timer
];
315 ipath_insert_rnr_queue(sqp
);
320 case IB_WR_RDMA_WRITE_WITH_IMM
:
321 wc
.wc_flags
= IB_WC_WITH_IMM
;
322 wc
.imm_data
= wqe
->wr
.imm_data
;
323 if (!ipath_get_rwqe(qp
, 1))
326 case IB_WR_RDMA_WRITE
:
327 if (wqe
->length
== 0)
329 if (unlikely(!ipath_rkey_ok(dev
, &qp
->r_sge
, wqe
->length
,
330 wqe
->wr
.wr
.rdma
.remote_addr
,
331 wqe
->wr
.wr
.rdma
.rkey
,
332 IB_ACCESS_REMOTE_WRITE
))) {
334 wc
.status
= IB_WC_REM_ACCESS_ERR
;
336 wc
.wr_id
= wqe
->wr
.wr_id
;
337 wc
.opcode
= ib_ipath_wc_opcode
[wqe
->wr
.opcode
];
340 wc
.qp_num
= sqp
->ibqp
.qp_num
;
341 wc
.src_qp
= sqp
->remote_qpn
;
343 wc
.slid
= sqp
->remote_ah_attr
.dlid
;
344 wc
.sl
= sqp
->remote_ah_attr
.sl
;
345 wc
.dlid_path_bits
= 0;
347 ipath_sqerror_qp(sqp
, &wc
);
352 case IB_WR_RDMA_READ
:
353 if (unlikely(!ipath_rkey_ok(dev
, &sqp
->s_sge
, wqe
->length
,
354 wqe
->wr
.wr
.rdma
.remote_addr
,
355 wqe
->wr
.wr
.rdma
.rkey
,
356 IB_ACCESS_REMOTE_READ
)))
358 if (unlikely(!(qp
->qp_access_flags
&
359 IB_ACCESS_REMOTE_READ
)))
361 qp
->r_sge
.sge
= wqe
->sg_list
[0];
362 qp
->r_sge
.sg_list
= wqe
->sg_list
+ 1;
363 qp
->r_sge
.num_sge
= wqe
->wr
.num_sge
;
366 case IB_WR_ATOMIC_CMP_AND_SWP
:
367 case IB_WR_ATOMIC_FETCH_AND_ADD
:
368 if (unlikely(!ipath_rkey_ok(dev
, &qp
->r_sge
, sizeof(u64
),
369 wqe
->wr
.wr
.rdma
.remote_addr
,
370 wqe
->wr
.wr
.rdma
.rkey
,
371 IB_ACCESS_REMOTE_ATOMIC
)))
373 /* Perform atomic OP and save result. */
374 sdata
= wqe
->wr
.wr
.atomic
.swap
;
375 spin_lock_irqsave(&dev
->pending_lock
, flags
);
376 qp
->r_atomic_data
= *(u64
*) qp
->r_sge
.sge
.vaddr
;
377 if (wqe
->wr
.opcode
== IB_WR_ATOMIC_FETCH_AND_ADD
)
378 *(u64
*) qp
->r_sge
.sge
.vaddr
=
379 qp
->r_atomic_data
+ sdata
;
380 else if (qp
->r_atomic_data
== wqe
->wr
.wr
.atomic
.compare_add
)
381 *(u64
*) qp
->r_sge
.sge
.vaddr
= sdata
;
382 spin_unlock_irqrestore(&dev
->pending_lock
, flags
);
383 *(u64
*) sqp
->s_sge
.sge
.vaddr
= qp
->r_atomic_data
;
390 sge
= &sqp
->s_sge
.sge
;
392 u32 len
= sqp
->s_len
;
394 if (len
> sge
->length
)
397 ipath_copy_sge(&qp
->r_sge
, sge
->vaddr
, len
);
400 sge
->sge_length
-= len
;
401 if (sge
->sge_length
== 0) {
402 if (--sqp
->s_sge
.num_sge
)
403 *sge
= *sqp
->s_sge
.sg_list
++;
404 } else if (sge
->length
== 0 && sge
->mr
!= NULL
) {
405 if (++sge
->n
>= IPATH_SEGSZ
) {
406 if (++sge
->m
>= sge
->mr
->mapsz
)
411 sge
->mr
->map
[sge
->m
]->segs
[sge
->n
].vaddr
;
413 sge
->mr
->map
[sge
->m
]->segs
[sge
->n
].length
;
418 if (wqe
->wr
.opcode
== IB_WR_RDMA_WRITE
||
419 wqe
->wr
.opcode
== IB_WR_RDMA_READ
)
422 if (wqe
->wr
.opcode
== IB_WR_RDMA_WRITE_WITH_IMM
)
423 wc
.opcode
= IB_WC_RECV_RDMA_WITH_IMM
;
425 wc
.opcode
= IB_WC_RECV
;
426 wc
.wr_id
= qp
->r_wr_id
;
427 wc
.status
= IB_WC_SUCCESS
;
429 wc
.byte_len
= wqe
->length
;
430 wc
.qp_num
= qp
->ibqp
.qp_num
;
431 wc
.src_qp
= qp
->remote_qpn
;
432 /* XXX do we know which pkey matched? Only needed for GSI. */
434 wc
.slid
= qp
->remote_ah_attr
.dlid
;
435 wc
.sl
= qp
->remote_ah_attr
.sl
;
436 wc
.dlid_path_bits
= 0;
437 /* Signal completion event if the solicited bit is set. */
438 ipath_cq_enter(to_icq(qp
->ibqp
.recv_cq
), &wc
,
439 wqe
->wr
.send_flags
& IB_SEND_SOLICITED
);
442 sqp
->s_rnr_retry
= sqp
->s_rnr_retry_cnt
;
444 if (!test_bit(IPATH_S_SIGNAL_REQ_WR
, &sqp
->s_flags
) ||
445 (wqe
->wr
.send_flags
& IB_SEND_SIGNALED
)) {
446 wc
.wr_id
= wqe
->wr
.wr_id
;
447 wc
.status
= IB_WC_SUCCESS
;
448 wc
.opcode
= ib_ipath_wc_opcode
[wqe
->wr
.opcode
];
450 wc
.byte_len
= wqe
->length
;
451 wc
.qp_num
= sqp
->ibqp
.qp_num
;
456 wc
.dlid_path_bits
= 0;
458 ipath_cq_enter(to_icq(sqp
->ibqp
.send_cq
), &wc
, 0);
461 /* Update s_last now that we are finished with the SWQE */
462 spin_lock_irqsave(&sqp
->s_lock
, flags
);
463 if (++sqp
->s_last
>= sqp
->s_size
)
465 spin_unlock_irqrestore(&sqp
->s_lock
, flags
);
469 if (atomic_dec_and_test(&qp
->refcount
))
473 static int want_buffer(struct ipath_devdata
*dd
)
475 set_bit(IPATH_S_PIOINTBUFAVAIL
, &dd
->ipath_sendctrl
);
476 ipath_write_kreg(dd
, dd
->ipath_kregs
->kr_sendctrl
,
483 * ipath_no_bufs_available - tell the layer driver we need buffers
484 * @qp: the QP that caused the problem
485 * @dev: the device we ran out of buffers on
487 * Called when we run out of PIO buffers.
489 void ipath_no_bufs_available(struct ipath_qp
*qp
, struct ipath_ibdev
*dev
)
493 spin_lock_irqsave(&dev
->pending_lock
, flags
);
494 if (list_empty(&qp
->piowait
))
495 list_add_tail(&qp
->piowait
, &dev
->piowait
);
496 spin_unlock_irqrestore(&dev
->pending_lock
, flags
);
498 * Note that as soon as want_buffer() is called and
499 * possibly before it returns, ipath_ib_piobufavail()
500 * could be called. If we are still in the tasklet function,
501 * tasklet_hi_schedule() will not call us until the next time
502 * tasklet_hi_schedule() is called.
503 * We clear the tasklet flag now since we are committing to return
504 * from the tasklet function.
506 clear_bit(IPATH_S_BUSY
, &qp
->s_flags
);
507 tasklet_unlock(&qp
->s_task
);
508 want_buffer(dev
->dd
);
513 * ipath_post_ruc_send - post RC and UC sends
514 * @qp: the QP to post on
515 * @wr: the work request to send
517 int ipath_post_ruc_send(struct ipath_qp
*qp
, struct ib_send_wr
*wr
)
519 struct ipath_swqe
*wqe
;
527 * Don't allow RDMA reads or atomic operations on UC or
528 * undefined operations.
529 * Make sure buffer is large enough to hold the result for atomics.
531 if (qp
->ibqp
.qp_type
== IB_QPT_UC
) {
532 if ((unsigned) wr
->opcode
>= IB_WR_RDMA_READ
) {
536 } else if ((unsigned) wr
->opcode
> IB_WR_ATOMIC_FETCH_AND_ADD
) {
539 } else if (wr
->opcode
>= IB_WR_ATOMIC_CMP_AND_SWP
&&
541 wr
->sg_list
[0].length
< sizeof(u64
) ||
542 wr
->sg_list
[0].addr
& (sizeof(u64
) - 1))) {
546 /* IB spec says that num_sge == 0 is OK. */
547 if (wr
->num_sge
> qp
->s_max_sge
) {
551 spin_lock_irqsave(&qp
->s_lock
, flags
);
552 next
= qp
->s_head
+ 1;
553 if (next
>= qp
->s_size
)
555 if (next
== qp
->s_last
) {
556 spin_unlock_irqrestore(&qp
->s_lock
, flags
);
561 wqe
= get_swqe_ptr(qp
, qp
->s_head
);
563 wqe
->ssn
= qp
->s_ssn
++;
564 wqe
->sg_list
[0].mr
= NULL
;
565 wqe
->sg_list
[0].vaddr
= NULL
;
566 wqe
->sg_list
[0].length
= 0;
567 wqe
->sg_list
[0].sge_length
= 0;
569 acc
= wr
->opcode
>= IB_WR_RDMA_READ
? IB_ACCESS_LOCAL_WRITE
: 0;
570 for (i
= 0, j
= 0; i
< wr
->num_sge
; i
++) {
571 if (to_ipd(qp
->ibqp
.pd
)->user
&& wr
->sg_list
[i
].lkey
== 0) {
572 spin_unlock_irqrestore(&qp
->s_lock
, flags
);
576 if (wr
->sg_list
[i
].length
== 0)
578 if (!ipath_lkey_ok(&to_idev(qp
->ibqp
.device
)->lk_table
,
579 &wqe
->sg_list
[j
], &wr
->sg_list
[i
],
581 spin_unlock_irqrestore(&qp
->s_lock
, flags
);
585 wqe
->length
+= wr
->sg_list
[i
].length
;
590 spin_unlock_irqrestore(&qp
->s_lock
, flags
);
592 ipath_do_ruc_send((unsigned long) qp
);
601 * ipath_make_grh - construct a GRH header
602 * @dev: a pointer to the ipath device
603 * @hdr: a pointer to the GRH header being constructed
604 * @grh: the global route address to send to
605 * @hwords: the number of 32 bit words of header being sent
606 * @nwords: the number of 32 bit words of data being sent
608 * Return the size of the header in 32 bit words.
610 u32
ipath_make_grh(struct ipath_ibdev
*dev
, struct ib_grh
*hdr
,
611 struct ib_global_route
*grh
, u32 hwords
, u32 nwords
)
613 hdr
->version_tclass_flow
=
614 cpu_to_be32((6 << 28) |
615 (grh
->traffic_class
<< 20) |
617 hdr
->paylen
= cpu_to_be16((hwords
- 2 + nwords
+ SIZE_OF_CRC
) << 2);
618 /* next_hdr is defined by C8-7 in ch. 8.4.1 */
619 hdr
->next_hdr
= 0x1B;
620 hdr
->hop_limit
= grh
->hop_limit
;
621 /* The SGID is 32-bit aligned. */
622 hdr
->sgid
.global
.subnet_prefix
= dev
->gid_prefix
;
623 hdr
->sgid
.global
.interface_id
= dev
->dd
->ipath_guid
;
624 hdr
->dgid
= grh
->dgid
;
626 /* GRH header size in 32-bit words. */
627 return sizeof(struct ib_grh
) / sizeof(u32
);
631 * ipath_do_ruc_send - perform a send on an RC or UC QP
632 * @data: contains a pointer to the QP
634 * Process entries in the send work queue until credit or queue is
635 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
636 * Otherwise, after we drop the QP s_lock, two threads could send
637 * packets out of order.
639 void ipath_do_ruc_send(unsigned long data
)
641 struct ipath_qp
*qp
= (struct ipath_qp
*)data
;
642 struct ipath_ibdev
*dev
= to_idev(qp
->ibqp
.device
);
649 u32 pmtu
= ib_mtu_enum_to_int(qp
->path_mtu
);
650 struct ipath_other_headers
*ohdr
;
652 if (test_and_set_bit(IPATH_S_BUSY
, &qp
->s_flags
))
655 if (unlikely(qp
->remote_ah_attr
.dlid
== dev
->dd
->ipath_lid
)) {
656 ipath_ruc_loopback(qp
);
660 ohdr
= &qp
->s_hdr
.u
.oth
;
661 if (qp
->remote_ah_attr
.ah_flags
& IB_AH_GRH
)
662 ohdr
= &qp
->s_hdr
.u
.l
.oth
;
665 /* Check for a constructed packet to be sent. */
666 if (qp
->s_hdrwords
!= 0) {
668 * If no PIO bufs are available, return. An interrupt will
669 * call ipath_ib_piobufavail() when one is available.
671 if (ipath_verbs_send(dev
->dd
, qp
->s_hdrwords
,
672 (u32
*) &qp
->s_hdr
, qp
->s_cur_size
,
674 ipath_no_bufs_available(qp
, dev
);
677 dev
->n_unicast_xmit
++;
678 /* Record that we sent the packet and s_hdr is empty. */
683 * The lock is needed to synchronize between setting
684 * qp->s_ack_state, resend timer, and post_send().
686 spin_lock_irqsave(&qp
->s_lock
, flags
);
688 /* Sending responses has higher priority over sending requests. */
689 if (qp
->s_ack_state
!= IB_OPCODE_RC_ACKNOWLEDGE
&&
690 (bth0
= ipath_make_rc_ack(qp
, ohdr
, pmtu
)) != 0)
691 bth2
= qp
->s_ack_psn
++ & IPATH_PSN_MASK
;
692 else if (!((qp
->ibqp
.qp_type
== IB_QPT_RC
) ?
693 ipath_make_rc_req(qp
, ohdr
, pmtu
, &bth0
, &bth2
) :
694 ipath_make_uc_req(qp
, ohdr
, pmtu
, &bth0
, &bth2
))) {
696 * Clear the busy bit before unlocking to avoid races with
697 * adding new work queue items and then failing to process
700 clear_bit(IPATH_S_BUSY
, &qp
->s_flags
);
701 spin_unlock_irqrestore(&qp
->s_lock
, flags
);
705 spin_unlock_irqrestore(&qp
->s_lock
, flags
);
707 /* Construct the header. */
708 extra_bytes
= (4 - qp
->s_cur_size
) & 3;
709 nwords
= (qp
->s_cur_size
+ extra_bytes
) >> 2;
710 lrh0
= IPATH_LRH_BTH
;
711 if (unlikely(qp
->remote_ah_attr
.ah_flags
& IB_AH_GRH
)) {
712 qp
->s_hdrwords
+= ipath_make_grh(dev
, &qp
->s_hdr
.u
.l
.grh
,
713 &qp
->remote_ah_attr
.grh
,
714 qp
->s_hdrwords
, nwords
);
715 lrh0
= IPATH_LRH_GRH
;
717 lrh0
|= qp
->remote_ah_attr
.sl
<< 4;
718 qp
->s_hdr
.lrh
[0] = cpu_to_be16(lrh0
);
719 qp
->s_hdr
.lrh
[1] = cpu_to_be16(qp
->remote_ah_attr
.dlid
);
720 qp
->s_hdr
.lrh
[2] = cpu_to_be16(qp
->s_hdrwords
+ nwords
+
722 qp
->s_hdr
.lrh
[3] = cpu_to_be16(dev
->dd
->ipath_lid
);
723 bth0
|= ipath_get_pkey(dev
->dd
, qp
->s_pkey_index
);
724 bth0
|= extra_bytes
<< 20;
725 ohdr
->bth
[0] = cpu_to_be32(bth0
);
726 ohdr
->bth
[1] = cpu_to_be32(qp
->remote_qpn
);
727 ohdr
->bth
[2] = cpu_to_be32(bth2
);
729 /* Check for more work to do. */
733 clear_bit(IPATH_S_BUSY
, &qp
->s_flags
);