[PATCH] inode-diet: Eliminate i_blksize from the inode structure
[linux-2.6/libata-dev.git] drivers/infiniband/hw/ipath/ipath_ruc.c
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
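/*
 * The 5-bit AETH RNR codes are defined by the IBTA spec in multiples
 * of 10.24 usec (code 0 meaning 655.36 msec); the entries below are
 * those values rounded up to whole milliseconds.
 */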
const u32 ib_ipath_rnr_table[32] = {
	656,			/* 0 */
	1,			/* 1 */
	1,			/* 2 */
	1,			/* 3 */
	1,			/* 4 */
	1,			/* 5 */
	1,			/* 6 */
	1,			/* 7 */
	1,			/* 8 */
	1,			/* 9 */
	1,			/* A */
	1,			/* B */
	1,			/* C */
	1,			/* D */
	2,			/* E */
	2,			/* F */
	3,			/* 10 */
	4,			/* 11 */
	6,			/* 12 */
	8,			/* 13 */
	11,			/* 14 */
	16,			/* 15 */
	21,			/* 16 */
	31,			/* 17 */
	41,			/* 18 */
	62,			/* 19 */
	82,			/* 1A */
	123,			/* 1B */
	164,			/* 1C */
	246,			/* 1D */
	328,			/* 1E */
	492			/* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);
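
		/*
		 * The rnrwait list keeps relative timeouts: each QP's
		 * s_rnr_timeout is the delta from the entry ahead of it.
		 * Subtract off the deltas we pass to find this QP's slot.
		 */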
		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}

static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	int user = to_ipd(qp->ibqp.pd)->user;
	int i, j, ret;
	struct ib_wc wc;

	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if ((user && wqe->sg_list[i].lkey == 0) ||
		    !ipath_lkey_ok(&dev->lk_table,
				   &qp->r_sg_list[j], &wqe->sg_list[i],
				   IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	qp->r_sge.sge = qp->r_sg_list[0];
	qp->r_sge.sg_list = qp->r_sg_list + 1;
	qp->r_sge.num_sge = j;
	ret = 1;
	goto bail;

bad_lkey:
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	/* Signal solicited completion event. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_rwq *wq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;
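
	/* A QP attached to an SRQ consumes RWQEs from the SRQ, not its own RQ. */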
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
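	/*
	 * Consume RWQEs until one passes SGE validation; init_sge() posts
	 * an error completion for each RWQE it rejects (bad LKEY).
	 */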
	do {
		if (unlikely(tail == wq->head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
	} while (!wr_id_only && !init_sge(qp, wqe));
	qp->r_wr_id = wqe->wr_id;
	wq->tail = tail;

	ret = 1;
	if (handler) {
		u32 n;

		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;
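
			/* The SRQ limit event is one-shot; disarm it here. */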
			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&rq->lock, flags);

bail:
	return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 *
 * This is called from ipath_do_ruc_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc.wc_flags = 0;
	wc.imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/*
			 * No RWQE is posted at the receiver: emulate an RNR
			 * NAK.  UC has no retry protocol, so the WQE simply
			 * completes; RC backs off for the receiver's minimum
			 * RNR timeout and retries.
			 */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
			/* A retry count of 7 means "retry forever". */
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[sqp->r_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
	acc_err:
			wc.status = IB_WC_REM_ACCESS_ERR;
	err:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp_num = sqp->ibqp.qp_num;
			wc.src_qp = sqp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = sqp->remote_ah_attr.dlid;
			wc.sl = sqp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(sqp, &wc);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/*
		 * Perform the atomic OP and save the prior value.  In the
		 * verbs atomic WR, compare_add holds the addend for
		 * fetch-and-add (and the compare value for cmp-and-swap);
		 * swap is only the swap operand.
		 */
		sdata = (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			wqe->wr.wr.atomic.compare_add :
			wqe->wr.wr.atomic.swap;
		spin_lock_irqsave(&dev->pending_lock, flags);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		/* Return the original value to the requester. */
		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
		goto send_comp;

	default:
		goto done;
	}
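
	/*
	 * Copy the payload a chunk at a time, walking the source SGE list
	 * (sqp->s_sge) while ipath_copy_sge() advances the destination.
	 */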
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.vendor_err = 0;
	wc.byte_len = wqe->length;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc.vendor_err = 0;
		wc.byte_len = wqe->length;
		wc.qp_num = sqp->ibqp.qp_num;
		wc.src_qp = 0;
		wc.pkey_index = 0;
		wc.slid = 0;
		wc.sl = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
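
/*
 * Enable the "PIO buffer available" interrupt so that
 * ipath_ib_piobufavail() is called when a send buffer frees up.
 */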
static int want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called.  If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not call us until the next time
	 * tasklet_hi_schedule() is called.
	 * We clear the tasklet flag now since we are committing to return
	 * from the tasklet function.
	 */
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	tasklet_unlock(&qp->s_task);
	want_buffer(dev->dd);
	dev->n_piowait++;
}

/**
 * ipath_post_ruc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
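	/* One slot is kept empty so a full ring can be told from an empty one. */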
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
				   &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ipath_do_ruc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
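	/* Version 6: the GRH mirrors the IPv6 header layout. */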
	hdr->version_tclass_flow =
		cpu_to_be32((6 << 28) |
			    (grh->traffic_class << 20) |
			    grh->flow_label);
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = 0x1B;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
	hdr->sgid.global.interface_id = dev->dd->ipath_guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
		ipath_ruc_loopback(qp);
		goto clear;
	}
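
	/*
	 * When a GRH is present the BTH follows it in the header buffer,
	 * so pick the right "other headers" union member up front.
	 */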
	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority over sending requests. */
	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
		bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_flags);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPATH_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPATH_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
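	/* The pad count occupies bits 21:20 of the first BTH word. */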
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
bail:
	return;
}