/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"
/* cut down ridiculously long IB macro names */
#define OP(x) RC_OP(x)
/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
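/*
 * Note: qp->timeout_jiffies is derived from the 5-bit IB "Local ACK
 * Timeout" value as 4.096 usec * (1 << qp->timeout).  As a worked
 * example, the common default of qp->timeout = 14 gives roughly
 * 4.096 usec * 16384, i.e. about 67 msec per retry interval, plus the
 * rdi->busy_jiffies slack added above.
 */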
/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @to - timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}
/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp - the QP
 *
 * Modify a potentially already running retry timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}
/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp - the QP
 *
 * stop a retry timer and return if the timer
 * stopped or not
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}
/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}
/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer and return if the timer
 * stopped or not
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}
/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}
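/*
 * Example of the PSN-to-offset arithmetic above: each PSN past wqe->psn
 * corresponds to one pmtu-sized payload already sent, so restarting at
 * psn = wqe->psn + 3 with a 4096-byte pmtu skips 12288 bytes of the SGE
 * list and leaves wqe->length - 12288 bytes still to (re)send.
 */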
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct ib_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	lockdep_assert_held(&qp->s_lock);
	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			ps->s_txreq->ss = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		ps->s_txreq->ss = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING |
			 RVT_S_ACK_PENDING |
			 RVT_S_AHG_VALID);
	return 0;
}
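/*
 * The responder-side s_ack_queue used above is a small ring of
 * HFI1_MAX_RDMA_ATOMIC + 1 entries: r_head_ack_queue indexes where the
 * next incoming RDMA read/atomic request is queued, and
 * s_tail_ack_queue the entry whose response is currently being
 * (re)built, which is why the wrap tests compare with
 * "> HFI1_MAX_RDMA_ATOMIC" rather than ">=".
 */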
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	lockdep_assert_held(&qp->s_lock);
	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
				   IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			/*
			 * Local operations are processed immediately
			 * after all prior requests have completed
			 */
			if (wqe->wr.opcode == IB_WR_REG_MR ||
			    wqe->wr.opcode == IB_WR_LOCAL_INV) {
				int local_ops = 0;
				int err = 0;

				if (qp->s_last != qp->s_cur)
					goto bail;
				if (++qp->s_cur == qp->s_size)
					qp->s_cur = 0;
				if (++qp->s_tail == qp->s_size)
					qp->s_tail = 0;
				if (!(wqe->wr.send_flags &
				      RVT_SEND_COMPLETION_ONLY)) {
					err = rvt_invalidate_rkey(
						qp,
						wqe->wr.ex.invalidate_rkey);
					local_ops = 1;
				}
				hfi1_send_complete(qp, wqe,
						   err ? IB_WC_LOC_PROT_ERR
						       : IB_WC_SUCCESS);
				if (local_ops)
					atomic_dec(&qp->local_ops_pending);
				qp->s_hdrwords = 0;
				goto done_free_tx;
			}

			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
		case IB_WR_SEND_WITH_INV:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			} else {
				qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
				/* Invalidate rkey comes after the BTH */
				ohdr->u.ieth = cpu_to_be32(
						wqe->wr.ex.invalidate_rkey);
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			put_ib_reth_vaddr(
				wqe->rdma_wr.remote_addr,
				&ohdr->u.rc.reth);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		} else {
			qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
			/* invalidate data comes after the BTH */
			ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		put_ib_reth_vaddr(
			wqe->rdma_wr.remote_addr + len,
			&ohdr->u.rc.reth);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = ss;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	/* pbc */
	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}
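/*
 * Pacing note for the requester path above: IB_BTH_REQ_ACK is set on
 * the last packet of each request and, via the delta % HFI1_PSN_CREDIT
 * check, on every HFI1_PSN_CREDIT-th packet of a long transfer, so the
 * responder returns periodic ACKs without generating one per packet.
 */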
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and send engine.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0 = HFI1_LRH_BTH;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
					&qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					   (qp->r_nak_state <<
					    HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send engine
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;
	this_cpu_inc(*ibp->rvp.rc_qacks);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send engine. */
	hfi1_schedule_send(qp);
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
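/*
 * The fast path above builds the ACK on the stack and pushes it with a
 * single inline PIO send; only when no PIO buffer is free, or when a
 * read/atomic response is already pending, does it fall back to
 * queue_ack, where the regular send engine emits the ACK from the
 * s_lock-protected QP state.
 */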
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	lockdep_assert_held(&qp->s_lock);
	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since its only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send engine can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}
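/*
 * reset_psn() deliberately reuses the RDMA_READ_RESPONSE_{FIRST,LAST,
 * MIDDLE} opcodes as private "restart" markers in qp->s_state: a
 * requester never legitimately constructs those opcodes itself, so
 * hfi1_make_rc_req() can tell a restarted SEND, RDMA write, or RDMA
 * read apart from normal forward progress.
 */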
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else { /* need to handle delayed completion */
			return;
		}
	} else {
		qp->s_retry--;
	}

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	lockdep_assert_held(&qp->s_lock);
	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 opcode;
	u32 psn;
	int i;

	lockdep_assert_held(&qp->s_lock);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
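/*
 * s_last_psn tracks the highest PSN known to have been ACKed; the
 * retry-timeout path above resends starting from s_last_psn + 1.
 */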
/*
 * do_rc_completion
 *
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	lockdep_assert_held(&qp->s_lock);
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;
		int i;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_cur = qp->s_acked;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	unsigned long to;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		hfi1_stop_rc_timers(qp);
		to =
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
					  HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached  */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}
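/*
 * AETH decoding used above, for reference: per IBTA, the top 3 bits of
 * the 32-bit AETH select the type (0 = ACK, 1 = RNR NAK, 2 = reserved,
 * 3 = NAK), the next 5 bits (the HFI1_AETH_CREDIT_* field) carry the
 * credit count, RNR timer index, or NAK code depending on the type, and
 * the low 24 bits hold the MSN.  E.g. an aeth of 0x60000005 decodes as
 * type 3 (NAK) with code 0 - a PSN sequence error - and MSN 5.
 */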
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct ib_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}
/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}
void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
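/*
 * Example: with n == HFI1_MAX_RDMA_ATOMIC (the last slot of the
 * HFI1_MAX_RDMA_ATOMIC + 1 entry ring), update_ack_queue() wraps next
 * back to slot 0, matching the ring arithmetic in make_rc_ack() and
 * rc_rcv_error().
 */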
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);

	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
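/*
 * Units example for the congestion path above: ccti_timer and the CCA
 * log timestamp are both kept in 1.024 usec ticks (1024 ns), so a
 * ccti_timer of 1000 arms the hrtimer for roughly 1.024 msec.
 */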
2089 * hfi1_rc_rcv - process an incoming RC packet
2090 * @rcd: the context pointer
2091 * @hdr: the header of this packet
2092 * @rcv_flags: flags relevant to rcv processing
2093 * @data: the packet data
2094 * @tlen: the packet length
2095 * @qp: the QP for this packet
2097 * This is called from qp_rcv() to process an incoming RC packet
2099 * May be called at interrupt level.
2101 void hfi1_rc_rcv(struct hfi1_packet
*packet
)
2103 struct hfi1_ctxtdata
*rcd
= packet
->rcd
;
2104 struct ib_header
*hdr
= packet
->hdr
;
2105 u32 rcv_flags
= packet
->rcv_flags
;
2106 void *data
= packet
->ebuf
;
2107 u32 tlen
= packet
->tlen
;
2108 struct rvt_qp
*qp
= packet
->qp
;
2109 struct hfi1_ibport
*ibp
= to_iport(qp
->ibqp
.device
, qp
->port_num
);
2110 struct ib_other_headers
*ohdr
= packet
->ohdr
;
2112 u32 hdrsize
= packet
->hlen
;
2116 u32 pmtu
= qp
->pmtu
;
2118 struct ib_reth
*reth
;
2119 unsigned long flags
;
2120 int ret
, is_fecn
= 0;
2124 lockdep_assert_held(&qp
->r_lock
);
2125 bth0
= be32_to_cpu(ohdr
->bth
[0]);
2126 if (hfi1_ruc_check_hdr(ibp
, hdr
, rcv_flags
& HFI1_HAS_GRH
, qp
, bth0
))
2129 is_fecn
= process_ecn(qp
, packet
, false);
2131 psn
= be32_to_cpu(ohdr
->bth
[2]);
2132 opcode
= (bth0
>> 24) & 0xff;
2135 * Process responses (ACKs) before anything else. Note that the
2136 * packet sequence number will be for something in the send work
2137 * queue rather than the expected receive packet sequence number.
2138 * In other words, this QP is the requester.
2140 if (opcode
>= OP(RDMA_READ_RESPONSE_FIRST
) &&
2141 opcode
<= OP(ATOMIC_ACKNOWLEDGE
)) {
2142 rc_rcv_resp(ibp
, ohdr
, data
, tlen
, qp
, opcode
, psn
,
2143 hdrsize
, pmtu
, rcd
);
2149 /* Compute 24 bits worth of difference. */
2150 diff
= delta_psn(psn
, qp
->r_psn
);
2151 if (unlikely(diff
)) {
2152 if (rc_rcv_error(ohdr
, data
, qp
, opcode
, psn
, diff
, rcd
))
2157 /* Check for opcode sequence errors. */
2158 switch (qp
->r_state
) {
2159 case OP(SEND_FIRST
):
2160 case OP(SEND_MIDDLE
):
2161 if (opcode
== OP(SEND_MIDDLE
) ||
2162 opcode
== OP(SEND_LAST
) ||
2163 opcode
== OP(SEND_LAST_WITH_IMMEDIATE
) ||
2164 opcode
== OP(SEND_LAST_WITH_INVALIDATE
))
2168 case OP(RDMA_WRITE_FIRST
):
2169 case OP(RDMA_WRITE_MIDDLE
):
2170 if (opcode
== OP(RDMA_WRITE_MIDDLE
) ||
2171 opcode
== OP(RDMA_WRITE_LAST
) ||
2172 opcode
== OP(RDMA_WRITE_LAST_WITH_IMMEDIATE
))
2177 if (opcode
== OP(SEND_MIDDLE
) ||
2178 opcode
== OP(SEND_LAST
) ||
2179 opcode
== OP(SEND_LAST_WITH_IMMEDIATE
) ||
2180 opcode
== OP(SEND_LAST_WITH_INVALIDATE
) ||
2181 opcode
== OP(RDMA_WRITE_MIDDLE
) ||
2182 opcode
== OP(RDMA_WRITE_LAST
) ||
2183 opcode
== OP(RDMA_WRITE_LAST_WITH_IMMEDIATE
))
2186 * Note that it is up to the requester to not send a new
2187 * RDMA read or atomic operation before receiving an ACK
2188 * for the previous operation.
2193 if (qp
->state
== IB_QPS_RTR
&& !(qp
->r_flags
& RVT_R_COMM_EST
))
2196 /* OK, process the packet. */
2198 case OP(SEND_FIRST
):
2199 ret
= hfi1_rvt_get_rwqe(qp
, 0);
2206 case OP(SEND_MIDDLE
):
2207 case OP(RDMA_WRITE_MIDDLE
):
2209 /* Check for invalid length PMTU or posted rwqe len. */
2210 if (unlikely(tlen
!= (hdrsize
+ pmtu
+ 4)))
2212 qp
->r_rcv_len
+= pmtu
;
2213 if (unlikely(qp
->r_rcv_len
> qp
->r_len
))
2215 hfi1_copy_sge(&qp
->r_sge
, data
, pmtu
, 1, 0);
2218 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE
):
2220 ret
= hfi1_rvt_get_rwqe(qp
, 1);
2228 case OP(SEND_ONLY_WITH_IMMEDIATE
):
2229 case OP(SEND_ONLY_WITH_INVALIDATE
):
2230 ret
= hfi1_rvt_get_rwqe(qp
, 0);
2236 if (opcode
== OP(SEND_ONLY
))
2237 goto no_immediate_data
;
2238 if (opcode
== OP(SEND_ONLY_WITH_INVALIDATE
))
2240 /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
2241 case OP(SEND_LAST_WITH_IMMEDIATE
):
2243 wc
.ex
.imm_data
= ohdr
->u
.imm_data
;
2244 wc
.wc_flags
= IB_WC_WITH_IMM
;
2246 case OP(SEND_LAST_WITH_INVALIDATE
):
2248 rkey
= be32_to_cpu(ohdr
->u
.ieth
);
2249 if (rvt_invalidate_rkey(qp
, rkey
))
2250 goto no_immediate_data
;
2251 wc
.ex
.invalidate_rkey
= rkey
;
2252 wc
.wc_flags
= IB_WC_WITH_INVALIDATE
;
2254 case OP(RDMA_WRITE_LAST
):
2255 copy_last
= ibpd_to_rvtpd(qp
->ibqp
.pd
)->user
;
2262 /* Get the number of bytes the message was padded by. */
2263 pad
= (bth0
>> 20) & 3;
2264 /* Check for invalid length. */
2265 /* LAST len should be >= 1 */
2266 if (unlikely(tlen
< (hdrsize
+ pad
+ 4)))
2268 /* Don't count the CRC. */
2269 tlen
-= (hdrsize
+ pad
+ 4);
2270 wc
.byte_len
= tlen
+ qp
->r_rcv_len
;
2271 if (unlikely(wc
.byte_len
> qp
->r_len
))
2273 hfi1_copy_sge(&qp
->r_sge
, data
, tlen
, 1, copy_last
);
2274 rvt_put_ss(&qp
->r_sge
);
		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;
	case OP(RDMA_WRITE_ONLY):
		copy_last = 1;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
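	/*
	 * Note on the write path above: only the FIRST/ONLY packet of an
	 * RDMA write carries a RETH, so the r_len/r_rcv_len captured here
	 * are what the MIDDLE/LAST packets are checked against in
	 * send_middle and send_last.
	 */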
	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
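		/*
		 * s_ack_queue is a small ring of responder resources,
		 * produced at r_head_ack_queue and consumed at
		 * s_tail_ack_queue.  When the ring would overflow, an
		 * entry whose response has already gone out may be
		 * reclaimed (update_ack_queue(), earlier in this file,
		 * advances the tail); otherwise the request is NAKed as
		 * invalid, since the requester exceeded the negotiated
		 * number of outstanding read/atomic operations.
		 */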
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += (len - 1) / pmtu;
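			/*
			 * E.g. a read of 3 * pmtu bytes occupies PSNs
			 * r_psn .. r_psn + 2: (len - 1) / pmtu = 2 is
			 * added here, and the final increment happens
			 * with the unconditional qp->r_psn++ below.
			 */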
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
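		/*
		 * The atomic response must carry the *old* value:
		 * atomic64_add_return() hands back the new value, hence
		 * the "- sdata" above for FETCH_ADD, while cmpxchg()
		 * already returns the prior contents for COMPARE_SWAP.
		 */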
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send engine. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}
	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

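		/*
		 * Ack coalescing: r_adefered counts acks deferred so far.
		 * Once HFI1_PSN_CREDIT worth are pending, or this was the
		 * last packet of the interrupt burst, or a FECN must be
		 * reflected, the deferral is cancelled and an ack goes
		 * out immediately below.
		 */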
		if (packet->numpkt == 0) {
			rc_cancel_ack(rcd, qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(rcd, qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(rcd, qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

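	/*
	 * Common error exits.  qp->r_nak_state encodes either the 5-bit
	 * minimum RNR timer value ORed with the IB_RNR_NAK type code, or
	 * one of the IB_NAK_* syndromes below; the actual NAK packet is
	 * built later from r_nak_state/r_ack_psn.
	 */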
rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}
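
/**
 * hfi1_rc_hdrerr - handle a packet that arrived with a header error
 * @rcd: the receive context
 * @hdr: the packet header
 * @rcv_flags: receive flags (HFI1_HAS_GRH, etc.)
 * @qp: the destination QP
 *
 * For request opcodes that are out of PSN sequence, queue a deferred
 * PSN-error NAK so the requester backs off; other opcodes are ignored
 * for now.
 */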
void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct ib_other_headers *ohdr;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;
	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}