/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
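
/*
 * Build a T3 SEND work request from an ib_send_wr: translate the IB opcode
 * and flags into the T3 rdmaop, copy the SG list (or the immediate data for
 * *_WITH_IMM) into the WQE, and return the WR length in 8-byte flits via
 * flit_cnt.
 */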
static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
                                       u8 *flit_cnt)
{
        int i;
        u32 plen;

        switch (wr->opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.rdmaop = T3_SEND_WITH_SE;
                else
                        wqe->send.rdmaop = T3_SEND;
                wqe->send.rem_stag = 0;
                break;
#if 0                           /* Not currently supported */
        case TYPE_SEND_INVALIDATE:
        case TYPE_SEND_INVALIDATE_IMMEDIATE:
                wqe->send.rdmaop = T3_SEND_WITH_INV;
                wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
                break;
        case TYPE_SEND_SE_INVALIDATE:
                wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
                wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
                break;
#endif
        default:
                break;
        }
        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->send.reserved[0] = 0;
        wqe->send.reserved[1] = 0;
        wqe->send.reserved[2] = 0;
        if (wr->opcode == IB_WR_SEND_WITH_IMM) {
                plen = 4;
                wqe->send.sgl[0].stag = wr->imm_data;
                wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
                wqe->send.num_sgle = __constant_cpu_to_be32(0);
                *flit_cnt = 5;
        } else {
                plen = 0;
                for (i = 0; i < wr->num_sge; i++) {
                        if ((plen + wr->sg_list[i].length) < plen) {
                                return -EMSGSIZE;
                        }
                        plen += wr->sg_list[i].length;
                        wqe->send.sgl[i].stag =
                            cpu_to_be32(wr->sg_list[i].lkey);
                        wqe->send.sgl[i].len =
                            cpu_to_be32(wr->sg_list[i].length);
                        wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
                }
                wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
                *flit_cnt = 4 + ((wr->num_sge) << 1);
        }
        wqe->send.plen = cpu_to_be32(plen);
        return 0;
}
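
/*
 * Build a T3 RDMA WRITE work request: fill in the sink STag and target
 * offset from the ib_send_wr rdma fields, copy the local SG list (or the
 * immediate data), and report the flit count.
 */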
static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
                                        u8 *flit_cnt)
{
        int i;
        u32 plen;

        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->write.rdmaop = T3_RDMA_WRITE;
        wqe->write.reserved[0] = 0;
        wqe->write.reserved[1] = 0;
        wqe->write.reserved[2] = 0;
        wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

        if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
                plen = 4;
                wqe->write.sgl[0].stag = wr->imm_data;
                wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
                wqe->write.num_sgle = __constant_cpu_to_be32(0);
                *flit_cnt = 6;
        } else {
                plen = 0;
                for (i = 0; i < wr->num_sge; i++) {
                        if ((plen + wr->sg_list[i].length) < plen) {
                                return -EMSGSIZE;
                        }
                        plen += wr->sg_list[i].length;
                        wqe->write.sgl[i].stag =
                            cpu_to_be32(wr->sg_list[i].lkey);
                        wqe->write.sgl[i].len =
                            cpu_to_be32(wr->sg_list[i].length);
                        wqe->write.sgl[i].to =
                            cpu_to_be64(wr->sg_list[i].addr);
                }
                wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
                *flit_cnt = 5 + ((wr->num_sge) << 1);
        }
        wqe->write.plen = cpu_to_be32(plen);
        return 0;
}
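
/*
 * Build a T3 RDMA READ REQUEST work request.  Only sg_list[0] is used to
 * describe where the read response data is placed locally.
 */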
static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
                                       u8 *flit_cnt)
{
        wqe->read.rdmaop = T3_READ_REQ;
        wqe->read.reserved[0] = 0;
        wqe->read.reserved[1] = 0;
        wqe->read.reserved[2] = 0;
        wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
        wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
        wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
        wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
        *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
        return 0;
}
/*
 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
 */
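/*
 * Validate each SGE against its memory region (state, zero-based VA,
 * VA/length bounds) and translate it into an adapter PBL address and page
 * size for the hardware.
 */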
static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp,
                                   struct ib_sge *sg_list, u32 num_sgle,
                                   u32 *pbl_addr, u8 *page_size)
{
        int i;
        struct iwch_mr *mhp;
        u32 offset;

        for (i = 0; i < num_sgle; i++) {

                mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
                if (!mhp) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EIO;
                }
                if (!mhp->attr.state) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EIO;
                }
                if (mhp->attr.zbva) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EIO;
                }

                if (sg_list[i].addr < mhp->attr.va_fbo) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EINVAL;
                }
                if (sg_list[i].addr + ((u64) sg_list[i].length) <
                    sg_list[i].addr) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EINVAL;
                }
                if (sg_list[i].addr + ((u64) sg_list[i].length) >
                    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EINVAL;
                }
                offset = sg_list[i].addr - mhp->attr.va_fbo;
                offset += ((u32) mhp->attr.va_fbo) %
                          (1UL << (12 + mhp->attr.page_size));
                pbl_addr[i] = ((mhp->attr.pbl_addr -
                                rhp->rdev.rnic_info.pbl_base) >> 3) +
                              (offset >> (12 + mhp->attr.page_size));
                page_size[i] = mhp->attr.page_size;
        }
        return 0;
}
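
/*
 * Build a T3 RECEIVE work request: map the SG list through the PBL, store
 * the per-SGE page sizes and PBL addresses in the WQE, and zero any unused
 * SGL slots.
 */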
static inline int iwch_build_rdma_recv(struct iwch_dev *rhp,
                                       union t3_wr *wqe,
                                       struct ib_recv_wr *wr)
{
        int i, err = 0;
        u32 pbl_addr[4];
        u8 page_size[4];

        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
                               page_size);
        if (err)
                return err;
        wqe->recv.pagesz[0] = page_size[0];
        wqe->recv.pagesz[1] = page_size[1];
        wqe->recv.pagesz[2] = page_size[2];
        wqe->recv.pagesz[3] = page_size[3];
        wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
        for (i = 0; i < wr->num_sge; i++) {
                wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
                wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

                /* to in the WQE == the offset into the page */
                wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
                                (1UL << (12 + page_size[i])));

                /* pbl_addr is the adapters address in the PBL */
                wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
        }
        for (; i < T3_MAX_SGE; i++) {
                wqe->recv.sgl[i].stag = 0;
                wqe->recv.sgl[i].len = 0;
                wqe->recv.sgl[i].to = 0;
                wqe->recv.pbl_addr[i] = 0;
        }
        return 0;
}
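
/*
 * Post a chain of send work requests to the SQ.  Takes the QP lock, builds
 * one WQE per WR, records software SQ state (wr_id, opcode, signaled) for
 * completion processing, and rings the doorbell once at the end.
 */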
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
{
        int err = 0;
        u8 t3_wr_flit_cnt;
        enum t3_wr_opcode t3_wr_opcode = 0;
        enum t3_wr_flags t3_wr_flags;
        struct iwch_qp *qhp;
        u32 idx;
        union t3_wr *wqe;
        u32 num_wrs;
        unsigned long flag;
        struct t3_swsq *sqp;

        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
                            qhp->wq.sq_size_log2);
        if (num_wrs <= 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        while (wr) {
                if (num_wrs == 0) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
                idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
                wqe = (union t3_wr *) (qhp->wq.queue + idx);
                t3_wr_flags = 0;
                if (wr->send_flags & IB_SEND_SOLICITED)
                        t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
                if (wr->send_flags & IB_SEND_FENCE)
                        t3_wr_flags |= T3_READ_FENCE_FLAG;
                if (wr->send_flags & IB_SEND_SIGNALED)
                        t3_wr_flags |= T3_COMPLETION_FLAG;
                sqp = qhp->wq.sq +
                      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
                switch (wr->opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        t3_wr_opcode = T3_WR_SEND;
                        err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        t3_wr_opcode = T3_WR_WRITE;
                        err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_READ:
                        t3_wr_opcode = T3_WR_READ;
                        t3_wr_flags = 0; /* T3 reads are always signaled */
                        err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
                        if (err)
                                break;
                        sqp->read_len = wqe->read.local_len;
                        if (!qhp->wq.oldest_read)
                                qhp->wq.oldest_read = sqp;
                        break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
                             wr->opcode);
                        err = -EINVAL;
                }
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
                sqp->wr_id = wr->wr_id;
                sqp->opcode = wr2opcode(t3_wr_opcode);
                sqp->sq_wptr = qhp->wq.sq_wptr;
                sqp->complete = 0;
                sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

                build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
                               0, t3_wr_flit_cnt);
                PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
                     __FUNCTION__, (unsigned long long) wr->wr_id, idx,
                     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
                     sqp->opcode);
                wr = wr->next;
                num_wrs--;
                ++(qhp->wq.wptr);
                ++(qhp->wq.sq_wptr);
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
        return err;
}
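
/*
 * Post a chain of receive work requests to the RQ, saving the caller's
 * wr_id in the software RQ so it can be returned in the completion.
 */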
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        int err = 0;
        struct iwch_qp *qhp;
        u32 idx;
        union t3_wr *wqe;
        u32 num_wrs;
        unsigned long flag;

        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
                            qhp->wq.rq_size_log2) - 1;
        if (!wr) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        while (wr) {
                idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
                wqe = (union t3_wr *) (qhp->wq.queue + idx);
                if (num_wrs)
                        err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
                else
                        err = -ENOMEM;
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
                        wr->wr_id;
                build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
                               0, sizeof(struct t3_receive_wr) >> 3);
                PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
                     "wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id,
                     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
                ++(qhp->wq.rq_wptr);
                ++(qhp->wq.wptr);
                wr = wr->next;
                num_wrs--;
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
        return err;
}
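
/*
 * Post a memory-window bind work request on the QP's SQ, mapping the bind
 * range through the PBL and recording software SQ state for its completion.
 */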
int iwch_bind_mw(struct ib_qp *qp,
                 struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind)
{
        struct iwch_dev *rhp;
        struct iwch_mw *mhp;
        struct iwch_qp *qhp;
        union t3_wr *wqe;
        u32 pbl_addr;
        u8 page_size;
        u32 num_wrs;
        unsigned long flag;
        struct ib_sge sgl;
        int err = 0;
        enum t3_wr_flags t3_wr_flags;
        u32 idx;
        struct t3_swsq *sqp;

        qhp = to_iwch_qp(qp);
        mhp = to_iwch_mw(mw);
        rhp = qhp->rhp;

        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
                            qhp->wq.sq_size_log2);
        if ((num_wrs) <= 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
        PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
             mw, mw_bind);
        wqe = (union t3_wr *) (qhp->wq.queue + idx);

        t3_wr_flags = 0;
        if (mw_bind->send_flags & IB_SEND_SIGNALED)
                t3_wr_flags = T3_COMPLETION_FLAG;

        sgl.addr = mw_bind->addr;
        sgl.lkey = mw_bind->mr->lkey;
        sgl.length = mw_bind->length;
        wqe->bind.reserved = 0;
        wqe->bind.type = T3_VA_BASED_TO;

        /* TBD: check perms */
        wqe->bind.perms = iwch_convert_access(mw_bind->mw_access_flags);
        wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
        wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
        wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
        wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
        err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
        if (err) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return err;
        }
        wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
        sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
        sqp->wr_id = mw_bind->wr_id;
        sqp->opcode = T3_BIND_MW;
        sqp->sq_wptr = qhp->wq.sq_wptr;
        sqp->complete = 0;
        sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
        wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
        wqe->bind.mr_pagesz = page_size;
        wqe->flit[T3_SQ_COOKIE_FLIT] = mw_bind->wr_id;
        build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
                       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
                       sizeof(struct t3_bind_mw_wr) >> 3);
        ++(qhp->wq.wptr);
        ++(qhp->wq.sq_wptr);
        spin_unlock_irqrestore(&qhp->lock, flag);

        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

        return err;
}
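
/*
 * Map a T3 TPT error status (and whether the failing operation was tagged:
 * 1 for RDMA write, 2 for RDMA read) onto the iWARP TERMINATE layer/etype
 * and error code fields.
 */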
static inline void build_term_codes(int t3err, u8 *layer_type, u8 *ecode,
                                    int tagged)
{
        switch (t3err) {
        case TPT_ERR_STAG:
                if (tagged == 1) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_INV_STAG;
                } else if (tagged == 2) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_INV_STAG;
                }
                break;
        case TPT_ERR_PDID:
        case TPT_ERR_QPID:
        case TPT_ERR_ACCESS:
                if (tagged == 1) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_STAG_NOT_ASSOC;
                } else if (tagged == 2) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_STAG_NOT_ASSOC;
                }
                break;
        case TPT_ERR_WRAP:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_TO_WRAP;
                break;
        case TPT_ERR_BOUND:
                if (tagged == 1) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_BASE_BOUNDS;
                } else if (tagged == 2) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_BASE_BOUNDS;
                } else {
                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                        *ecode = DDPU_MSG_TOOBIG;
                }
                break;
        case TPT_ERR_INVALIDATE_SHARED_MR:
        case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_CANT_INV_STAG;
                break;
        case TPT_ERR_ECC:
        case TPT_ERR_ECC_PSTAG:
        case TPT_ERR_INTERNAL_ERR:
                *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
                *ecode = 0;
                break;
        case TPT_ERR_OUT_OF_RQE:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_NOBUF;
                break;
        case TPT_ERR_PBL_ADDR_BOUND:
                *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                *ecode = DDPT_BASE_BOUNDS;
                break;
        case TPT_ERR_CRC:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_CRC_ERR;
                break;
        case TPT_ERR_MARKER:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_MARKER_ERR;
                break;
        case TPT_ERR_PDU_LEN_ERR:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_MSG_TOOBIG;
                break;
        case TPT_ERR_DDP_VERSION:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_INV_VERS;
                } else {
                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                        *ecode = DDPU_INV_VERS;
                }
                break;
        case TPT_ERR_RDMA_VERSION:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_VERS;
                break;
        case TPT_ERR_OPCODE:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_OPCODE;
                break;
        case TPT_ERR_DDP_QUEUE_NUM:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_QN;
                break;
        case TPT_ERR_MSN:
        case TPT_ERR_MSN_GAP:
        case TPT_ERR_MSN_RANGE:
        case TPT_ERR_IRD_OVERFLOW:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_RANGE;
                break;
        case TPT_ERR_TBIT:
                *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        case TPT_ERR_MO:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MO;
                break;
        default:
                *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        }
}
/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
        union t3_wr *wqe;
        struct terminate_message *term;
        int status;
        int tagged = 0;
        struct sk_buff *skb;

        PDBG("%s %d\n", __FUNCTION__, __LINE__);
        skb = alloc_skb(40, GFP_ATOMIC);
        if (!skb) {
                printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
                return -ENOMEM;
        }
        wqe = (union t3_wr *)skb_put(skb, 40);
        memset(wqe, 0, 40);
        wqe->send.rdmaop = T3_TERMINATE;

        /* immediate data length */
        wqe->send.plen = htonl(4);

        /* immediate data starts here. */
        term = (struct terminate_message *)wqe->send.sgl;
        if (rsp_msg) {
                status = CQE_STATUS(rsp_msg->cqe);
                if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)
                        tagged = 1;
                if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) ||
                    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP))
                        tagged = 2;
        } else {
                status = TPT_ERR_INTERNAL_ERR;
        }
        build_term_codes(status, &term->layer_etype, &term->ecode, tagged);
        build_fw_riwrh((void *)wqe, T3_WR_SEND,
                       T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1,
                       qhp->ep->hwtid, 5);
        skb->priority = CPL_PRIORITY_DATA;
        return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}
/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
        struct iwch_cq *rchp, *schp;
        int count;

        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);

        PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
        /* take a ref on the qhp since we must release the lock */
        atomic_inc(&qhp->refcnt);
        spin_unlock_irqrestore(&qhp->lock, *flag);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, *flag);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&rchp->cq);
        cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
        cxio_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, *flag);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, *flag);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&schp->cq);
        cxio_count_scqes(&schp->cq, &qhp->wq, &count);
        cxio_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, *flag);

        /* deref */
        if (atomic_dec_and_test(&qhp->refcnt))
                wake_up(&qhp->wait);

        spin_lock_irqsave(&qhp->lock, *flag);
}
static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
        if (t3b_device(qhp->rhp))
                cxio_set_wq_in_error(&qhp->wq);
        __flush_qp(qhp, flag);
}
/*
 * Return non zero if at least one RECV was pre-posted.
 */
static inline int rqes_posted(struct iwch_qp *qhp)
{
        return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
}
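
/*
 * Gather the QP, WQ and MPA attributes into a t3_rdma_init_attr and issue
 * the RDMA INIT command to the adapter via cxio_rdma_init().
 */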
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
                     enum iwch_qp_attr_mask mask,
                     struct iwch_qp_attributes *attrs)
{
        struct t3_rdma_init_attr init_attr;
        int ret;

        init_attr.tid = qhp->ep->hwtid;
        init_attr.qpid = qhp->wq.qpid;
        init_attr.pdid = qhp->attr.pd;
        init_attr.scqid = qhp->attr.scq;
        init_attr.rcqid = qhp->attr.rcq;
        init_attr.rq_addr = qhp->wq.rq_addr;
        init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
        init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
                qhp->attr.mpa_attr.recv_marker_enabled |
                (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
                (qhp->attr.mpa_attr.crc_enabled << 2);

        /*
         * XXX - The IWCM doesn't quite handle getting these
         * attrs set before going into RTS.  For now, just turn
         * them on always...
         */
#if 0
        init_attr.qpcaps = qhp->attr.enableRdmaRead |
                (qhp->attr.enableRdmaWrite << 1) |
                (qhp->attr.enableBind << 2) |
                (qhp->attr.enable_stag0_fastreg << 3) |
                (qhp->attr.enable_stag0_fastreg << 4);
#else
        init_attr.qpcaps = 0x1f;
#endif
        init_attr.tcp_emss = qhp->ep->emss;
        init_attr.ord = qhp->attr.max_ord;
        init_attr.ird = qhp->attr.max_ird;
        init_attr.qp_dma_addr = qhp->wq.dma_addr;
        init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
        init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
        PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
             "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
             init_attr.rq_addr, init_attr.rq_size,
             init_attr.flags, init_attr.qpcaps);
        ret = cxio_rdma_init(&rhp->rdev, &init_attr);
        PDBG("%s ret %d\n", __FUNCTION__, ret);
        return ret;
}
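
/*
 * QP state transition machine.  Called with no locks held; drops the QP
 * lock around rdma_init(), and defers posting the TERMINATE, initiating the
 * EP disconnect and the final put_ep() until after the lock is released.
 */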
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                   enum iwch_qp_attr_mask mask,
                   struct iwch_qp_attributes *attrs,
                   int internal)
{
        int ret = 0;
        struct iwch_qp_attributes newattr = qhp->attr;
        unsigned long flag;
        int disconnect = 0;
        int terminate = 0;
        int abort = 0;
        int free = 0;
        struct iwch_ep *ep = NULL;

        PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
             qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
             (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

        spin_lock_irqsave(&qhp->lock, flag);

        /* Process attr changes if in IDLE */
        if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
                if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
                        ret = -EIO;
                        goto out;
                }
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
                        newattr.enable_rdma_read = attrs->enable_rdma_read;
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
                        newattr.enable_rdma_write = attrs->enable_rdma_write;
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
                        newattr.enable_bind = attrs->enable_bind;
                if (mask & IWCH_QP_ATTR_MAX_ORD) {
                        if (attrs->max_ord >
                            rhp->attr.max_rdma_read_qp_depth) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ord = attrs->max_ord;
                }
                if (mask & IWCH_QP_ATTR_MAX_IRD) {
                        if (attrs->max_ird >
                            rhp->attr.max_rdma_reads_per_qp) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ird = attrs->max_ird;
                }
                qhp->attr = newattr;
        }

        if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
                goto out;
        if (qhp->attr.state == attrs->next_state)
                goto out;

        switch (qhp->attr.state) {
        case IWCH_QP_STATE_IDLE:
                switch (attrs->next_state) {
                case IWCH_QP_STATE_RTS:
                        if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        qhp->attr.mpa_attr = attrs->mpa_attr;
                        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
                        qhp->ep = qhp->attr.llp_stream_handle;
                        qhp->attr.state = IWCH_QP_STATE_RTS;

                        /*
                         * Ref the endpoint here and deref when we
                         * disassociate the endpoint from the QP.  This
                         * happens in CLOSING->IDLE transition or *->ERROR
                         * transition.
                         */
                        get_ep(&qhp->ep->com);
                        spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_init(rhp, qhp, mask, attrs);
                        spin_lock_irqsave(&qhp->lock, flag);
                        if (ret)
                                goto err;
                        break;
                case IWCH_QP_STATE_ERROR:
                        qhp->attr.state = IWCH_QP_STATE_ERROR;
                        flush_qp(qhp, &flag);
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case IWCH_QP_STATE_RTS:
                switch (attrs->next_state) {
                case IWCH_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
                        qhp->attr.state = IWCH_QP_STATE_CLOSING;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
                                ep = qhp->ep;
                        }
                        break;
                case IWCH_QP_STATE_TERMINATE:
                        qhp->attr.state = IWCH_QP_STATE_TERMINATE;
                        if (t3b_device(qhp->rhp))
                                cxio_set_wq_in_error(&qhp->wq);
                        if (!internal)
                                terminate = 1;
                        break;
                case IWCH_QP_STATE_ERROR:
                        qhp->attr.state = IWCH_QP_STATE_ERROR;
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
                                ep = qhp->ep;
                        }
                        goto err;
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case IWCH_QP_STATE_CLOSING:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                switch (attrs->next_state) {
                case IWCH_QP_STATE_IDLE:
                        qhp->attr.state = IWCH_QP_STATE_IDLE;
                        qhp->attr.llp_stream_handle = NULL;
                        put_ep(&qhp->ep->com);
                        qhp->ep = NULL;
                        wake_up(&qhp->wait);
                        break;
                case IWCH_QP_STATE_ERROR:
                        goto err;
                default:
                        ret = -EINVAL;
                        goto err;
                }
                break;
        case IWCH_QP_STATE_ERROR:
                if (attrs->next_state != IWCH_QP_STATE_IDLE) {
                        ret = -EINVAL;
                        goto out;
                }

                if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
                    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
                        ret = -EINVAL;
                        goto out;
                }
                qhp->attr.state = IWCH_QP_STATE_IDLE;
                memset(&qhp->attr, 0, sizeof(qhp->attr));
                break;
        case IWCH_QP_STATE_TERMINATE:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                goto err;
                break;
        default:
                printk(KERN_ERR "%s in a bad state %d\n",
                       __FUNCTION__, qhp->attr.state);
                ret = -EINVAL;
                goto err;
                break;
        }
        goto out;
err:
        PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
             qhp->wq.qpid);

        /* disassociate the LLP connection */
        qhp->attr.llp_stream_handle = NULL;
        ep = qhp->ep;
        qhp->ep = NULL;
        qhp->attr.state = IWCH_QP_STATE_ERROR;
        free = 1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
        flush_qp(qhp, &flag);
out:
        spin_unlock_irqrestore(&qhp->lock, flag);

        if (terminate)
                iwch_post_terminate(qhp, NULL);

        /*
         * If disconnect is 1, then we need to initiate a disconnect
         * on the EP.  This can be a normal close (RTS->CLOSING) or
         * an abnormal close (RTS/CLOSING->ERROR).
         */
        if (disconnect)
                iwch_ep_disconnect(ep, abort, GFP_KERNEL);

        /*
         * If free is 1, then we've disassociated the EP from the QP
         * and we need to dereference the EP.
         */
        if (free)
                put_ep(&ep->com);

        PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
        return ret;
}
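
/*
 * Quiesce/resume helpers: pause or restart the TID associated with a QP via
 * iwch_quiesce_tid()/iwch_resume_tid() and track the state in qhp->flags.
 */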
static int quiesce_qp(struct iwch_qp *qhp)
{
        spin_lock_irq(&qhp->lock);
        iwch_quiesce_tid(qhp->ep);
        qhp->flags |= QP_QUIESCED;
        spin_unlock_irq(&qhp->lock);
        return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
        spin_lock_irq(&qhp->lock);
        iwch_resume_tid(qhp->ep);
        qhp->flags &= ~QP_QUIESCED;
        spin_unlock_irq(&qhp->lock);
        return 0;
}
int iwch_quiesce_qps(struct iwch_cq *chp)
{
        int i;
        struct iwch_qp *qhp;

        for (i = 0; i < T3_MAX_NUM_QP; i++) {
                qhp = get_qhp(chp->rhp, i);
                if (!qhp)
                        continue;
                if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
                        quiesce_qp(qhp);
                        continue;
                }
                if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
                        quiesce_qp(qhp);
        }
        return 0;
}

int iwch_resume_qps(struct iwch_cq *chp)
{
        int i;
        struct iwch_qp *qhp;

        for (i = 0; i < T3_MAX_NUM_QP; i++) {
                qhp = get_qhp(chp->rhp, i);
                if (!qhp)
                        continue;
                if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
                        resume_qp(qhp);
                        continue;
                }
                if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
                        resume_qp(qhp);
        }
        return 0;
}