/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_resource.h"
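/*
 * Build a T3 send WQE from an ib_send_wr.  Handles IB_WR_SEND and
 * IB_WR_SEND_WITH_INV, mapping IB_SEND_SOLICITED onto the *_WITH_SE
 * opcodes, copies the SGL into the WQE in big-endian form and returns
 * the WQE length in 8-byte flits via *flit_cnt.
 */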
static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
                           u8 *flit_cnt)
{
        u32 plen;
        int i;

        switch (wr->opcode) {
        case IB_WR_SEND:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.rdmaop = T3_SEND_WITH_SE;
                else
                        wqe->send.rdmaop = T3_SEND;
                wqe->send.rem_stag = 0;
                break;
        case IB_WR_SEND_WITH_INV:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
                else
                        wqe->send.rdmaop = T3_SEND_WITH_INV;
                wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
                break;
        default:
                return -EINVAL;
        }
        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->send.reserved[0] = 0;
        wqe->send.reserved[1] = 0;
        wqe->send.reserved[2] = 0;
        plen = 0;
        for (i = 0; i < wr->num_sge; i++) {
                if ((plen + wr->sg_list[i].length) < plen)
                        return -EMSGSIZE;

                plen += wr->sg_list[i].length;
                wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
                wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
                wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
        }
        wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
        *flit_cnt = 4 + ((wr->num_sge) << 1);
        wqe->send.plen = cpu_to_be32(plen);
        return 0;
}

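/*
 * Build an RDMA write WQE.  The sink stag and address come from
 * wr->wr.rdma; a WRITE_WITH_IMM carries its 4 bytes of immediate data
 * in sgl[0].stag with an empty SGL, otherwise the SGL is copied as for
 * a send.
 */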
static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
                            u8 *flit_cnt)
{
        u32 plen;
        int i;

        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->write.rdmaop = T3_RDMA_WRITE;
        wqe->write.reserved[0] = 0;
        wqe->write.reserved[1] = 0;
        wqe->write.reserved[2] = 0;
        wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

        if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
                plen = 4;
                wqe->write.sgl[0].stag = wr->ex.imm_data;
                wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
                wqe->write.num_sgle = __constant_cpu_to_be32(0);
                *flit_cnt = 6;
        } else {
                plen = 0;
                for (i = 0; i < wr->num_sge; i++) {
                        if ((plen + wr->sg_list[i].length) < plen) {
                                return -EMSGSIZE;
                        }
                        plen += wr->sg_list[i].length;
                        wqe->write.sgl[i].stag =
                            cpu_to_be32(wr->sg_list[i].lkey);
                        wqe->write.sgl[i].len =
                            cpu_to_be32(wr->sg_list[i].length);
                        wqe->write.sgl[i].to =
                            cpu_to_be64(wr->sg_list[i].addr);
                }
                wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
                *flit_cnt = 5 + ((wr->num_sge) << 1);
        }
        wqe->write.plen = cpu_to_be32(plen);
        return 0;
}

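/*
 * Build an RDMA read request WQE.  Only sg_list[0] is used as the local
 * sink; IB_WR_RDMA_READ_WITH_INV additionally asks the HW to invalidate
 * the local stag when the read completes.
 */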
static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
                           u8 *flit_cnt)
{
        wqe->read.rdmaop = T3_READ_REQ;
        if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
                wqe->read.local_inv = 1;
        else
                wqe->read.local_inv = 0;
        wqe->read.reserved[0] = 0;
        wqe->read.reserved[1] = 0;
        wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
        wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
        wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
        wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
        *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
        return 0;
}

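/*
 * Build a fast-register WQE.  The page list is written directly into
 * the WQE; if it does not fit in one WQE, a second T3_WR_FASTREG
 * fragment WQE is built in the next queue slot and *wr_cnt is bumped
 * so the caller advances the write pointer accordingly.
 */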
static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
                         u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
        __be64 *p;
        int i;

        if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
                return -EINVAL;
        *wr_cnt = 1;
        wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
        wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
        wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
        wqe->fastreg.va_base_lo_fbo =
                cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
        wqe->fastreg.page_type_perms = cpu_to_be32(
                V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
                V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift - 12) |
                V_FR_TYPE(TPT_VATO) |
                V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
        p = &wqe->fastreg.pbl_addrs[0];
        for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {

                /* If we need a 2nd WR, then set it up */
                if (i == T3_MAX_FASTREG_FRAG) {
                        *wr_cnt = 2;
                        wqe = (union t3_wr *)(wq->queue +
                                Q_PTR2IDX((wq->wptr+1), wq->size_log2));
                        build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
                               Q_GENBIT(wq->wptr + 1, wq->size_log2),
                               0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
                               T3_EOP);

                        p = &wqe->pbl_frag.pbl_addrs[0];
                }
                *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
        }
        *flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
        return 0;
}

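/*
 * Build a local-invalidate WQE for wr->ex.invalidate_rkey.
 */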
static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
                          u8 *flit_cnt)
{
        wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
        wqe->local_inv.reserved = 0;
        *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
        return 0;
}

/*
 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
 */
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
                            u32 num_sgle, u32 *pbl_addr, u8 *page_size)
{
        struct iwch_mr *mhp;
        u64 offset;
        int i;

        for (i = 0; i < num_sgle; i++) {

                mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
                if (!mhp) {
                        PDBG("%s %d\n", __func__, __LINE__);
                        return -EIO;
                }
                if (!mhp->attr.state) {
                        PDBG("%s %d\n", __func__, __LINE__);
                        return -EIO;
                }
                if (mhp->attr.zbva) {
                        PDBG("%s %d\n", __func__, __LINE__);
                        return -EIO;
                }

                if (sg_list[i].addr < mhp->attr.va_fbo) {
                        PDBG("%s %d\n", __func__, __LINE__);
                        return -EINVAL;
                }
                if (sg_list[i].addr + ((u64) sg_list[i].length) <
                    sg_list[i].addr) {
                        PDBG("%s %d\n", __func__, __LINE__);
                        return -EINVAL;
                }
                if (sg_list[i].addr + ((u64) sg_list[i].length) >
                    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
                        PDBG("%s %d\n", __func__, __LINE__);
                        return -EINVAL;
                }
                offset = sg_list[i].addr - mhp->attr.va_fbo;
                offset += ((u32) mhp->attr.va_fbo) %
                          (1UL << (12 + mhp->attr.page_size));
                pbl_addr[i] = ((mhp->attr.pbl_addr -
                                rhp->rdev.rnic_info.pbl_base) >> 3) +
                              (offset >> (12 + mhp->attr.page_size));
                page_size[i] = mhp->attr.page_size;
        }
        return 0;
}

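/*
 * Build a receive WQE for SGEs carrying real (non-zero) lkeys.  Each
 * lkey is translated to a PBL address via iwch_sgl2pbl_map(), unused
 * SGL slots are zeroed, and the wr_id is stashed in the software RQ
 * entry for completion processing.
 */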
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
                           struct ib_recv_wr *wr)
{
        int i, err = 0;
        u32 pbl_addr[T3_MAX_SGE];
        u8 page_size[T3_MAX_SGE];

        err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
                               page_size);
        if (err)
                return err;
        wqe->recv.pagesz[0] = page_size[0];
        wqe->recv.pagesz[1] = page_size[1];
        wqe->recv.pagesz[2] = page_size[2];
        wqe->recv.pagesz[3] = page_size[3];
        wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
        for (i = 0; i < wr->num_sge; i++) {
                wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
                wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

                /* to in the WQE == the offset into the page */
                wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
                                (1UL << (12 + page_size[i])));

                /* pbl_addr is the adapter's address in the PBL */
                wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
        }
        for (; i < T3_MAX_SGE; i++) {
                wqe->recv.sgl[i].stag = 0;
                wqe->recv.sgl[i].len = 0;
                wqe->recv.sgl[i].to = 0;
                wqe->recv.pbl_addr[i] = 0;
        }
        qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
                             qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
        qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
                             qhp->wq.rq_size_log2)].pbl_addr = 0;
        return 0;
}

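/*
 * Build a receive WQE for zero-stag (lkey == 0) SGEs.  A PBL of
 * T3_STAG0_PBL_SIZE is allocated per WR and handed to the uP, which
 * builds the real PBL; see the in-line comments for the page-size and
 * length restrictions this imposes.
 */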
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
                                struct ib_recv_wr *wr)
{
        u32 pbl_addr;
        u32 pbl_offset;
        int i;

        /*
         * The T3 HW requires the PBL in the HW recv descriptor to reference
         * a PBL entry.  So we allocate the max needed PBL memory here and pass
         * it to the uP in the recv WR.  The uP will build the PBL and setup
         * the HW recv descriptor.
         */
        pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
        if (!pbl_addr)
                return -ENOMEM;

        /*
         * Compute the 8B aligned offset.
         */
        pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;

        wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);

        for (i = 0; i < wr->num_sge; i++) {

                /*
                 * Use a 128MB page size. This and an imposed 128MB
                 * sge length limit allows us to require only a 2-entry HW
                 * PBL for each SGE.  This restriction is acceptable since
                 * it is not possible to allocate 128MB of contiguous
                 * DMA coherent memory!
                 */
                if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
                        return -EINVAL;
                wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;

                /*
                 * T3 restricts a recv to all zero-stag or all non-zero-stag.
                 */
                if (wr->sg_list[i].lkey != 0)
                        return -EINVAL;
                wqe->recv.sgl[i].stag = 0;
                wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
                wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
                wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
                pbl_offset += 2;
        }
        for (; i < T3_MAX_SGE; i++) {
                wqe->recv.pagesz[i] = 0;
                wqe->recv.sgl[i].stag = 0;
                wqe->recv.sgl[i].len = 0;
                wqe->recv.sgl[i].to = 0;
                wqe->recv.pbl_addr[i] = 0;
        }
        qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
                             qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
        qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
                             qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
        return 0;
}

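/*
 * Post a chain of send work requests.  Takes the QP lock, builds one
 * T3 WQE per ib_send_wr using the helpers above, fills in the software
 * SQ entry and finally rings the doorbell.
 */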
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
{
        int err = 0;
        u8 uninitialized_var(t3_wr_flit_cnt);
        enum t3_wr_opcode t3_wr_opcode = 0;
        enum t3_wr_flags t3_wr_flags;
        struct iwch_qp *qhp;
        u32 idx;
        union t3_wr *wqe;
        u32 num_wrs;
        unsigned long flag;
        struct t3_swsq *sqp;
        int wr_cnt = 1;

        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
                            qhp->wq.sq_size_log2);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        while (wr) {
                if (num_wrs == 0) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
                idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
                wqe = (union t3_wr *) (qhp->wq.queue + idx);
                t3_wr_flags = 0;
                if (wr->send_flags & IB_SEND_SOLICITED)
                        t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
                if (wr->send_flags & IB_SEND_SIGNALED)
                        t3_wr_flags |= T3_COMPLETION_FLAG;
                sqp = qhp->wq.sq +
                      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
                switch (wr->opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_INV:
                        if (wr->send_flags & IB_SEND_FENCE)
                                t3_wr_flags |= T3_READ_FENCE_FLAG;
                        t3_wr_opcode = T3_WR_SEND;
                        err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        t3_wr_opcode = T3_WR_WRITE;
                        err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_READ:
                case IB_WR_RDMA_READ_WITH_INV:
                        t3_wr_opcode = T3_WR_READ;
                        t3_wr_flags = 0; /* T3 reads are always signaled */
                        err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
                        if (err)
                                break;
                        sqp->read_len = wqe->read.local_len;
                        if (!qhp->wq.oldest_read)
                                qhp->wq.oldest_read = sqp;
                        break;
                case IB_WR_FAST_REG_MR:
                        t3_wr_opcode = T3_WR_FASTREG;
                        err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
                                            &wr_cnt, &qhp->wq);
                        break;
                case IB_WR_LOCAL_INV:
                        if (wr->send_flags & IB_SEND_FENCE)
                                t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
                        t3_wr_opcode = T3_WR_INV_STAG;
                        err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
                        break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __func__,
                             wr->opcode);
                        err = -EINVAL;
                }
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
                sqp->wr_id = wr->wr_id;
                sqp->opcode = wr2opcode(t3_wr_opcode);
                sqp->sq_wptr = qhp->wq.sq_wptr;
                sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

                build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
                               0, t3_wr_flit_cnt,
                               (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
                PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
                     __func__, (unsigned long long) wr->wr_id, idx,
                     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
                     sqp->opcode);
                wr = wr->next;
                num_wrs--;
                qhp->wq.wptr += wr_cnt;
                ++(qhp->wq.sq_wptr);
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
        return err;
}

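/*
 * Post a chain of receive work requests.  Zero-stag and normal-stag
 * SGLs take different build paths; every recv WQE is posted with the
 * completion flag set.
 */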
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        int err = 0;
        struct iwch_qp *qhp;
        u32 idx;
        union t3_wr *wqe;
        u32 num_wrs;
        unsigned long flag;

        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
                            qhp->wq.rq_size_log2) - 1;
        if (!wr) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        while (wr) {
                if (wr->num_sge > T3_MAX_SGE) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }
                if (num_wrs == 0) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
                idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
                wqe = (union t3_wr *) (qhp->wq.queue + idx);

                if (wr->sg_list[0].lkey)
                        err = build_rdma_recv(qhp, wqe, wr);
                else
                        err = build_zero_stag_recv(qhp, wqe, wr);
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
                               0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
                PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
                     "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
                     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
                ++(qhp->wq.rq_wptr);
                ++(qhp->wq.wptr);
                wr = wr->next;
                num_wrs--;
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
        return err;
}

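/*
 * Post a memory window bind as a T3_WR_BIND WQE on the send queue.
 * The MR backing the bind is mapped to a PBL address with
 * iwch_sgl2pbl_map() before the WQE is built.
 */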
int iwch_bind_mw(struct ib_qp *qp,
                 struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind)
{
        struct iwch_dev *rhp;
        struct iwch_mw *mhp;
        struct iwch_qp *qhp;
        union t3_wr *wqe;
        u32 pbl_addr;
        u8 page_size;
        u32 num_wrs;
        unsigned long flag;
        struct ib_sge sgl;
        int err = 0;
        enum t3_wr_flags t3_wr_flags;
        u32 idx;
        struct t3_swsq *sqp;

        qhp = to_iwch_qp(qp);
        mhp = to_iwch_mw(mw);
        rhp = qhp->rhp;

        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
                            qhp->wq.sq_size_log2);
        if ((num_wrs) <= 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
        PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
             mw, mw_bind);
        wqe = (union t3_wr *) (qhp->wq.queue + idx);

        t3_wr_flags = 0;
        if (mw_bind->send_flags & IB_SEND_SIGNALED)
                t3_wr_flags = T3_COMPLETION_FLAG;

        sgl.addr = mw_bind->addr;
        sgl.lkey = mw_bind->mr->lkey;
        sgl.length = mw_bind->length;
        wqe->bind.reserved = 0;
        wqe->bind.type = TPT_VATO;

        /* TBD: check perms */
        wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags);
        wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
        wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
        wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
        wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
        err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
        if (err) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return err;
        }
        wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
        sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
        sqp->wr_id = mw_bind->wr_id;
        sqp->opcode = T3_BIND_MW;
        sqp->sq_wptr = qhp->wq.sq_wptr;
        sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
        wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
        wqe->bind.mr_pagesz = page_size;
        build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
                       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
                       sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
        ++(qhp->wq.wptr);
        ++(qhp->wq.sq_wptr);
        spin_unlock_irqrestore(&qhp->lock, flag);

        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

        return err;
}

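/*
 * Translate a CQE error status into the RDMAP/DDP/MPA layer and error
 * code fields carried in a TERMINATE message.  With no rsp_msg the
 * status defaults to TPT_ERR_INTERNAL_ERR.
 */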
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
                                    u8 *layer_type, u8 *ecode)
{
        int status = TPT_ERR_INTERNAL_ERR;
        int tagged = 0;
        int opcode = -1;
        int rqtype = 0;
        int send_inv = 0;

        if (rsp_msg) {
                status = CQE_STATUS(rsp_msg->cqe);
                opcode = CQE_OPCODE(rsp_msg->cqe);
                rqtype = RQ_TYPE(rsp_msg->cqe);
                send_inv = (opcode == T3_SEND_WITH_INV) ||
                           (opcode == T3_SEND_WITH_SE_INV);
                tagged = (opcode == T3_RDMA_WRITE) ||
                         (rqtype && (opcode == T3_READ_RESP));
        }

        switch (status) {
        case TPT_ERR_STAG:
                if (send_inv) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                        *ecode = RDMAP_CANT_INV_STAG;
                } else {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_INV_STAG;
                }
                break;
        case TPT_ERR_PDID:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                if ((opcode == T3_SEND_WITH_INV) ||
                    (opcode == T3_SEND_WITH_SE_INV))
                        *ecode = RDMAP_CANT_INV_STAG;
                else
                        *ecode = RDMAP_STAG_NOT_ASSOC;
                break;
        case TPT_ERR_QPID:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_STAG_NOT_ASSOC;
                break;
        case TPT_ERR_ACCESS:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_ACC_VIOL;
                break;
        case TPT_ERR_WRAP:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_TO_WRAP;
                break;
        case TPT_ERR_BOUND:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_BASE_BOUNDS;
                } else {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_BASE_BOUNDS;
                }
                break;
        case TPT_ERR_INVALIDATE_SHARED_MR:
        case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_CANT_INV_STAG;
                break;
        case TPT_ERR_ECC:
        case TPT_ERR_ECC_PSTAG:
        case TPT_ERR_INTERNAL_ERR:
                *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
                *ecode = 0;
                break;
        case TPT_ERR_OUT_OF_RQE:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_NOBUF;
                break;
        case TPT_ERR_PBL_ADDR_BOUND:
                *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                *ecode = DDPT_BASE_BOUNDS;
                break;
        case TPT_ERR_CRC:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_CRC_ERR;
                break;
        case TPT_ERR_MARKER:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_MARKER_ERR;
                break;
        case TPT_ERR_PDU_LEN_ERR:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_MSG_TOOBIG;
                break;
        case TPT_ERR_DDP_VERSION:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_INV_VERS;
                } else {
                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                        *ecode = DDPU_INV_VERS;
                }
                break;
        case TPT_ERR_RDMA_VERSION:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_VERS;
                break;
        case TPT_ERR_OPCODE:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_OPCODE;
                break;
        case TPT_ERR_DDP_QUEUE_NUM:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_QN;
                break;
        case TPT_ERR_MSN:
        case TPT_ERR_MSN_GAP:
        case TPT_ERR_MSN_RANGE:
        case TPT_ERR_IRD_OVERFLOW:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_RANGE;
                break;
        case TPT_ERR_TBIT:
                *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        case TPT_ERR_MO:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MO;
                break;
        default:
                *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        }
}

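/*
 * Post a 0B read request directly to the firmware via an offload skb
 * rather than through the SQ.  The stags, addresses and length are
 * dummy values since no data actually moves (presumably used during
 * connection establishment to satisfy the peer's RTR exchange).
 */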
int iwch_post_zb_read(struct iwch_qp *qhp)
{
        union t3_wr *wqe;
        struct sk_buff *skb;
        u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;

        PDBG("%s enter\n", __func__);
        skb = alloc_skb(40, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
                return -ENOMEM;
        }
        wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
        memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
        wqe->read.rdmaop = T3_READ_REQ;
        wqe->read.reserved[0] = 0;
        wqe->read.reserved[1] = 0;
        wqe->read.reserved[2] = 0;
        wqe->read.rem_stag = cpu_to_be32(1);
        wqe->read.rem_to = cpu_to_be64(1);
        wqe->read.local_stag = cpu_to_be32(1);
        wqe->read.local_len = cpu_to_be32(0);
        wqe->read.local_to = cpu_to_be64(1);
        wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
        wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
                                                V_FW_RIWR_LEN(flit_cnt));
        skb->priority = CPL_PRIORITY_DATA;
        return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
        union t3_wr *wqe;
        struct terminate_message *term;
        struct sk_buff *skb;

        PDBG("%s %d\n", __func__, __LINE__);
        skb = alloc_skb(40, GFP_ATOMIC);
        if (!skb) {
                printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
                return -ENOMEM;
        }
        wqe = (union t3_wr *)skb_put(skb, 40);
        memset(wqe, 0, 40);
        wqe->send.rdmaop = T3_TERMINATE;

        /* immediate data length */
        wqe->send.plen = htonl(4);

        /* immediate data starts here. */
        term = (struct terminate_message *)wqe->send.sgl;
        build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
        wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
                        V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
        wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
        skb->priority = CPL_PRIORITY_DATA;
        return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
        struct iwch_cq *rchp, *schp;
        int count;
        int flushed;

        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);

        PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
        /* take a ref on the qhp since we must release the lock */
        atomic_inc(&qhp->refcnt);
        spin_unlock_irqrestore(&qhp->lock, *flag);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, *flag);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&rchp->cq);
        cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
        flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, *flag);
        if (flushed)
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, *flag);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&schp->cq);
        cxio_count_scqes(&schp->cq, &qhp->wq, &count);
        flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, *flag);
        if (flushed)
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

        /* deref */
        if (atomic_dec_and_test(&qhp->refcnt))
                wake_up(&qhp->wait);

        spin_lock_irqsave(&qhp->lock, *flag);
}

static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
        if (qhp->ibqp.uobject)
                cxio_set_wq_in_error(&qhp->wq);
        else
                __flush_qp(qhp, flag);
}

/*
 * Return count of RECV WRs posted
 */
u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
        union t3_wr *wqe = qhp->wq.queue;
        u16 count = 0;

        while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
                count++;
                wqe++;
        }
        PDBG("%s qhp %p count %u\n", __func__, qhp, count);
        return count;
}

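/*
 * Gather the QP, CQ and MPA attributes into a t3_rdma_init_attr and
 * hand them to the low-level driver via cxio_rdma_init() to move the
 * hardware QP into RDMA mode.
 */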
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
                     enum iwch_qp_attr_mask mask,
                     struct iwch_qp_attributes *attrs)
{
        struct t3_rdma_init_attr init_attr;
        int ret;

        init_attr.tid = qhp->ep->hwtid;
        init_attr.qpid = qhp->wq.qpid;
        init_attr.pdid = qhp->attr.pd;
        init_attr.scqid = qhp->attr.scq;
        init_attr.rcqid = qhp->attr.rcq;
        init_attr.rq_addr = qhp->wq.rq_addr;
        init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
        init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
                qhp->attr.mpa_attr.recv_marker_enabled |
                (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
                (qhp->attr.mpa_attr.crc_enabled << 2);

        /*
         * XXX - The IWCM doesn't quite handle getting these
         * attrs set before going into RTS.  For now, just turn
         * them all on.
         */
#if 0
        init_attr.qpcaps = qhp->attr.enableRdmaRead |
                (qhp->attr.enableRdmaWrite << 1) |
                (qhp->attr.enableBind << 2) |
                (qhp->attr.enable_stag0_fastreg << 3) |
                (qhp->attr.enable_stag0_fastreg << 4);
#else
        init_attr.qpcaps = 0x1f;
#endif
        init_attr.tcp_emss = qhp->ep->emss;
        init_attr.ord = qhp->attr.max_ord;
        init_attr.ird = qhp->attr.max_ird;
        init_attr.qp_dma_addr = qhp->wq.dma_addr;
        init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
        init_attr.rqe_count = iwch_rqes_posted(qhp);
        init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
        if (!qhp->ibqp.uobject)
                init_attr.flags |= PRIV_QP;
        if (peer2peer) {
                init_attr.rtr_type = RTR_READ;
                if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
                        init_attr.ord = 1;
                if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
                        init_attr.ird = 1;
        } else
                init_attr.rtr_type = 0;
        init_attr.irs = qhp->ep->rcv_seq;
        PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
             "flags 0x%x qpcaps 0x%x\n", __func__,
             init_attr.rq_addr, init_attr.rq_size,
             init_attr.flags, init_attr.qpcaps);
        ret = cxio_rdma_init(&rhp->rdev, &init_attr);
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
}

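/*
 * Modify QP attributes and/or drive the QP state machine
 * (IDLE/RTS/CLOSING/TERMINATE/ERROR).  Attribute changes are only
 * accepted in IDLE.  Transitions into ERROR disassociate the LLP
 * endpoint and flush the queues; posting the TERMINATE and the EP
 * disconnect/deref are deferred until the QP lock is dropped.
 */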
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                   enum iwch_qp_attr_mask mask,
                   struct iwch_qp_attributes *attrs,
                   int internal)
{
        int ret = 0;
        struct iwch_qp_attributes newattr = qhp->attr;
        unsigned long flag;
        int disconnect = 0;
        int terminate = 0;
        int abort = 0;
        int free = 0;
        struct iwch_ep *ep = NULL;

        PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
             qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
             (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

        spin_lock_irqsave(&qhp->lock, flag);

        /* Process attr changes if in IDLE */
        if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
                if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
                        ret = -EIO;
                        goto out;
                }
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
                        newattr.enable_rdma_read = attrs->enable_rdma_read;
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
                        newattr.enable_rdma_write = attrs->enable_rdma_write;
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
                        newattr.enable_bind = attrs->enable_bind;
                if (mask & IWCH_QP_ATTR_MAX_ORD) {
                        if (attrs->max_ord >
                            rhp->attr.max_rdma_read_qp_depth) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ord = attrs->max_ord;
                }
                if (mask & IWCH_QP_ATTR_MAX_IRD) {
                        if (attrs->max_ird >
                            rhp->attr.max_rdma_reads_per_qp) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ird = attrs->max_ird;
                }
                qhp->attr = newattr;
        }

        if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
                goto out;
        if (qhp->attr.state == attrs->next_state)
                goto out;

        switch (qhp->attr.state) {
        case IWCH_QP_STATE_IDLE:
                switch (attrs->next_state) {
                case IWCH_QP_STATE_RTS:
                        if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        qhp->attr.mpa_attr = attrs->mpa_attr;
                        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
                        qhp->ep = qhp->attr.llp_stream_handle;
                        qhp->attr.state = IWCH_QP_STATE_RTS;

                        /*
                         * Ref the endpoint here and deref when we
                         * disassociate the endpoint from the QP.  This
                         * happens in CLOSING->IDLE transition or *->ERROR
                         * transition.
                         */
                        get_ep(&qhp->ep->com);
                        spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_init(rhp, qhp, mask, attrs);
                        spin_lock_irqsave(&qhp->lock, flag);
                        if (ret)
                                goto err;
                        break;
                case IWCH_QP_STATE_ERROR:
                        qhp->attr.state = IWCH_QP_STATE_ERROR;
                        flush_qp(qhp, &flag);
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case IWCH_QP_STATE_RTS:
                switch (attrs->next_state) {
                case IWCH_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
                        qhp->attr.state = IWCH_QP_STATE_CLOSING;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
                                ep = qhp->ep;
                                get_ep(&ep->com);
                        }
                        break;
                case IWCH_QP_STATE_TERMINATE:
                        qhp->attr.state = IWCH_QP_STATE_TERMINATE;
                        if (qhp->ibqp.uobject)
                                cxio_set_wq_in_error(&qhp->wq);
                        if (!internal)
                                terminate = 1;
                        break;
                case IWCH_QP_STATE_ERROR:
                        qhp->attr.state = IWCH_QP_STATE_ERROR;
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
                                ep = qhp->ep;
                                get_ep(&ep->com);
                        }
                        goto err;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case IWCH_QP_STATE_CLOSING:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                switch (attrs->next_state) {
                case IWCH_QP_STATE_IDLE:
                        flush_qp(qhp, &flag);
                        qhp->attr.state = IWCH_QP_STATE_IDLE;
                        qhp->attr.llp_stream_handle = NULL;
                        put_ep(&qhp->ep->com);
                        qhp->ep = NULL;
                        wake_up(&qhp->wait);
                        break;
                case IWCH_QP_STATE_ERROR:
                        goto err;
                default:
                        ret = -EINVAL;
                        goto err;
                }
                break;
        case IWCH_QP_STATE_ERROR:
                if (attrs->next_state != IWCH_QP_STATE_IDLE) {
                        ret = -EINVAL;
                        goto out;
                }

                if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
                    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
                        ret = -EINVAL;
                        goto out;
                }
                qhp->attr.state = IWCH_QP_STATE_IDLE;
                memset(&qhp->attr, 0, sizeof(qhp->attr));
                break;
        case IWCH_QP_STATE_TERMINATE:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                goto err;
        default:
                printk(KERN_ERR "%s in a bad state %d\n",
                       __func__, qhp->attr.state);
                ret = -EINVAL;
                goto err;
        }
        goto out;
err:
        PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
             qhp->wq.qpid);

        /* disassociate the LLP connection */
        qhp->attr.llp_stream_handle = NULL;
        ep = qhp->ep;
        qhp->ep = NULL;
        qhp->attr.state = IWCH_QP_STATE_ERROR;
        free = 1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
        flush_qp(qhp, &flag);
out:
        spin_unlock_irqrestore(&qhp->lock, flag);

        if (terminate)
                iwch_post_terminate(qhp, NULL);

        /*
         * If disconnect is 1, then we need to initiate a disconnect
         * on the EP.  This can be a normal close (RTS->CLOSING) or
         * an abnormal close (RTS/CLOSING->ERROR).
         */
        if (disconnect) {
                iwch_ep_disconnect(ep, abort, GFP_KERNEL);
                put_ep(&ep->com);
        }

        /*
         * If free is 1, then we've disassociated the EP from the QP
         * and we need to dereference the EP.
         */
        if (free)
                put_ep(&ep->com);

        PDBG("%s exit state %d\n", __func__, qhp->attr.state);
        return ret;
}

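/*
 * Quiesce/resume helpers: walk all QPs and pause or restart the TID of
 * any QP whose send or receive CQ matches the given CQ.
 */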
static int quiesce_qp(struct iwch_qp *qhp)
{
        spin_lock_irq(&qhp->lock);
        iwch_quiesce_tid(qhp->ep);
        qhp->flags |= QP_QUIESCED;
        spin_unlock_irq(&qhp->lock);
        return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
        spin_lock_irq(&qhp->lock);
        iwch_resume_tid(qhp->ep);
        qhp->flags &= ~QP_QUIESCED;
        spin_unlock_irq(&qhp->lock);
        return 0;
}

int iwch_quiesce_qps(struct iwch_cq *chp)
{
        int i;
        struct iwch_qp *qhp;

        for (i = 0; i < T3_MAX_NUM_QP; i++) {
                qhp = get_qhp(chp->rhp, i);
                if (!qhp)
                        continue;
                if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
                        quiesce_qp(qhp);
                        continue;
                }
                if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
                        quiesce_qp(qhp);
        }
        return 0;
}

int iwch_resume_qps(struct iwch_cq *chp)
{
        int i;
        struct iwch_qp *qhp;

        for (i = 0; i < T3_MAX_NUM_QP; i++) {
                qhp = get_qhp(chp->rhp, i);
                if (!qhp)
                        continue;
                if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
                        resume_qp(qhp);
                        continue;
                }
                if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
                        resume_qp(qhp);
        }
        return 0;
}