/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"
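/*
 * Worst-case CQ sizing: every connection can have all of its RX and TX
 * work requests complete at once; the extra ISCSI_ISER_MAX_CONN entries
 * in ISER_MAX_CQ_LEN leave one spare slot per connection, presumably for
 * the teardown beacon posted in iser_conn_terminate().
 */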
#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)
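/*
 * Budget for a single CQ tasklet run; iser_cq_tasklet_fn() stops polling
 * after this many work completions to bound soft-IRQ latency, relying on
 * the next completion interrupt to drain whatever is left.
 */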
static int iser_cq_poll_limit = 512;
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}
static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}
static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		 event->device->name, event->element.port_num);
}
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, max_cqe;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}
	/* Assign function handles - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}
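	/*
	 * The chosen scheme is recorded as per-device function pointers, so
	 * the rest of the driver allocates, registers and releases memory
	 * without caring whether FMR or FastReg is underneath.
	 */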
	device->comps_used = min(ISER_MAX_CQ,
				 device->ib_device->num_comp_vectors);

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors, max_cqe);
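	/*
	 * One completion context (CQ + tasklet) is set up per completion
	 * vector below, capped at ISER_MAX_CQ, so completion processing can
	 * be spread across CPUs; max_cqe is clamped to what the HCA supports.
	 */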
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					max_cqe, i);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}
	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;
	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;
handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_destroy_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}
/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	device->mr = NULL;
	device->pd = NULL;
}
/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);
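	/*
	 * Example sizing, assuming cmds_max = 128: the pool then holds 256
	 * FMRs and the pool's flusher starts unmapping once 128 of them are
	 * dirty, so mapping in steady state should rarely wait on unmaps.
	 */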
	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}
/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return ret;
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		struct ib_mr_init_attr mr_init_attr = {0};
		struct iser_pi_context *pi_ctx = NULL;

		desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		if (!desc->pi_ctx) {
			iser_err("Failed to allocate pi context\n");
			ret = -ENOMEM;
			goto pi_ctx_alloc_failure;
		}
		pi_ctx = desc->pi_ctx;

		pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
						    ISCSI_ISER_SG_TABLESIZE);
		if (IS_ERR(pi_ctx->prot_frpl)) {
			ret = PTR_ERR(pi_ctx->prot_frpl);
			iser_err("Failed to allocate prot frpl ret=%d\n",
				 ret);
			goto prot_frpl_failure;
		}

		pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
						ISCSI_ISER_SG_TABLESIZE + 1);
		if (IS_ERR(pi_ctx->prot_mr)) {
			ret = PTR_ERR(pi_ctx->prot_mr);
			iser_err("Failed to allocate prot frmr ret=%d\n",
				 ret);
			goto prot_mr_failure;
		}
		desc->reg_indicators |= ISER_PROT_KEY_VALID;

		mr_init_attr.max_reg_descriptors = 2;
		mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
		pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
		if (IS_ERR(pi_ctx->sig_mr)) {
			ret = PTR_ERR(pi_ctx->sig_mr);
			iser_err("Failed to allocate signature enabled mr err=%d\n",
				 ret);
			goto sig_mr_failure;
		}
		desc->reg_indicators |= ISER_SIG_KEY_VALID;
	}
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	iser_dbg("Create fr_desc %p page_list %p\n",
		 desc, desc->data_frpl->page_list);

	return 0;

sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}
/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}
/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx) {
			ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
			ib_dereg_mr(desc->pi_ctx->prot_mr);
			ib_destroy_mr(desc->pi_ctx->sig_mr);
			kfree(desc->pi_ctx);
		}
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}
/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iser_device *device;
	struct ib_device_attr *dev_attr;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	dev_attr = &device->dev_attr;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);
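	/*
	 * Picking the least-loaded completion context is a simple
	 * load-balancing heuristic; active_qps is only touched under
	 * connlist_mutex, so concurrent connection setups cannot race
	 * on a stale count.
	 */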
	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, dev_attr->max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}
/**
 * Based on the resolved device node GUID, see if a matching iser_device
 * is already allocated. If there's no such device, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign this device to the device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}
/**
 * Called with state mutex held
 */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy_device: indicator if we need to try to release
 *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
 *     will use this)
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy_device)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	iser_free_rx_descriptors(iser_conn);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy_device && device != NULL) {
		iser_device_try_release(device);
		ib_conn->device = NULL;
	}
}
/**
 * Frees all conn objects and deallocates the conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	if (iser_conn->state != ISER_CONN_DOWN)
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}
/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_send_wr *bad_wr;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);
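		/*
		 * The beacon is a pre-built send WR with a sentinel wr_id
		 * (initialized in iser_connect()). Once the QP has moved to
		 * the error state it flushes after every WR queued before
		 * it, so its completion (see iser_handle_wc()) means all
		 * flush errors were consumed.
		 */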
		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err) {
			iser_err("conn %p failed to post beacon", ib_conn);
			return 1;
		}

		wait_for_completion(&ib_conn->flush_comp);
	}

	return 1;
}
/**
 * Called with state mutex held
 */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;

	iser_conn->state = ISER_CONN_DOWN;
}
/**
 * Called with state mutex held
 */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}
/**
 * Called with state mutex held
 */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			 ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data	    = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;

failure:
	iser_connect_error(cma_id);
}
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy_device)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy_device);
	complete(&iser_conn->ib_completion);
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		iser_disconnected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also implicitly destroy the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		iser_conn->ib_conn.cma_id = NULL;
		ret = 1;
		break;
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}
void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	init_completion(&iser_conn->ib_conn.flush_comp);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}
/**
 * starts the process of connecting to the target;
 * unless non_blocking is set, sleeps until the connection
 * is established or rejected
 */
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}
/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct ib_conn *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64 io_addr;
	u64 *page_list;
	int status;

	page_list = page_vec->pages;
	io_addr = page_list[0];

	mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
				   page_list,
				   page_vec->length,
				   io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->is_mr = 1;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va  += page_vec->offset;
	mem_reg->len  = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}
/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	int ret;

	if (!reg->is_mr)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct iser_conn *iser_conn = iser_task->iser_conn;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct fast_reg_descriptor *desc = reg->mem_h;

	if (!reg->is_mr)
		return;

	reg->mem_h = NULL;
	reg->is_mr = 0;
	spin_lock_bh(&ib_conn->lock);
	list_add_tail(&desc->list, &ib_conn->fastreg.pool);
	spin_unlock_bh(&ib_conn->lock);
}
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge sge;
	int ib_ret;

	sge.addr   = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id   = (unsigned long)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next    = rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}
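	/*
	 * The mask arithmetic above wraps the ring head and assumes the RX
	 * ring size is a power of two. The loop chained every WR to its
	 * successor, so back up to the last WR used and terminate the list.
	 */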
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	int ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);

	return ib_ret;
}
/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}
/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (is_iser_tx_desc(iser_conn, (void *)wc->wr_id)) {
		struct iser_tx_desc *desc = (struct iser_tx_desc *)wc->wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
}
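/*
 * Note: wr_id values posted by this driver are descriptor pointers, with
 * two sentinel exceptions that never map to a descriptor: the fast
 * registration work request ID (ISER_FASTREG_LI_WRID) and the teardown
 * beacon (ISER_BEACON_WRID). iser_handle_wc() filters these out before
 * error handling.
 */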
/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
	struct ib_conn *ib_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	ib_conn = wc->qp->qp_context;
	if (wc->status == IB_WC_SUCCESS) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
		} else {
			iser_err("Unknown wc opcode %d\n", wc->opcode);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("wr id %llx status %d vend_err %x\n",
				 wc->wr_id, wc->status, wc->vendor_err);
		else
			iser_dbg("flush error: wr id %llx\n", wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID &&
		    wc->wr_id != ISER_BEACON_WRID)
			iser_handle_comp_error(ib_conn, wc);

		/* complete in case all flush errors were consumed */
		if (wc->wr_id == ISER_BEACON_WRID)
			complete(&ib_conn->flush_comp);
	}
}
/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->cq;
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			iser_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= iser_cq_poll_limit)
			break;
	}

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d completions\n", completed);
}
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;
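			/*
			 * sig_err_offset is a byte offset in the protected
			 * domain, where each sector is followed by an 8-byte
			 * DIF tuple; e.g. with 512-byte sectors the stride is
			 * 520 bytes, so dividing by sector_size + 8 yields
			 * the failing sector index.
			 */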
			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;

err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}