/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dev->dma_device,
				 iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dev->dma_device,
			 iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler    = srp_qp_event;
	init_attr->cap.max_send_wr  = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr  = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge = 1;
	init_attr->cap.max_send_sge = 1;
	init_attr->sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr->qp_type          = IB_QPT_RC;
	init_attr->send_cq          = target->cq;
	init_attr->recv_cq          = target->cq;

	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count 		      = 7;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       target->srp_host->initiator_port_id + 8, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       target->srp_host->initiator_port_id, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       target->srp_host->initiator_port_id, 16);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}
static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req);
}
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req, *tmp;
	struct ib_wc wc;
	int ret;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		srp_reset_req(target, req);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}
static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;

	if (!dev->fmr_pool)
		return -ENODEV;

	len = page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i) {
		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += sg_dma_len(&scat[i]);
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i)
		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, &io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
			   scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));
	} else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va  =
				cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->dev->mr->rkey);
			buf->desc_list[i].len =
				cpu_to_be32(sg_dma_len(&scat[i]));
			datalen += sg_dma_len(&scat[i]);
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "   [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}
static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next	 = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu	 = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1))
		++target->zero_req_lim;

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				srp_max_iu_len, DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   srp_max_iu_len, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			printk(KERN_ERR PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag 	= req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}

static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	printk(KERN_ERR "SRP abort called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	printk(KERN_ERR "SRP reset_device called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}
static CLASS_DEVICE_ATTR(id_ext,	S_IRUGO, show_id_ext,		NULL);
static CLASS_DEVICE_ATTR(ioc_guid,	S_IRUGO, show_ioc_guid,		NULL);
static CLASS_DEVICE_ATTR(service_id,	S_IRUGO, show_service_id,	NULL);
static CLASS_DEVICE_ATTR(pkey,		S_IRUGO, show_pkey,		NULL);
static CLASS_DEVICE_ATTR(dgid,		S_IRUGO, show_dgid,		NULL);
static CLASS_DEVICE_ATTR(zero_req_lim,	S_IRUGO, show_zero_req_lim,	NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	&class_device_attr_zero_req_lim,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
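/*
 * Illustrative example (the identifiers below are made-up values, not
 * from the original source).  Given the "srp-<device>-<port>" class
 * device registered in srp_add_port(), a target port is created by
 * writing one comma-separated option string, e.g.
 *
 *   id_ext=200500A0B81146A1,ioc_guid=0002c90200402bd4,
 *       dgid=fe800000000000000002c90200402bd5,pkey=ffff,
 *       service_id=0002c90200402bd4
 *
 * (all on one line) to /sys/class/infiniband_srp/srp-mthca0-1/add_target.
 * srp_parse_options() below tokenizes exactly this option string.
 */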
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" 	},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x" 		},
	{ SRP_OPT_ERR,			NULL 			}
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->io_class  = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host = target_host;
	target->srp_host  = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}
static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static int __init srp_init_module(void)
{
	int ret;

	srp_template.sg_tablesize = srp_sg_tablesize;
	/*
	 * Room for the SRP_CMD header, an indirect buffer descriptor,
	 * and one 16-byte direct descriptor per scatter/gather entry.
	 */
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);