/*
 * IBM eServer i/pSeries Virtual SCSI Target Driver
 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
 *                         Santiago Leon (santil@us.ibm.com) IBM Corp.
 *                         Linda Xie (lxie@us.ibm.com) IBM Corp.
 *
 * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_srp.h>
#include <scsi/scsi_tgt.h>
#include <scsi/libsrp.h>
#include <asm/hvcall.h>
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/vio.h>

#include "ibmvscsi/viosrp.h"
#define INITIAL_SRP_LIMIT	16
#define DEFAULT_MAX_SECTORS	256	/* 128 kb */

#define TGT_NAME	"ibmvstgt"
/*
 * Hypervisor calls.  Note: no trailing semicolons in the bodies, so the
 * macros expand cleanly when used as expressions.
 */
#define h_copy_rdma(l, sa, sb, da, db) \
			plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
#define h_send_crq(ua, l, h) \
			plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
#define h_reg_crq(ua, tok, sz) \
			plpar_hcall_norets(H_REG_CRQ, ua, tok, sz)
#define h_free_crq(ua) \
			plpar_hcall_norets(H_FREE_CRQ, ua)
/* tmp - will replace with SCSI logging stuff */
#define eprintk(fmt, args...)					\
do {								\
	printk("%s(%d) " fmt, __func__, __LINE__, ##args);	\
} while (0)
/* #define dprintk eprintk */
#define dprintk(fmt, args...)
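
/*
 * Per-adapter state.  One vio_port is allocated per virtual SCSI host;
 * liobn/riobn are the local and remote I/O bus numbers (read from the
 * "ibm,my-dma-window" property at probe time) that window H_COPY_RDMA
 * transfers between this partition and the client.
 */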
struct vio_port {
	struct vio_dev *dma_dev;

	struct crq_queue crq_queue;
	struct work_struct crq_work;

	unsigned long liobn;
	unsigned long riobn;
	struct srp_target *target;

	struct srp_rport *rport;
};
static struct workqueue_struct *vtgtd;
static struct scsi_transport_template *ibmvstgt_transport_template;
/*
 * These are fixed for the system and come from the Open Firmware device tree.
 * We just store them here to save getting them every time.
 */
static char system_id[64] = "";
static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;
static struct vio_port *target_to_port(struct srp_target *target)
{
	return (struct vio_port *) target->ldata;
}
static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
{
	return (union viosrp_iu *) (iue->sbuf->buf);
}
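
/*
 * Return an IU to the initiator: first RDMA the payload from iue->sbuf
 * into the client buffer identified by iue->remote_token, then post a
 * CRQ element describing it.  Returns the hcall status of the copy, or
 * of the CRQ send if that fails.
 */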
static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	long rc, rc1;
	union {
		struct viosrp_crq cooked;
		uint64_t raw[2];
	} crq;

	/* First copy the SRP */
	rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
			 vport->riobn, iue->remote_token);
	if (rc)
		eprintk("Error %ld transferring data\n", rc);

	crq.cooked.valid = 0x80;
	crq.cooked.format = format;
	crq.cooked.reserved = 0x00;
	crq.cooked.timeout = 0x00;
	crq.cooked.IU_length = length;
	crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;

	if (rc == 0)
		crq.cooked.status = 0x99;	/* Just needs to be non-zero */
	else
		crq.cooked.status = 0x00;

	rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
	if (rc1) {
		eprintk("%ld sending response\n", rc1);
		return rc1;
	}

	return rc;
}
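
/*
 * Build and send an SRP_RSP for a completed request.  When a scsi_cmnd
 * is supplied its sense buffer is copied through verbatim; otherwise a
 * fixed-format sense block is fabricated from the status/asc arguments.
 */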
#define SRP_RSP_SENSE_DATA_LEN	18

static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
		    unsigned char status, unsigned char asc)
{
	union viosrp_iu *iu = vio_iu(iue);
	uint64_t tag = iu->srp.rsp.tag;

	/* If the linked bit is on and status is good */
	if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
		status = 0x10;	/* SAM INTERMEDIATE status */

	memset(iu, 0, sizeof(struct srp_rsp));
	iu->srp.rsp.opcode = SRP_RSP;
	iu->srp.rsp.req_lim_delta = 1;
	iu->srp.rsp.tag = tag;

	if (test_bit(V_DIOVER, &iue->flags))
		iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;

	iu->srp.rsp.data_in_res_cnt = 0;
	iu->srp.rsp.data_out_res_cnt = 0;

	iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;

	iu->srp.rsp.resp_data_len = 0;
	iu->srp.rsp.status = status;
	if (status) {
		uint8_t *sense = iu->srp.rsp.data;

		if (sc) {
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		} else {
			iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
			iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
			iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;

			/* Valid bit and 'current errors' */
			sense[0] = (0x1 << 7 | 0x70);
			/* Sense key */
			sense[2] = status;
			/* Additional sense length */
			sense[7] = 0xa;	/* 10 bytes */
			/* Additional sense code */
			sense[12] = asc;
		}
	}

	send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
		VIOSRP_SRP_FORMAT);

	return 0;
}
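
/*
 * Drain the deferred command queue, handing every IU that is not already
 * in flight to the tgt core.  The target lock is dropped around
 * srp_cmd_queue(), so the list walk restarts from the head each time.
 */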
static void handle_cmd_queue(struct srp_target *target)
{
	struct Scsi_Host *shost = target->shost;
	struct srp_rport *rport = target_to_port(target)->rport;
	struct iu_entry *iue;
	struct srp_cmd *cmd;
	unsigned long flags;
	int err;

retry:
	spin_lock_irqsave(&target->lock, flags);

	list_for_each_entry(iue, &target->cmd_queue, ilist) {
		if (!test_and_set_bit(V_FLYING, &iue->flags)) {
			spin_unlock_irqrestore(&target->lock, flags);
			cmd = iue->sbuf->buf;
			err = srp_cmd_queue(shost, cmd, iue,
					    (unsigned long)rport, 0);
			if (err) {
				eprintk("cannot queue cmd %p %d\n", cmd, err);
				srp_iu_put(iue);
			}
			goto retry;
		}
	}

	spin_unlock_irqrestore(&target->lock, flags);
}
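
/*
 * Data mover callback for srp_transfer_data(): copies between the local
 * scatterlist and the initiator's SRP memory descriptors, one
 * H_COPY_RDMA hypervisor call per contiguous chunk, in the direction
 * given by dir.
 */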
static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
			 struct srp_direct_buf *md, int nmd,
			 enum dma_data_direction dir, unsigned int rest)
{
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	dma_addr_t token;
	long err;
	unsigned int done = 0;
	int i, sidx, soff;

	sidx = soff = 0;
	token = sg_dma_address(sg + sidx);

	for (i = 0; i < nmd && rest; i++) {
		unsigned int mdone, mlen;

		mlen = min(rest, md[i].len);
		for (mdone = 0; mlen;) {
			int slen = min(sg_dma_len(sg + sidx) - soff, mlen);

			if (dir == DMA_TO_DEVICE)
				err = h_copy_rdma(slen,
						  vport->riobn,
						  md[i].va + mdone,
						  vport->liobn,
						  token + soff);
			else
				err = h_copy_rdma(slen,
						  vport->liobn,
						  token + soff,
						  vport->riobn,
						  md[i].va + mdone);

			if (err != H_SUCCESS) {
				eprintk("rdma error %d %d %ld\n", dir, slen, err);
				return -EIO;
			}

			mlen -= slen;
			mdone += slen;
			soff += slen;
			done += slen;

			if (soff == sg_dma_len(sg + sidx)) {
				sidx++;
				soff = 0;
				token = sg_dma_address(sg + sidx);

				if (sidx > nsg) {
					eprintk("out of sg %p %d %d\n",
						iue, sidx, nsg);
					return -EIO;
				}
			}
		}

		rest -= mdone;	/* bytes consumed from this descriptor */
	}
	return 0;
}
static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
			     void (*done)(struct scsi_cmnd *))
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;
	int err = 0;

	dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
		scsi_sg_count(sc));

	if (scsi_sg_count(sc))
		err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	if (err || sc->result != SAM_STAT_GOOD) {
		eprintk("operation failed %p %d %x\n",
			iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
		send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
	} else
		send_rsp(iue, sc, NO_SENSE, 0x00);

	done(sc);
	srp_iu_put(iue);
	return 0;
}
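
/*
 * Handle the ADAPTER_INFO MAD: pull the client's adapter-info block over
 * RDMA (for logging), then overwrite it with our own identity and push
 * it back to the same remote buffer.
 */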
int send_adapter_info(struct iu_entry *iue,
		      dma_addr_t remote_buffer, uint16_t length)
{
	struct srp_target *target = iue->target;
	struct vio_port *vport = target_to_port(target);
	struct Scsi_Host *shost = target->shost;
	dma_addr_t data_token;
	struct mad_adapter_info_data *info;
	int err;

	info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
				  GFP_KERNEL);
	if (!info) {
		eprintk("bad dma_alloc_coherent %p\n", target);
		return 1;
	}

	/* Get remote info */
	err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
			  vport->liobn, data_token);
	if (err == H_SUCCESS) {
		dprintk("Client connect: %s (%d)\n",
			info->partition_name, info->partition_number);
	}

	memset(info, 0, sizeof(*info));

	strcpy(info->srp_version, "16.a");
	strncpy(info->partition_name, partition_name,
		sizeof(info->partition_name));
	info->partition_number = partition_number;
	info->mad_version = 1;
	info->os_type = 2;
	info->port_max_txu[0] = shost->hostt->max_sectors << 9;

	/* Send our info to remote */
	err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
			  vport->riobn, remote_buffer);

	dma_free_coherent(target->dev, sizeof(*info), info, data_token);

	if (err != H_SUCCESS) {
		eprintk("Error sending adapter info %d\n", err);
		return 1;
	}

	return 0;
}
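
/*
 * SRP_LOGIN_REQ handling: register a remote port with the SRP transport
 * class, then answer with a fixed-size SRP_LOGIN_RSP advertising direct
 * and indirect buffer support.
 */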
static void process_login(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct srp_login_rsp *rsp = &iu->srp.login_rsp;
	uint64_t tag = iu->srp.rsp.tag;
	struct Scsi_Host *shost = iue->target->shost;
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);
	struct srp_rport_identifiers ids;

	memset(&ids, 0, sizeof(ids));
	sprintf(ids.port_id, "%x", vport->dma_dev->unit_address);
	ids.roles = SRP_RPORT_ROLE_INITIATOR;
	if (!vport->rport)
		vport->rport = srp_rport_add(shost, &ids);

	/* TODO handle case that requested size is wrong and
	 * buffer format is wrong
	 */
	memset(iu, 0, sizeof(struct srp_login_rsp));
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = INITIAL_SRP_LIMIT;
	rsp->tag = tag;
	rsp->max_it_iu_len = sizeof(union srp_iu);
	rsp->max_ti_iu_len = sizeof(union srp_iu);
	/* direct and indirect */
	rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;

	send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
static inline void queue_cmd(struct iu_entry *iue)
{
	struct srp_target *target = iue->target;
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add_tail(&iue->ilist, &target->cmd_queue);
	spin_unlock_irqrestore(&target->lock, flags);
}
static int process_tsk_mgmt(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	int fn;

	dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);

	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		fn = ABORT_TASK;
		break;
	case SRP_TSK_ABORT_TASK_SET:
		fn = ABORT_TASK_SET;
		break;
	case SRP_TSK_CLEAR_TASK_SET:
		fn = CLEAR_TASK_SET;
		break;
	case SRP_TSK_LUN_RESET:
		fn = LOGICAL_UNIT_RESET;
		break;
	case SRP_TSK_CLEAR_ACA:
		fn = CLEAR_ACA;
		break;
	default:
		fn = 0;
	}
	if (fn)
		scsi_tgt_tsk_mgmt_request(iue->target->shost,
					  (unsigned long)iue->target->shost,
					  fn,
					  iu->srp.tsk_mgmt.task_tag,
					  (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
					  iue);
	else
		send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);

	return !fn;
}
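
/*
 * Dispatch a VIOSRP management datagram (MAD).  Only ADAPTER_INFO is
 * fully serviced; the other types are acknowledged with an error status
 * so the client does not stall waiting for a reply.
 */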
static int process_mad_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	struct viosrp_adapter_info *info;
	struct viosrp_host_config *conf;

	switch (iu->mad.empty_iu.common.type) {
	case VIOSRP_EMPTY_IU_TYPE:
		eprintk("%s\n", "Unsupported EMPTY MAD IU");
		break;
	case VIOSRP_ERROR_LOG_TYPE:
		eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
		iu->mad.error_log.common.status = 1;
		send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_ADAPTER_INFO_TYPE:
		info = &iu->mad.adapter_info;
		info->common.status = send_adapter_info(iue, info->buffer,
							info->common.length);
		send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
		break;
	case VIOSRP_HOST_CONFIG_TYPE:
		conf = &iu->mad.host_config;
		conf->common.status = 1;
		send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
		break;
	default:
		eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
	}

	return 1;
}
static int process_srp_iu(struct iu_entry *iue)
{
	union viosrp_iu *iu = vio_iu(iue);
	int done = 1;
	u8 opcode = iu->srp.rsp.opcode;

	switch (opcode) {
	case SRP_LOGIN_REQ:
		process_login(iue);
		break;
	case SRP_TSK_MGMT:
		done = process_tsk_mgmt(iue);
		break;
	case SRP_CMD:
		queue_cmd(iue);
		done = 0;
		break;
	case SRP_LOGIN_RSP:
	case SRP_I_LOGOUT:
	case SRP_T_LOGOUT:
	case SRP_RSP:
	case SRP_CRED_REQ:
	case SRP_CRED_RSP:
	case SRP_AER_REQ:
	case SRP_AER_RSP:
		eprintk("Unsupported type %u\n", opcode);
		break;
	default:
		eprintk("Unknown type %u\n", opcode);
	}

	return done;
}
static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct iu_entry *iue;
	long err;
	int done = 1;

	iue = srp_iu_get(target);
	if (!iue) {
		eprintk("Error getting IU from pool, %p\n", target);
		return;
	}

	iue->remote_token = crq->IU_data_ptr;

	err = h_copy_rdma(crq->IU_length, vport->riobn,
			  iue->remote_token, vport->liobn, iue->sbuf->dma);

	if (err != H_SUCCESS) {
		eprintk("%ld transferring data error %p\n", err, iue);
		goto out;
	}

	if (crq->format == VIOSRP_MAD_FORMAT)
		done = process_mad_iu(iue);
	else
		done = process_srp_iu(iue);
out:
	if (done)
		srp_iu_put(iue);
}
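
/*
 * Interrupt handler: mask further interrupts and push all real work to
 * the ibmvtgtd workqueue so CRQ processing can run in process context.
 */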
static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
{
	struct srp_target *target = data;
	struct vio_port *vport = target_to_port(target);

	vio_disable_interrupts(vport->dma_dev);
	queue_work(vtgtd, &vport->crq_work);

	return IRQ_HANDLED;
}
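
/*
 * Allocate one page of CRQ elements, map it for DMA, and register it
 * with the hypervisor.  H_RESOURCE from H_REG_CRQ means a previous
 * owner (e.g. after kexec) left the queue registered, so free and retry
 * once before giving up.
 */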
static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
{
	int err;
	struct vio_port *vport = target_to_port(target);

	queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(target->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(target->dev, queue->msg_token))
		goto map_failed;

	err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
			PAGE_SIZE);

	/* If the adapter was left active for some reason (like kexec)
	 * try freeing and re-registering
	 */
	if (err == H_RESOURCE) {
		do {
			err = h_free_crq(vport->dma_dev->unit_address);
		} while (err == H_BUSY || H_IS_LONG_BUSY(err));

		err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
				PAGE_SIZE);
	}

	if (err != H_SUCCESS && err != 2) {	/* 2 == H_CLOSED: no partner yet */
		eprintk("Error 0x%x opening virtual adapter\n", err);
		goto reg_crq_failed;
	}

	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
			  IRQF_DISABLED, "ibmvstgt", target);
	if (err)
		goto req_irq_failed;

	vio_enable_interrupts(vport->dma_dev);

	h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	return 0;

req_irq_failed:
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

reg_crq_failed:
	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long) queue->msgs);

malloc_failed:
	return -ENOMEM;
}
static void crq_queue_destroy(struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	struct crq_queue *queue = &vport->crq_queue;
	int err;

	free_irq(vport->dma_dev->irq, target);
	do {
		err = h_free_crq(vport->dma_dev->unit_address);
	} while (err == H_BUSY || H_IS_LONG_BUSY(err));

	dma_unmap_single(target->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);

	free_page((unsigned long) queue->msgs);
}
static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
{
	struct vio_port *vport = target_to_port(target);
	dprintk("%x %x\n", crq->valid, crq->format);

	switch (crq->valid) {
	case 0xC0:
		/* initialization */
		switch (crq->format) {
		case 0x01:
			h_send_crq(vport->dma_dev->unit_address,
				   0xC002000000000000, 0);
			break;
		case 0x02:
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	case 0xFF:
		/* transport event */
		break;
	case 0x80:
		/* real payload */
		switch (crq->format) {
		case VIOSRP_SRP_FORMAT:
		case VIOSRP_MAD_FORMAT:
			process_iu(crq, target);
			break;
		case VIOSRP_OS400_FORMAT:
		case VIOSRP_AIX_FORMAT:
		case VIOSRP_LINUX_FORMAT:
		case VIOSRP_INLINE_FORMAT:
			eprintk("Unsupported format %u\n", crq->format);
			break;
		default:
			eprintk("Unknown format %u\n", crq->format);
		}
		break;
	default:
		eprintk("unknown message type 0x%02x!?\n", crq->valid);
	}
}
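
/*
 * Consumer side of the CRQ ring: return the element at the cursor if
 * its valid bit (0x80) is set and advance the cursor, or NULL when the
 * ring is empty.
 */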
static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else
		crq = NULL;
	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}
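
/*
 * Workqueue handler: drain the CRQ, re-enable interrupts, then poll once
 * more to close the race with an element that arrived in between.
 * Finally kick the deferred command queue.
 */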
static void handle_crq(struct work_struct *work)
{
	struct vio_port *vport = container_of(work, struct vio_port, crq_work);
	struct srp_target *target = vport->target;
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		while ((crq = next_crq(&vport->crq_queue)) != NULL) {
			process_crq(crq, target);
			crq->valid = 0x00;
		}

		vio_enable_interrupts(vport->dma_dev);

		crq = next_crq(&vport->crq_queue);
		if (crq) {
			vio_disable_interrupts(vport->dma_dev);
			process_crq(crq, target);
			crq->valid = 0x00;
		} else
			done = 1;
	}

	handle_cmd_queue(target);
}
static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
{
	unsigned long flags;
	struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
	struct srp_target *target = iue->target;

	dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);

	spin_lock_irqsave(&target->lock, flags);
	list_del(&iue->ilist);
	spin_unlock_irqrestore(&target->lock, flags);

	srp_iu_put(iue);

	return 0;
}
static int ibmvstgt_tsk_mgmt_response(struct Scsi_Host *shost,
				      u64 itn_id, u64 mid, int result)
{
	struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
	union viosrp_iu *iu = vio_iu(iue);
	unsigned char status, asc;

	eprintk("%p %d\n", iue, result);
	status = NO_SENSE;
	asc = 0;

	switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		asc = 0x14;
		if (result)
			status = ABORTED_COMMAND;
		break;
	default:
		break;
	}

	send_rsp(iue, NULL, status, asc);
	srp_iu_put(iue);

	return 0;
}
static int ibmvstgt_it_nexus_response(struct Scsi_Host *shost, u64 itn_id,
				      int result)
{
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);

	if (result) {
		eprintk("%p %d\n", shost, result);
		srp_rport_del(vport->rport);
	}
	return 0;
}
static ssize_t system_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
}

static ssize_t partition_number_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
}

static ssize_t unit_address_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct srp_target *target = host_to_srp_target(shost);
	struct vio_port *vport = target_to_port(target);
	return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
}
static DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
static DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
static DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
static struct device_attribute *ibmvstgt_attrs[] = {
	&dev_attr_system_id,
	&dev_attr_partition_number,
	&dev_attr_unit_address,
	NULL,
};
static struct scsi_host_template ibmvstgt_sht = {
	.name			= TGT_NAME,
	.module			= THIS_MODULE,
	.can_queue		= INITIAL_SRP_LIMIT,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= DISABLE_CLUSTERING,
	.max_sectors		= DEFAULT_MAX_SECTORS,
	.transfer_response	= ibmvstgt_cmd_done,
	.eh_abort_handler	= ibmvstgt_eh_abort_handler,
	.shost_attrs		= ibmvstgt_attrs,
	.proc_name		= TGT_NAME,
	.supported_mode		= MODE_TARGET,
};
static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct Scsi_Host *shost;
	struct srp_target *target;
	struct vio_port *vport;
	unsigned int *dma, dma_size;
	int err = -ENOMEM;

	vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
	if (!vport)
		return err;
	shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
	if (!shost)
		goto free_vport;
	shost->transportt = ibmvstgt_transport_template;

	target = host_to_srp_target(shost);
	target->shost = shost;
	vport->dma_dev = dev;
	target->ldata = vport;
	vport->target = target;
	err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
			       SRP_MAX_IU_LEN);
	if (err)
		goto put_host;

	dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
						 &dma_size);
	if (!dma || dma_size != 40) {
		eprintk("Couldn't get window property %d\n", dma_size);
		err = -EIO;
		goto free_srp_target;
	}
	vport->liobn = dma[0];
	vport->riobn = dma[5];

	INIT_WORK(&vport->crq_work, handle_crq);

	err = scsi_add_host(shost, target->dev);
	if (err)
		goto free_srp_target;

	err = scsi_tgt_alloc_queue(shost);
	if (err)
		goto remove_host;

	err = crq_queue_create(&vport->crq_queue, target);
	if (err)
		goto free_queue;

	return 0;
free_queue:
	scsi_tgt_free_queue(shost);
remove_host:
	scsi_remove_host(shost);
free_srp_target:
	srp_target_free(target);
put_host:
	scsi_host_put(shost);
free_vport:
	kfree(vport);
	return err;
}
static int ibmvstgt_remove(struct vio_dev *dev)
{
	struct srp_target *target = dev_get_drvdata(&dev->dev);
	struct Scsi_Host *shost = target->shost;
	struct vio_port *vport = target->ldata;

	crq_queue_destroy(target);
	srp_remove_host(shost);
	scsi_remove_host(shost);
	scsi_tgt_free_queue(shost);
	srp_target_free(target);
	kfree(vport);
	scsi_host_put(shost);
	return 0;
}
static struct vio_device_id ibmvstgt_device_table[] = {
	{"v-scsi-host", "IBM,v-scsi-host"},
	{"", ""}
};

MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
static struct vio_driver ibmvstgt_driver = {
	.id_table = ibmvstgt_device_table,
	.probe = ibmvstgt_probe,
	.remove = ibmvstgt_remove,
	.name = "ibmvstgt",
};
static int get_system_info(void)
{
	struct device_node *rootdn;
	const char *id, *model, *name;
	const unsigned int *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return -ENOENT;

	model = of_get_property(rootdn, "model", NULL);
	id = of_get_property(rootdn, "system-id", NULL);
	if (model && id)
		snprintf(system_id, sizeof(system_id), "%s-%s", model, id);

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strncpy(partition_name, name, sizeof(partition_name));

	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		partition_number = *num;

	of_node_put(rootdn);
	return 0;
}
static struct srp_function_template ibmvstgt_transport_functions = {
	.tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
	.it_nexus_response = ibmvstgt_it_nexus_response,
};
static int __init ibmvstgt_init(void)
{
	int err = -ENOMEM;

	printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");

	ibmvstgt_transport_template =
		srp_attach_transport(&ibmvstgt_transport_functions);
	if (!ibmvstgt_transport_template)
		return err;

	vtgtd = create_workqueue("ibmvtgtd");
	if (!vtgtd)
		goto release_transport;

	err = get_system_info();
	if (err)
		goto destroy_wq;

	err = vio_register_driver(&ibmvstgt_driver);
	if (err)
		goto destroy_wq;

	return 0;
destroy_wq:
	destroy_workqueue(vtgtd);
release_transport:
	srp_release_transport(ibmvstgt_transport_template);
	return err;
}
static void __exit ibmvstgt_exit(void)
{
	printk("Unregister IBM virtual SCSI driver\n");

	destroy_workqueue(vtgtd);
	vio_unregister_driver(&ibmvstgt_driver);
	srp_release_transport(ibmvstgt_transport_template);
}
MODULE_DESCRIPTION("IBM Virtual SCSI Target");
MODULE_AUTHOR("Santiago Leon");
MODULE_LICENSE("GPL");

module_init(ibmvstgt_init);
module_exit(ibmvstgt_exit);