/*
 * SCSI target kernel/user interface functions
 *
 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/miscdevice.h>
#include <linux/file.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <scsi/scsi_tgt_if.h>

#include <asm/cacheflush.h>

#include "scsi_tgt_priv.h"
#if TGT_RING_SIZE < PAGE_SIZE
# define TGT_RING_SIZE PAGE_SIZE
#endif

#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
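
/*
 * Each ring is TGT_RING_PAGES pages of struct tgt_event slots.  tr_idx
 * walks the slots in order and wraps at TGT_MAX_EVENTS, so the pages
 * can be mmap()ed straight into the userspace daemon and read linearly.
 */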
struct tgt_ring {
	u32 tr_idx;
	unsigned long tr_pages[TGT_RING_PAGES];
	spinlock_t tr_lock;
};

/* tx_ring : kernel->user, rx_ring : user->kernel */
static struct tgt_ring tx_ring, rx_ring;
static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);

static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
{
	if (ring->tr_idx == TGT_MAX_EVENTS - 1)
		ring->tr_idx = 0;
	else
		ring->tr_idx++;
}

/* map a linear slot index to its page and offset within the ring */
static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
{
	u32 pidx, off;

	pidx = idx / TGT_EVENT_PER_PAGE;
	off = idx % TGT_EVENT_PER_PAGE;

	return (struct tgt_event *)
		(ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
}
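
/*
 * Queue one event on the tx ring for userspace and wake any poller.
 * A slot whose status flag is still set has not been consumed yet,
 * which means the ring is full and the event cannot be sent.
 */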
static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
{
	struct tgt_event *ev;
	struct tgt_ring *ring = &tx_ring;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&ring->tr_lock, flags);

	ev = tgt_head_event(ring, ring->tr_idx);
	if (!ev->hdr.status)
		tgt_ring_idx_inc(ring);
	else
		err = -EBUSY;

	spin_unlock_irqrestore(&ring->tr_lock, flags);

	if (err)
		return err;

	memcpy(ev, p, sizeof(*ev));
	ev->hdr.type = type;
	/* make the payload visible before flagging the slot as pending */
	mb();
	ev->hdr.status = 1;

	flush_dcache_page(virt_to_page(ev));

	wake_up_interruptible(&tgt_poll_wait);

	return 0;
}
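
/*
 * The scsi_tgt_uspace_send_* helpers below package kernel-side objects
 * (a new command, a completed command, a task management request) into
 * tgt_event payloads and hand them to userspace via the tx ring.
 */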
int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
{
	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
	struct tgt_event ev;
	int err;

	memset(&ev, 0, sizeof(ev));
	ev.p.cmd_req.host_no = shost->host_no;
	ev.p.cmd_req.data_len = cmd->request_bufflen;
	memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
	memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
	ev.p.cmd_req.attribute = cmd->tag;
	ev.p.cmd_req.tag = tag;

	dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
		ev.p.cmd_req.data_len, cmd->tag,
		(unsigned long long) ev.p.cmd_req.tag);

	err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
	if (err)
		eprintk("tx buf is full, could not send\n");

	return err;
}

int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag)
{
	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
	struct tgt_event ev;
	int err;

	memset(&ev, 0, sizeof(ev));
	ev.p.cmd_done.host_no = shost->host_no;
	ev.p.cmd_done.tag = tag;
	ev.p.cmd_done.result = cmd->result;

	dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
		(unsigned long long) ev.p.cmd_req.tag,
		ev.p.cmd_req.data_len, cmd->tag);

	err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
	if (err)
		eprintk("tx buf is full, could not send\n");

	return err;
}

int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
				  struct scsi_lun *scsilun, void *data)
{
	struct tgt_event ev;
	int err;

	memset(&ev, 0, sizeof(ev));
	ev.p.tsk_mgmt_req.host_no = host_no;
	ev.p.tsk_mgmt_req.function = function;
	ev.p.tsk_mgmt_req.tag = tag;
	memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
	ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;

	dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
		(unsigned long long) ev.p.tsk_mgmt_req.mid);

	err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
	if (err)
		eprintk("tx buf is full, could not send\n");

	return err;
}
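
/*
 * Dispatch a single userspace response: either the outcome of running
 * a command (TGT_UEVENT_CMD_RSP) or of a task management function
 * (TGT_UEVENT_TSK_MGMT_RSP).
 */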
static int event_recv_msg(struct tgt_event *ev)
{
	int err = 0;

	switch (ev->hdr.type) {
	case TGT_UEVENT_CMD_RSP:
		err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
					   ev->p.cmd_rsp.result,
					   ev->p.cmd_rsp.tag,
					   ev->p.cmd_rsp.uaddr,
					   ev->p.cmd_rsp.len,
					   ev->p.cmd_rsp.sense_uaddr,
					   ev->p.cmd_rsp.sense_len,
					   ev->p.cmd_rsp.rw);
		break;
	case TGT_UEVENT_TSK_MGMT_RSP:
		err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
					       ev->p.tsk_mgmt_rsp.mid,
					       ev->p.tsk_mgmt_rsp.result);
		break;
	default:
		eprintk("unknown type %d\n", ev->hdr.type);
		err = -EINVAL;
	}

	return err;
}
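
/*
 * write() on the tgt device carries no payload of its own; it simply
 * kicks the kernel to drain the rx ring that userspace fills through
 * the shared mmap()ed pages.
 */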
static ssize_t tgt_write(struct file *file, const char __user * buffer,
			 size_t count, loff_t * ppos)
{
	struct tgt_event *ev;
	struct tgt_ring *ring = &rx_ring;

	while (1) {
		ev = tgt_head_event(ring, ring->tr_idx);
		/* do we need this? */
		flush_dcache_page(virt_to_page(ev));

		if (!ev->hdr.status)
			break;

		tgt_ring_idx_inc(ring);
		event_recv_msg(ev);
		ev->hdr.status = 0;
	}

	return count;
}
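
/*
 * poll() reports POLLIN while the most recently queued tx slot is still
 * marked pending, i.e. userspace has events left to consume.
 */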
static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
{
	struct tgt_event *ev;
	struct tgt_ring *ring = &tx_ring;
	unsigned long flags;
	unsigned int mask = 0;
	u32 idx;

	poll_wait(file, &tgt_poll_wait, wait);

	spin_lock_irqsave(&ring->tr_lock, flags);

	idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
	ev = tgt_head_event(ring, idx);
	if (ev->hdr.status)
		mask |= POLLIN | POLLRDNORM;

	spin_unlock_irqrestore(&ring->tr_lock, flags);

	return mask;
}
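
/*
 * Both rings are mapped back to back into one VMA: the tx ring fills
 * the first TGT_RING_SIZE bytes, the rx ring the second.
 */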
static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
			   struct tgt_ring *ring)
{
	int i, err;

	for (i = 0; i < TGT_RING_PAGES; i++) {
		struct page *page = virt_to_page(ring->tr_pages[i]);
		err = vm_insert_page(vma, addr, page);
		if (err)
			return err;
		addr += PAGE_SIZE;
	}

	return 0;
}

static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long addr;
	int err;

	if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
		eprintk("mmap size must be %lu, not %lu\n",
			TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	addr = vma->vm_start;
	err = uspace_ring_map(vma, addr, &tx_ring);
	if (err)
		return err;
	err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);

	return err;
}

static int tgt_open(struct inode *inode, struct file *file)
{
	tx_ring.tr_idx = rx_ring.tr_idx = 0;

	return 0;
}

static const struct file_operations tgt_fops = {
	.owner		= THIS_MODULE,
	.open		= tgt_open,
	.poll		= tgt_poll,
	.write		= tgt_write,
	.mmap		= tgt_mmap,
};

static struct miscdevice tgt_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "tgt",
	.fops = &tgt_fops,
};

static void tgt_ring_exit(struct tgt_ring *ring)
{
	int i;

	for (i = 0; i < TGT_RING_PAGES; i++)
		free_page(ring->tr_pages[i]);
}

static int tgt_ring_init(struct tgt_ring *ring)
{
	int i;

	spin_lock_init(&ring->tr_lock);

	for (i = 0; i < TGT_RING_PAGES; i++) {
		ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
		if (!ring->tr_pages[i]) {
			eprintk("out of memory\n");
			return -ENOMEM;
		}
	}

	return 0;
}

void scsi_tgt_if_exit(void)
{
	tgt_ring_exit(&tx_ring);
	tgt_ring_exit(&rx_ring);
	misc_deregister(&tgt_miscdev);
}

int scsi_tgt_if_init(void)
{
	int err;

	err = tgt_ring_init(&tx_ring);
	if (err)
		return err;

	err = tgt_ring_init(&rx_ring);
	if (err)
		goto free_tx_ring;

	err = misc_register(&tgt_miscdev);
	if (err)
		goto free_rx_ring;

	return 0;

free_rx_ring:
	tgt_ring_exit(&rx_ring);
free_tx_ring:
	tgt_ring_exit(&tx_ring);

	return err;
}