 * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *          to allow user process control of SCSI devices.
 * Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2005 Douglas Gilbert
 *
 *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
static int sg_version_num = 30533;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.33"
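/*
 * Illustrative note (not in the original source): with two decimal digits
 * per component, sg_version_num 30533 decodes to 3.5.33, matching
 * SG_VERSION_STR above (3 * 10000 + 5 * 100 + 33 = 30533).
 */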
/*
 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *  - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *    the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *    (otherwise the macros compile to empty statements).
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>

#include "scsi_logging.h"
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20050908";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0
#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */

#define SG_MAX_DEVS 32768
/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d).
 * Then, when using 32 bit integers, x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
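/*
 * Worked example (illustrative only, not from the original source): with
 * HZ = 1000 and USER_HZ = 100, MULDIV(250000, HZ, USER_HZ) expands to
 * ((250000 % 100) * 1000) / 100 + (250000 / 100) * 1000 = 0 + 2500000,
 * the same result as the naive 250000 * 1000 / 100, but the largest
 * intermediate value is 2500 * 1000 instead of 250000 * 1000, which is
 * what keeps 32 bit arithmetic from overflowing for large timeouts.
 */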
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
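/*
 * Illustrative user-space sketch (not part of the original driver source):
 * the per-fd reserve buffer described above can be inspected and resized
 * through the SG_GET_RESERVED_SIZE and SG_SET_RESERVED_SIZE ioctls handled
 * later in this file, roughly as follows ("/dev/sg0" is just an assumed
 * example device node):
 *
 *	int fd = open("/dev/sg0", O_RDWR);
 *	int sz;
 *	ioctl(fd, SG_GET_RESERVED_SIZE, &sz);	// current reserve buffer size
 *	sz = 128 * 1024;
 *	ioctl(fd, SG_SET_RESERVED_SIZE, &sz);	// ask for a larger reserve
 */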
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;
#define SG_SECTOR_SZ 512
#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
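/*
 * Note (illustrative, not in the original source): SG_SECTOR_MSK is used in
 * sg_build_indirect() below to round a request up to the next SG_SECTOR_SZ
 * boundary via (blk_size + SG_SECTOR_MSK) & ~SG_SECTOR_MSK; for example,
 * 513 bytes rounds up to 1024 while 512 stays at 512.
 */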
#define SG_DEV_ARR_LUMP 32	/* amount to over allocate sg_dev_arr by */
static int sg_add(struct class_device *, struct class_interface *);
static void sg_remove(struct class_device *, struct class_interface *);

static DEFINE_RWLOCK(sg_dev_arr_lock);	/* Also used to lock
					   file descriptor list for device */
static struct class_interface sg_interface = {
	.add		= sg_add,
	.remove		= sg_remove,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	unsigned b_malloc_len;	/* actual len malloc'ed in buffer */
	struct scatterlist *buffer;	/* scatter list */
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode;	/* first byte of command */
} Sg_scatter_hold;
struct sg_device;		/* forward declarations */
typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
} Sg_request;
typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct sg_fd *nextfp;	/* NULL when last opened fd on this device */
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
} Sg_fd;
typedef struct sg_device { /* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	Sg_fd *headfp;		/* first open fd belonging to this device */
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
} Sg_device;
static int sg_fasync(int fd, struct file *filp, int mode);
/* tasklet or soft irq callback */
static void sg_cmd_done(void *data, char *sense, int result, int resid);
static int sg_start_req(Sg_request * srp);
static void sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
			 int tablesize);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
			    int blocking, int read_only, Sg_request ** o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
		      int wr_xf, int *countp, unsigned char __user **up);
static int sg_write_xfer(Sg_request * srp);
static int sg_read_xfer(Sg_request * srp);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
static void sg_page_free(struct page *page, int size);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static int sg_allow_access(unsigned char opcode, char dev_type);
static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
static Sg_device *sg_get_dev(int dev);
#ifdef CONFIG_SCSI_PROC_FS
static int sg_last_dev(void);
#endif
static Sg_device **sg_dev_arr = NULL;
static int sg_dev_max;
static int sg_nr_dev;
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
221 sg_open(struct inode
*inode
, struct file
*filp
)
223 int dev
= iminor(inode
);
224 int flags
= filp
->f_flags
;
225 struct request_queue
*q
;
231 nonseekable_open(inode
, filp
);
232 SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev
, flags
));
233 sdp
= sg_get_dev(dev
);
234 if ((!sdp
) || (!sdp
->device
))
239 /* This driver's module count bumped by fops_get in <linux/fs.h> */
240 /* Prevent the device driver from vanishing while we sleep */
241 retval
= scsi_device_get(sdp
->device
);
245 if (!((flags
& O_NONBLOCK
) ||
246 scsi_block_when_processing_errors(sdp
->device
))) {
248 /* we are in error recovery for this device */
252 if (flags
& O_EXCL
) {
253 if (O_RDONLY
== (flags
& O_ACCMODE
)) {
254 retval
= -EPERM
; /* Can't lock it with read only access */
257 if (sdp
->headfp
&& (flags
& O_NONBLOCK
)) {
262 __wait_event_interruptible(sdp
->o_excl_wait
,
263 ((sdp
->headfp
|| sdp
->exclude
) ? 0 : (sdp
->exclude
= 1)), res
);
265 retval
= res
; /* -ERESTARTSYS because signal hit process */
268 } else if (sdp
->exclude
) { /* some other fd has an exclusive lock on dev */
269 if (flags
& O_NONBLOCK
) {
274 __wait_event_interruptible(sdp
->o_excl_wait
, (!sdp
->exclude
),
277 retval
= res
; /* -ERESTARTSYS because signal hit process */
285 if (!sdp
->headfp
) { /* no existing opens on this device */
287 q
= sdp
->device
->request_queue
;
288 sdp
->sg_tablesize
= min(q
->max_hw_segments
,
289 q
->max_phys_segments
);
291 if ((sfp
= sg_add_sfp(sdp
, dev
)))
292 filp
->private_data
= sfp
;
295 sdp
->exclude
= 0; /* undo if error */
302 scsi_device_put(sdp
->device
);
/* Following function was formerly called 'sg_close' */
308 sg_release(struct inode
*inode
, struct file
*filp
)
313 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
315 SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp
->disk
->disk_name
));
316 sg_fasync(-1, filp
, 0); /* remove filp from async notification list */
317 if (0 == sg_remove_sfp(sdp
, sfp
)) { /* Returns 1 when sdp gone */
318 if (!sdp
->detached
) {
319 scsi_device_put(sdp
->device
);
322 wake_up_interruptible(&sdp
->o_excl_wait
);
328 sg_read(struct file
*filp
, char __user
*buf
, size_t count
, loff_t
* ppos
)
333 int req_pack_id
= -1;
335 struct sg_header
*old_hdr
= NULL
;
338 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
340 SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
341 sdp
->disk
->disk_name
, (int) count
));
343 if (!access_ok(VERIFY_WRITE
, buf
, count
))
345 if (sfp
->force_packid
&& (count
>= SZ_SG_HEADER
)) {
346 old_hdr
= kmalloc(SZ_SG_HEADER
, GFP_KERNEL
);
349 if (__copy_from_user(old_hdr
, buf
, SZ_SG_HEADER
)) {
353 if (old_hdr
->reply_len
< 0) {
354 if (count
>= SZ_SG_IO_HDR
) {
355 sg_io_hdr_t
*new_hdr
;
356 new_hdr
= kmalloc(SZ_SG_IO_HDR
, GFP_KERNEL
);
361 retval
=__copy_from_user
362 (new_hdr
, buf
, SZ_SG_IO_HDR
);
363 req_pack_id
= new_hdr
->pack_id
;
371 req_pack_id
= old_hdr
->pack_id
;
373 srp
= sg_get_rq_mark(sfp
, req_pack_id
);
374 if (!srp
) { /* now wait on packet to arrive */
379 if (filp
->f_flags
& O_NONBLOCK
) {
384 retval
= 0; /* following macro beats race condition */
385 __wait_event_interruptible(sfp
->read_wait
,
387 (srp
= sg_get_rq_mark(sfp
, req_pack_id
))),
396 /* -ERESTARTSYS as signal hit process */
400 if (srp
->header
.interface_id
!= '\0') {
401 retval
= sg_new_read(sfp
, buf
, count
, srp
);
406 if (old_hdr
== NULL
) {
407 old_hdr
= kmalloc(SZ_SG_HEADER
, GFP_KERNEL
);
413 memset(old_hdr
, 0, SZ_SG_HEADER
);
414 old_hdr
->reply_len
= (int) hp
->timeout
;
415 old_hdr
->pack_len
= old_hdr
->reply_len
; /* old, strange behaviour */
416 old_hdr
->pack_id
= hp
->pack_id
;
417 old_hdr
->twelve_byte
=
418 ((srp
->data
.cmd_opcode
>= 0xc0) && (12 == hp
->cmd_len
)) ? 1 : 0;
419 old_hdr
->target_status
= hp
->masked_status
;
420 old_hdr
->host_status
= hp
->host_status
;
421 old_hdr
->driver_status
= hp
->driver_status
;
422 if ((CHECK_CONDITION
& hp
->masked_status
) ||
423 (DRIVER_SENSE
& hp
->driver_status
))
424 memcpy(old_hdr
->sense_buffer
, srp
->sense_b
,
425 sizeof (old_hdr
->sense_buffer
));
426 switch (hp
->host_status
) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
430 case DID_PASSTHROUGH
:
437 old_hdr
->result
= EBUSY
;
444 old_hdr
->result
= EIO
;
447 old_hdr
->result
= (srp
->sense_b
[0] == 0 &&
448 hp
->masked_status
== GOOD
) ? 0 : EIO
;
451 old_hdr
->result
= EIO
;
455 /* Now copy the result back to the user buffer. */
456 if (count
>= SZ_SG_HEADER
) {
457 if (__copy_to_user(buf
, old_hdr
, SZ_SG_HEADER
)) {
462 if (count
> old_hdr
->reply_len
)
463 count
= old_hdr
->reply_len
;
464 if (count
> SZ_SG_HEADER
) {
465 if (sg_read_oxfer(srp
, buf
, count
- SZ_SG_HEADER
)) {
471 count
= (old_hdr
->result
== 0) ? 0 : -EIO
;
472 sg_finish_rem_req(srp
);
480 sg_new_read(Sg_fd
* sfp
, char __user
*buf
, size_t count
, Sg_request
* srp
)
482 sg_io_hdr_t
*hp
= &srp
->header
;
486 if (count
< SZ_SG_IO_HDR
) {
491 if ((hp
->mx_sb_len
> 0) && hp
->sbp
) {
492 if ((CHECK_CONDITION
& hp
->masked_status
) ||
493 (DRIVER_SENSE
& hp
->driver_status
)) {
494 int sb_len
= SCSI_SENSE_BUFFERSIZE
;
495 sb_len
= (hp
->mx_sb_len
> sb_len
) ? sb_len
: hp
->mx_sb_len
;
496 len
= 8 + (int) srp
->sense_b
[7]; /* Additional sense length field */
497 len
= (len
> sb_len
) ? sb_len
: len
;
498 if (copy_to_user(hp
->sbp
, srp
->sense_b
, len
)) {
505 if (hp
->masked_status
|| hp
->host_status
|| hp
->driver_status
)
506 hp
->info
|= SG_INFO_CHECK
;
507 if (copy_to_user(buf
, hp
, SZ_SG_IO_HDR
)) {
511 err
= sg_read_xfer(srp
);
513 sg_finish_rem_req(srp
);
514 return (0 == err
) ? count
: err
;
518 sg_write(struct file
*filp
, const char __user
*buf
, size_t count
, loff_t
* ppos
)
520 int mxsize
, cmd_size
, k
;
521 int input_size
, blocking
;
522 unsigned char opcode
;
526 struct sg_header old_hdr
;
528 unsigned char cmnd
[MAX_COMMAND_SIZE
];
530 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
532 SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
533 sdp
->disk
->disk_name
, (int) count
));
536 if (!((filp
->f_flags
& O_NONBLOCK
) ||
537 scsi_block_when_processing_errors(sdp
->device
)))
540 if (!access_ok(VERIFY_READ
, buf
, count
))
541 return -EFAULT
; /* protects following copy_from_user()s + get_user()s */
542 if (count
< SZ_SG_HEADER
)
544 if (__copy_from_user(&old_hdr
, buf
, SZ_SG_HEADER
))
546 blocking
= !(filp
->f_flags
& O_NONBLOCK
);
547 if (old_hdr
.reply_len
< 0)
548 return sg_new_write(sfp
, buf
, count
, blocking
, 0, NULL
);
549 if (count
< (SZ_SG_HEADER
+ 6))
550 return -EIO
; /* The minimum scsi command length is 6 bytes. */
552 if (!(srp
= sg_add_request(sfp
))) {
553 SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
557 __get_user(opcode
, buf
);
558 if (sfp
->next_cmd_len
> 0) {
559 if (sfp
->next_cmd_len
> MAX_COMMAND_SIZE
) {
560 SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
561 sfp
->next_cmd_len
= 0;
562 sg_remove_request(sfp
, srp
);
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so that only this write() is affected */
568 cmd_size
= COMMAND_SIZE(opcode
); /* based on SCSI command group */
569 if ((opcode
>= 0xc0) && old_hdr
.twelve_byte
)
572 SCSI_LOG_TIMEOUT(4, printk(
573 "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode
, cmd_size
));
574 /* Determine buffer size. */
575 input_size
= count
- cmd_size
;
576 mxsize
= (input_size
> old_hdr
.reply_len
) ? input_size
: old_hdr
.reply_len
;
577 mxsize
-= SZ_SG_HEADER
;
578 input_size
-= SZ_SG_HEADER
;
579 if (input_size
< 0) {
580 sg_remove_request(sfp
, srp
);
581 return -EIO
; /* User did not pass enough bytes for this command. */
584 hp
->interface_id
= '\0'; /* indicator of old interface tunnelled */
585 hp
->cmd_len
= (unsigned char) cmd_size
;
589 hp
->dxfer_direction
= (old_hdr
.reply_len
> SZ_SG_HEADER
) ?
590 SG_DXFER_TO_FROM_DEV
: SG_DXFER_TO_DEV
;
592 hp
->dxfer_direction
= (mxsize
> 0) ? SG_DXFER_FROM_DEV
: SG_DXFER_NONE
;
593 hp
->dxfer_len
= mxsize
;
594 hp
->dxferp
= (char __user
*)buf
+ cmd_size
;
596 hp
->timeout
= old_hdr
.reply_len
; /* structure abuse ... */
597 hp
->flags
= input_size
; /* structure abuse ... */
598 hp
->pack_id
= old_hdr
.pack_id
;
600 if (__copy_from_user(cmnd
, buf
, cmd_size
))
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
	 */
607 if (hp
->dxfer_direction
== SG_DXFER_TO_FROM_DEV
)
608 if (printk_ratelimit())
610 "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
611 "guessing data in;\n" KERN_WARNING
" "
612 "program %s not setting count and/or reply_len properly\n",
613 old_hdr
.reply_len
- (int)SZ_SG_HEADER
,
614 input_size
, (unsigned int) cmnd
[0],
616 k
= sg_common_write(sfp
, srp
, cmnd
, sfp
->timeout
, blocking
);
617 return (k
< 0) ? k
: count
;
621 sg_new_write(Sg_fd
* sfp
, const char __user
*buf
, size_t count
,
622 int blocking
, int read_only
, Sg_request
** o_srp
)
627 unsigned char cmnd
[MAX_COMMAND_SIZE
];
629 unsigned long ul_timeout
;
631 if (count
< SZ_SG_IO_HDR
)
633 if (!access_ok(VERIFY_READ
, buf
, count
))
634 return -EFAULT
; /* protects following copy_from_user()s + get_user()s */
636 sfp
->cmd_q
= 1; /* when sg_io_hdr seen, set command queuing on */
637 if (!(srp
= sg_add_request(sfp
))) {
638 SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
642 if (__copy_from_user(hp
, buf
, SZ_SG_IO_HDR
)) {
643 sg_remove_request(sfp
, srp
);
646 if (hp
->interface_id
!= 'S') {
647 sg_remove_request(sfp
, srp
);
650 if (hp
->flags
& SG_FLAG_MMAP_IO
) {
651 if (hp
->dxfer_len
> sfp
->reserve
.bufflen
) {
652 sg_remove_request(sfp
, srp
);
653 return -ENOMEM
; /* MMAP_IO size must fit in reserve buffer */
655 if (hp
->flags
& SG_FLAG_DIRECT_IO
) {
656 sg_remove_request(sfp
, srp
);
657 return -EINVAL
; /* either MMAP_IO or DIRECT_IO (not both) */
659 if (sg_res_in_use(sfp
)) {
660 sg_remove_request(sfp
, srp
);
661 return -EBUSY
; /* reserve buffer already being used */
664 ul_timeout
= msecs_to_jiffies(srp
->header
.timeout
);
665 timeout
= (ul_timeout
< INT_MAX
) ? ul_timeout
: INT_MAX
;
666 if ((!hp
->cmdp
) || (hp
->cmd_len
< 6) || (hp
->cmd_len
> sizeof (cmnd
))) {
667 sg_remove_request(sfp
, srp
);
670 if (!access_ok(VERIFY_READ
, hp
->cmdp
, hp
->cmd_len
)) {
671 sg_remove_request(sfp
, srp
);
672 return -EFAULT
; /* protects following copy_from_user()s + get_user()s */
674 if (__copy_from_user(cmnd
, hp
->cmdp
, hp
->cmd_len
)) {
675 sg_remove_request(sfp
, srp
);
679 (!sg_allow_access(cmnd
[0], sfp
->parentdp
->device
->type
))) {
680 sg_remove_request(sfp
, srp
);
683 k
= sg_common_write(sfp
, srp
, cmnd
, timeout
, blocking
);
692 sg_common_write(Sg_fd
* sfp
, Sg_request
* srp
,
693 unsigned char *cmnd
, int timeout
, int blocking
)
696 Sg_device
*sdp
= sfp
->parentdp
;
697 sg_io_hdr_t
*hp
= &srp
->header
;
699 srp
->data
.cmd_opcode
= cmnd
[0]; /* hold opcode of command */
701 hp
->masked_status
= 0;
705 hp
->driver_status
= 0;
707 SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
708 (int) cmnd
[0], (int) hp
->cmd_len
));
710 if ((k
= sg_start_req(srp
))) {
711 SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k
));
712 sg_finish_rem_req(srp
);
713 return k
; /* probably out of space --> ENOMEM */
715 if ((k
= sg_write_xfer(srp
))) {
716 SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
717 sg_finish_rem_req(srp
);
721 sg_finish_rem_req(srp
);
725 switch (hp
->dxfer_direction
) {
726 case SG_DXFER_TO_FROM_DEV
:
727 case SG_DXFER_FROM_DEV
:
728 data_dir
= DMA_FROM_DEVICE
;
730 case SG_DXFER_TO_DEV
:
731 data_dir
= DMA_TO_DEVICE
;
733 case SG_DXFER_UNKNOWN
:
734 data_dir
= DMA_BIDIRECTIONAL
;
740 hp
->duration
= jiffies_to_msecs(jiffies
);
	/* Now send everything off to the mid-level. The next time we hear about
	   this packet is when sg_cmd_done() is called (i.e. a callback). */
743 if (scsi_execute_async(sdp
->device
, cmnd
, hp
->cmd_len
, data_dir
, srp
->data
.buffer
,
744 hp
->dxfer_len
, srp
->data
.k_use_sg
, timeout
,
745 SG_DEFAULT_RETRIES
, srp
, sg_cmd_done
,
747 SCSI_LOG_TIMEOUT(1, printk("sg_write: scsi_execute_async failed\n"));
749 * most likely out of mem, but could also be a bad map
751 sg_finish_rem_req(srp
);
758 sg_srp_done(Sg_request
*srp
, Sg_fd
*sfp
)
760 unsigned long iflags
;
763 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
765 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
770 sg_ioctl(struct inode
*inode
, struct file
*filp
,
771 unsigned int cmd_in
, unsigned long arg
)
773 void __user
*p
= (void __user
*)arg
;
775 int result
, val
, read_only
;
779 unsigned long iflags
;
781 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
783 SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
784 sdp
->disk
->disk_name
, (int) cmd_in
));
785 read_only
= (O_RDWR
!= (filp
->f_flags
& O_ACCMODE
));
790 int blocking
= 1; /* ignore O_NONBLOCK flag */
794 if (!scsi_block_when_processing_errors(sdp
->device
))
796 if (!access_ok(VERIFY_WRITE
, p
, SZ_SG_IO_HDR
))
799 sg_new_write(sfp
, p
, SZ_SG_IO_HDR
,
800 blocking
, read_only
, &srp
);
803 srp
->sg_io_owned
= 1;
805 result
= 0; /* following macro to beat race condition */
806 __wait_event_interruptible(sfp
->read_wait
,
807 (sdp
->detached
|| sfp
->closed
|| sg_srp_done(srp
, sfp
)),
812 return 0; /* request packet dropped already */
816 return result
; /* -ERESTARTSYS because signal hit process */
818 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
820 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
821 result
= sg_new_read(sfp
, p
, SZ_SG_IO_HDR
, srp
);
822 return (result
< 0) ? result
: 0;
825 result
= get_user(val
, ip
);
830 if (val
>= MULDIV (INT_MAX
, USER_HZ
, HZ
))
831 val
= MULDIV (INT_MAX
, USER_HZ
, HZ
);
832 sfp
->timeout_user
= val
;
833 sfp
->timeout
= MULDIV (val
, HZ
, USER_HZ
);
836 case SG_GET_TIMEOUT
: /* N.B. User receives timeout as return value */
837 /* strange ..., for backward compatibility */
838 return sfp
->timeout_user
;
839 case SG_SET_FORCE_LOW_DMA
:
840 result
= get_user(val
, ip
);
845 if ((0 == sfp
->low_dma
) && (0 == sg_res_in_use(sfp
))) {
846 val
= (int) sfp
->reserve
.bufflen
;
847 sg_remove_scat(&sfp
->reserve
);
848 sg_build_reserve(sfp
, val
);
853 sfp
->low_dma
= sdp
->device
->host
->unchecked_isa_dma
;
857 return put_user((int) sfp
->low_dma
, ip
);
859 if (!access_ok(VERIFY_WRITE
, p
, sizeof (sg_scsi_id_t
)))
862 sg_scsi_id_t __user
*sg_idp
= p
;
866 __put_user((int) sdp
->device
->host
->host_no
,
868 __put_user((int) sdp
->device
->channel
,
870 __put_user((int) sdp
->device
->id
, &sg_idp
->scsi_id
);
871 __put_user((int) sdp
->device
->lun
, &sg_idp
->lun
);
872 __put_user((int) sdp
->device
->type
, &sg_idp
->scsi_type
);
873 __put_user((short) sdp
->device
->host
->cmd_per_lun
,
874 &sg_idp
->h_cmd_per_lun
);
875 __put_user((short) sdp
->device
->queue_depth
,
876 &sg_idp
->d_queue_depth
);
877 __put_user(0, &sg_idp
->unused
[0]);
878 __put_user(0, &sg_idp
->unused
[1]);
881 case SG_SET_FORCE_PACK_ID
:
882 result
= get_user(val
, ip
);
885 sfp
->force_packid
= val
? 1 : 0;
888 if (!access_ok(VERIFY_WRITE
, ip
, sizeof (int)))
890 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
891 for (srp
= sfp
->headrp
; srp
; srp
= srp
->nextrp
) {
892 if ((1 == srp
->done
) && (!srp
->sg_io_owned
)) {
893 read_unlock_irqrestore(&sfp
->rq_list_lock
,
895 __put_user(srp
->header
.pack_id
, ip
);
899 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
902 case SG_GET_NUM_WAITING
:
903 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
904 for (val
= 0, srp
= sfp
->headrp
; srp
; srp
= srp
->nextrp
) {
905 if ((1 == srp
->done
) && (!srp
->sg_io_owned
))
908 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
909 return put_user(val
, ip
);
910 case SG_GET_SG_TABLESIZE
:
911 return put_user(sdp
->sg_tablesize
, ip
);
912 case SG_SET_RESERVED_SIZE
:
913 result
= get_user(val
, ip
);
918 if (val
!= sfp
->reserve
.bufflen
) {
919 if (sg_res_in_use(sfp
) || sfp
->mmap_called
)
921 sg_remove_scat(&sfp
->reserve
);
922 sg_build_reserve(sfp
, val
);
925 case SG_GET_RESERVED_SIZE
:
926 val
= (int) sfp
->reserve
.bufflen
;
927 return put_user(val
, ip
);
928 case SG_SET_COMMAND_Q
:
929 result
= get_user(val
, ip
);
932 sfp
->cmd_q
= val
? 1 : 0;
934 case SG_GET_COMMAND_Q
:
935 return put_user((int) sfp
->cmd_q
, ip
);
936 case SG_SET_KEEP_ORPHAN
:
937 result
= get_user(val
, ip
);
940 sfp
->keep_orphan
= val
;
942 case SG_GET_KEEP_ORPHAN
:
943 return put_user((int) sfp
->keep_orphan
, ip
);
944 case SG_NEXT_CMD_LEN
:
945 result
= get_user(val
, ip
);
948 sfp
->next_cmd_len
= (val
> 0) ? val
: 0;
950 case SG_GET_VERSION_NUM
:
951 return put_user(sg_version_num
, ip
);
952 case SG_GET_ACCESS_COUNT
:
953 /* faked - we don't have a real access count anymore */
954 val
= (sdp
->device
? 1 : 0);
955 return put_user(val
, ip
);
956 case SG_GET_REQUEST_TABLE
:
957 if (!access_ok(VERIFY_WRITE
, p
, SZ_SG_REQ_INFO
* SG_MAX_QUEUE
))
960 sg_req_info_t
*rinfo
;
963 rinfo
= kmalloc(SZ_SG_REQ_INFO
* SG_MAX_QUEUE
,
967 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
968 for (srp
= sfp
->headrp
, val
= 0; val
< SG_MAX_QUEUE
;
969 ++val
, srp
= srp
? srp
->nextrp
: srp
) {
970 memset(&rinfo
[val
], 0, SZ_SG_REQ_INFO
);
972 rinfo
[val
].req_state
= srp
->done
+ 1;
974 srp
->header
.masked_status
&
975 srp
->header
.host_status
&
976 srp
->header
.driver_status
;
978 rinfo
[val
].duration
=
979 srp
->header
.duration
;
981 ms
= jiffies_to_msecs(jiffies
);
982 rinfo
[val
].duration
=
983 (ms
> srp
->header
.duration
) ?
984 (ms
- srp
->header
.duration
) : 0;
986 rinfo
[val
].orphan
= srp
->orphan
;
987 rinfo
[val
].sg_io_owned
=
995 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
996 result
= __copy_to_user(p
, rinfo
,
997 SZ_SG_REQ_INFO
* SG_MAX_QUEUE
);
998 result
= result
? -EFAULT
: 0;
1002 case SG_EMULATED_HOST
:
1005 return put_user(sdp
->device
->host
->hostt
->emulated
, ip
);
1009 if (filp
->f_flags
& O_NONBLOCK
) {
1010 if (scsi_host_in_recovery(sdp
->device
->host
))
1012 } else if (!scsi_block_when_processing_errors(sdp
->device
))
1014 result
= get_user(val
, ip
);
1017 if (SG_SCSI_RESET_NOTHING
== val
)
1020 case SG_SCSI_RESET_DEVICE
:
1021 val
= SCSI_TRY_RESET_DEVICE
;
1023 case SG_SCSI_RESET_BUS
:
1024 val
= SCSI_TRY_RESET_BUS
;
1026 case SG_SCSI_RESET_HOST
:
1027 val
= SCSI_TRY_RESET_HOST
;
1032 if (!capable(CAP_SYS_ADMIN
) || !capable(CAP_SYS_RAWIO
))
1034 return (scsi_reset_provider(sdp
->device
, val
) ==
1035 SUCCESS
) ? 0 : -EIO
;
1036 case SCSI_IOCTL_SEND_COMMAND
:
1040 unsigned char opcode
= WRITE_6
;
1041 Scsi_Ioctl_Command __user
*siocp
= p
;
1043 if (copy_from_user(&opcode
, siocp
->data
, 1))
1045 if (!sg_allow_access(opcode
, sdp
->device
->type
))
1048 return sg_scsi_ioctl(filp
, sdp
->device
->request_queue
, NULL
, p
);
1050 result
= get_user(val
, ip
);
1053 sdp
->sgdebug
= (char) val
;
1055 case SCSI_IOCTL_GET_IDLUN
:
1056 case SCSI_IOCTL_GET_BUS_NUMBER
:
1057 case SCSI_IOCTL_PROBE_HOST
:
1058 case SG_GET_TRANSFORM
:
1061 return scsi_ioctl(sdp
->device
, cmd_in
, p
);
1064 return -EPERM
; /* don't know so take safe approach */
1065 return scsi_ioctl(sdp
->device
, cmd_in
, p
);
1069 #ifdef CONFIG_COMPAT
1070 static long sg_compat_ioctl(struct file
*filp
, unsigned int cmd_in
, unsigned long arg
)
1074 struct scsi_device
*sdev
;
1076 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
1080 if (sdev
->host
->hostt
->compat_ioctl
) {
1083 ret
= sdev
->host
->hostt
->compat_ioctl(sdev
, cmd_in
, (void __user
*)arg
);
1088 return -ENOIOCTLCMD
;
1093 sg_poll(struct file
*filp
, poll_table
* wait
)
1095 unsigned int res
= 0;
1100 unsigned long iflags
;
1102 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
))
1105 poll_wait(filp
, &sfp
->read_wait
, wait
);
1106 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
1107 for (srp
= sfp
->headrp
; srp
; srp
= srp
->nextrp
) {
1108 /* if any read waiting, flag it */
1109 if ((0 == res
) && (1 == srp
->done
) && (!srp
->sg_io_owned
))
1110 res
= POLLIN
| POLLRDNORM
;
1113 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
1117 else if (!sfp
->cmd_q
) {
1119 res
|= POLLOUT
| POLLWRNORM
;
1120 } else if (count
< SG_MAX_QUEUE
)
1121 res
|= POLLOUT
| POLLWRNORM
;
1122 SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
1123 sdp
->disk
->disk_name
, (int) res
));
1128 sg_fasync(int fd
, struct file
*filp
, int mode
)
1134 if ((!(sfp
= (Sg_fd
*) filp
->private_data
)) || (!(sdp
= sfp
->parentdp
)))
1136 SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1137 sdp
->disk
->disk_name
, mode
));
1139 retval
= fasync_helper(fd
, filp
, mode
, &sfp
->async_qp
);
1140 return (retval
< 0) ? retval
: 0;
1143 static struct page
*
1144 sg_vma_nopage(struct vm_area_struct
*vma
, unsigned long addr
, int *type
)
1147 struct page
*page
= NOPAGE_SIGBUS
;
1148 unsigned long offset
, len
, sa
;
1149 Sg_scatter_hold
*rsv_schp
;
1150 struct scatterlist
*sg
;
1153 if ((NULL
== vma
) || (!(sfp
= (Sg_fd
*) vma
->vm_private_data
)))
1155 rsv_schp
= &sfp
->reserve
;
1156 offset
= addr
- vma
->vm_start
;
1157 if (offset
>= rsv_schp
->bufflen
)
1159 SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
1160 offset
, rsv_schp
->k_use_sg
));
1161 sg
= rsv_schp
->buffer
;
1163 for (k
= 0; (k
< rsv_schp
->k_use_sg
) && (sa
< vma
->vm_end
);
1165 len
= vma
->vm_end
- sa
;
1166 len
= (len
< sg
->length
) ? len
: sg
->length
;
1169 get_page(page
); /* increment page count */
1177 *type
= VM_FAULT_MINOR
;
1181 static struct vm_operations_struct sg_mmap_vm_ops
= {
1182 .nopage
= sg_vma_nopage
,
1186 sg_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
1189 unsigned long req_sz
, len
, sa
;
1190 Sg_scatter_hold
*rsv_schp
;
1192 struct scatterlist
*sg
;
1194 if ((!filp
) || (!vma
) || (!(sfp
= (Sg_fd
*) filp
->private_data
)))
1196 req_sz
= vma
->vm_end
- vma
->vm_start
;
1197 SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1198 (void *) vma
->vm_start
, (int) req_sz
));
1200 return -EINVAL
; /* want no offset */
1201 rsv_schp
= &sfp
->reserve
;
1202 if (req_sz
> rsv_schp
->bufflen
)
1203 return -ENOMEM
; /* cannot map more than reserved buffer */
1206 sg
= rsv_schp
->buffer
;
1207 for (k
= 0; (k
< rsv_schp
->k_use_sg
) && (sa
< vma
->vm_end
);
1209 len
= vma
->vm_end
- sa
;
1210 len
= (len
< sg
->length
) ? len
: sg
->length
;
1214 sfp
->mmap_called
= 1;
1215 vma
->vm_flags
|= VM_RESERVED
;
1216 vma
->vm_private_data
= sfp
;
1217 vma
->vm_ops
= &sg_mmap_vm_ops
;
/* This function is a "bottom half" handler that is called by the
 * mid level when a command is completed (or has failed). */
1224 sg_cmd_done(void *data
, char *sense
, int result
, int resid
)
1226 Sg_request
*srp
= data
;
1227 Sg_device
*sdp
= NULL
;
1229 unsigned long iflags
;
1233 printk(KERN_ERR
"sg_cmd_done: NULL request\n");
1236 sfp
= srp
->parentfp
;
1238 sdp
= sfp
->parentdp
;
1239 if ((NULL
== sdp
) || sdp
->detached
) {
1240 printk(KERN_INFO
"sg_cmd_done: device detached\n");
1245 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1246 sdp
->disk
->disk_name
, srp
->header
.pack_id
, result
));
1247 srp
->header
.resid
= resid
;
1248 ms
= jiffies_to_msecs(jiffies
);
1249 srp
->header
.duration
= (ms
> srp
->header
.duration
) ?
1250 (ms
- srp
->header
.duration
) : 0;
1252 struct scsi_sense_hdr sshdr
;
1254 memcpy(srp
->sense_b
, sense
, sizeof (srp
->sense_b
));
1255 srp
->header
.status
= 0xff & result
;
1256 srp
->header
.masked_status
= status_byte(result
);
1257 srp
->header
.msg_status
= msg_byte(result
);
1258 srp
->header
.host_status
= host_byte(result
);
1259 srp
->header
.driver_status
= driver_byte(result
);
1260 if ((sdp
->sgdebug
> 0) &&
1261 ((CHECK_CONDITION
== srp
->header
.masked_status
) ||
1262 (COMMAND_TERMINATED
== srp
->header
.masked_status
)))
1263 __scsi_print_sense("sg_cmd_done", sense
,
1264 SCSI_SENSE_BUFFERSIZE
);
1266 /* Following if statement is a patch supplied by Eric Youngdale */
1267 if (driver_byte(result
) != 0
1268 && scsi_normalize_sense(sense
, SCSI_SENSE_BUFFERSIZE
, &sshdr
)
1269 && !scsi_sense_is_deferred(&sshdr
)
1270 && sshdr
.sense_key
== UNIT_ATTENTION
1271 && sdp
->device
->removable
) {
1272 /* Detected possible disc change. Set the bit - this */
1273 /* may be used if there are filesystems using this device */
1274 sdp
->device
->changed
= 1;
1277 /* Rely on write phase to clean out srp status values, so no "else" */
1279 if (sfp
->closed
) { /* whoops this fd already released, cleanup */
1280 SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
1281 sg_finish_rem_req(srp
);
1283 if (NULL
== sfp
->headrp
) {
1284 SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
1285 if (0 == sg_remove_sfp(sdp
, sfp
)) { /* device still present */
1286 scsi_device_put(sdp
->device
);
1290 } else if (srp
&& srp
->orphan
) {
1291 if (sfp
->keep_orphan
)
1292 srp
->sg_io_owned
= 0;
1294 sg_finish_rem_req(srp
);
1299 /* Now wake up any sg_read() that is waiting for this packet. */
1300 kill_fasync(&sfp
->async_qp
, SIGPOLL
, POLL_IN
);
1301 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
1303 wake_up_interruptible(&sfp
->read_wait
);
1304 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
1308 static struct file_operations sg_fops
= {
1309 .owner
= THIS_MODULE
,
1314 #ifdef CONFIG_COMPAT
1315 .compat_ioctl
= sg_compat_ioctl
,
1319 .release
= sg_release
,
1320 .fasync
= sg_fasync
,
1323 static struct class *sg_sysfs_class
;
1325 static int sg_sysfs_valid
= 0;
1327 static int sg_alloc(struct gendisk
*disk
, struct scsi_device
*scsidp
)
1329 struct request_queue
*q
= scsidp
->request_queue
;
1331 unsigned long iflags
;
1332 void *old_sg_dev_arr
= NULL
;
1335 sdp
= kzalloc(sizeof(Sg_device
), GFP_KERNEL
);
1337 printk(KERN_WARNING
"kmalloc Sg_device failure\n");
1341 write_lock_irqsave(&sg_dev_arr_lock
, iflags
);
1342 if (unlikely(sg_nr_dev
>= sg_dev_max
)) { /* try to resize */
1344 int tmp_dev_max
= sg_nr_dev
+ SG_DEV_ARR_LUMP
;
1345 write_unlock_irqrestore(&sg_dev_arr_lock
, iflags
);
1347 tmp_da
= kzalloc(tmp_dev_max
* sizeof(Sg_device
*), GFP_KERNEL
);
1348 if (unlikely(!tmp_da
))
1351 write_lock_irqsave(&sg_dev_arr_lock
, iflags
);
1352 memcpy(tmp_da
, sg_dev_arr
, sg_dev_max
* sizeof(Sg_device
*));
1353 old_sg_dev_arr
= sg_dev_arr
;
1354 sg_dev_arr
= tmp_da
;
1355 sg_dev_max
= tmp_dev_max
;
1358 for (k
= 0; k
< sg_dev_max
; k
++)
1361 if (unlikely(k
>= SG_MAX_DEVS
))
1364 SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k
));
1365 sprintf(disk
->disk_name
, "sg%d", k
);
1366 disk
->first_minor
= k
;
1368 sdp
->device
= scsidp
;
1369 init_waitqueue_head(&sdp
->o_excl_wait
);
1370 sdp
->sg_tablesize
= min(q
->max_hw_segments
, q
->max_phys_segments
);
1373 sg_dev_arr
[k
] = sdp
;
1374 write_unlock_irqrestore(&sg_dev_arr_lock
, iflags
);
1380 kfree(old_sg_dev_arr
);
1384 printk(KERN_WARNING
"sg_alloc: device array cannot be resized\n");
1389 write_unlock_irqrestore(&sg_dev_arr_lock
, iflags
);
1390 sdev_printk(KERN_WARNING
, scsidp
,
1391 "Unable to attach sg device type=%d, minor "
1392 "number exceeds %d\n", scsidp
->type
, SG_MAX_DEVS
- 1);
1398 sg_add(struct class_device
*cl_dev
, struct class_interface
*cl_intf
)
1400 struct scsi_device
*scsidp
= to_scsi_device(cl_dev
->dev
);
1401 struct gendisk
*disk
;
1402 Sg_device
*sdp
= NULL
;
1403 struct cdev
* cdev
= NULL
;
1406 disk
= alloc_disk(1);
1408 printk(KERN_WARNING
"alloc_disk failed\n");
1411 disk
->major
= SCSI_GENERIC_MAJOR
;
1414 cdev
= cdev_alloc();
1416 printk(KERN_WARNING
"cdev_alloc failed\n");
1419 cdev
->owner
= THIS_MODULE
;
1420 cdev
->ops
= &sg_fops
;
1422 error
= sg_alloc(disk
, scsidp
);
1424 printk(KERN_WARNING
"sg_alloc failed\n");
1428 sdp
= sg_dev_arr
[k
];
1430 error
= cdev_add(cdev
, MKDEV(SCSI_GENERIC_MAJOR
, k
), 1);
1435 if (sg_sysfs_valid
) {
1436 struct class_device
* sg_class_member
;
1438 sg_class_member
= class_device_create(sg_sysfs_class
, NULL
,
1439 MKDEV(SCSI_GENERIC_MAJOR
, k
),
1442 if (IS_ERR(sg_class_member
))
1443 printk(KERN_WARNING
"sg_add: "
1444 "class_device_create failed\n");
1445 class_set_devdata(sg_class_member
, sdp
);
1446 error
= sysfs_create_link(&scsidp
->sdev_gendev
.kobj
,
1447 &sg_class_member
->kobj
, "generic");
1449 printk(KERN_ERR
"sg_add: unable to make symlink "
1450 "'generic' back to sg%d\n", k
);
1452 printk(KERN_WARNING
"sg_add: sg_sys INvalid\n");
1454 sdev_printk(KERN_NOTICE
, scsidp
,
1455 "Attached scsi generic sg%d type %d\n", k
,scsidp
->type
);
1467 sg_remove(struct class_device
*cl_dev
, struct class_interface
*cl_intf
)
1469 struct scsi_device
*scsidp
= to_scsi_device(cl_dev
->dev
);
1470 Sg_device
*sdp
= NULL
;
1471 unsigned long iflags
;
1478 if (NULL
== sg_dev_arr
)
1481 write_lock_irqsave(&sg_dev_arr_lock
, iflags
);
1482 for (k
= 0; k
< sg_dev_max
; k
++) {
1483 sdp
= sg_dev_arr
[k
];
1484 if ((NULL
== sdp
) || (sdp
->device
!= scsidp
))
1485 continue; /* dirty but lowers nesting */
1488 for (sfp
= sdp
->headfp
; sfp
; sfp
= tsfp
) {
1490 for (srp
= sfp
->headrp
; srp
; srp
= tsrp
) {
1492 if (sfp
->closed
|| (0 == sg_srp_done(srp
, sfp
)))
1493 sg_finish_rem_req(srp
);
1496 scsi_device_put(sdp
->device
);
1497 __sg_remove_sfp(sdp
, sfp
);
1500 wake_up_interruptible(&sfp
->read_wait
);
1501 kill_fasync(&sfp
->async_qp
, SIGPOLL
,
1505 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k
));
1506 if (NULL
== sdp
->headfp
) {
1507 sg_dev_arr
[k
] = NULL
;
1509 } else { /* nothing active, simple case */
1510 SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k
));
1511 sg_dev_arr
[k
] = NULL
;
1516 write_unlock_irqrestore(&sg_dev_arr_lock
, iflags
);
1519 sysfs_remove_link(&scsidp
->sdev_gendev
.kobj
, "generic");
1520 class_device_destroy(sg_sysfs_class
, MKDEV(SCSI_GENERIC_MAJOR
, k
));
1521 cdev_del(sdp
->cdev
);
1523 put_disk(sdp
->disk
);
1525 if (NULL
== sdp
->headfp
)
1526 kfree((char *) sdp
);
1530 msleep(10); /* dirty detach so delay device destruction */
/* Set 'perm' (4th argument) to 0 to disable module_param's definition
 * of sysfs parameters (which module_param doesn't yet support).
 * Sysfs parameters defined explicitly below.
 */
module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
1554 if (def_reserved_size
>= 0)
1555 sg_big_buff
= def_reserved_size
;
1557 rc
= register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR
, 0),
1561 sg_sysfs_class
= class_create(THIS_MODULE
, "scsi_generic");
1562 if ( IS_ERR(sg_sysfs_class
) ) {
1563 rc
= PTR_ERR(sg_sysfs_class
);
1567 rc
= scsi_register_interface(&sg_interface
);
1569 #ifdef CONFIG_SCSI_PROC_FS
1571 #endif /* CONFIG_SCSI_PROC_FS */
1574 class_destroy(sg_sysfs_class
);
1576 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR
, 0), SG_MAX_DEVS
);
1583 #ifdef CONFIG_SCSI_PROC_FS
1585 #endif /* CONFIG_SCSI_PROC_FS */
1586 scsi_unregister_interface(&sg_interface
);
1587 class_destroy(sg_sysfs_class
);
1589 unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR
, 0),
1591 kfree((char *)sg_dev_arr
);
1597 sg_start_req(Sg_request
* srp
)
1600 Sg_fd
*sfp
= srp
->parentfp
;
1601 sg_io_hdr_t
*hp
= &srp
->header
;
1602 int dxfer_len
= (int) hp
->dxfer_len
;
1603 int dxfer_dir
= hp
->dxfer_direction
;
1604 Sg_scatter_hold
*req_schp
= &srp
->data
;
1605 Sg_scatter_hold
*rsv_schp
= &sfp
->reserve
;
1607 SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len
));
1608 if ((dxfer_len
<= 0) || (dxfer_dir
== SG_DXFER_NONE
))
1610 if (sg_allow_dio
&& (hp
->flags
& SG_FLAG_DIRECT_IO
) &&
1611 (dxfer_dir
!= SG_DXFER_UNKNOWN
) && (0 == hp
->iovec_count
) &&
1612 (!sfp
->parentdp
->device
->host
->unchecked_isa_dma
)) {
1613 res
= sg_build_direct(srp
, sfp
, dxfer_len
);
1614 if (res
<= 0) /* -ve -> error, 0 -> done, 1 -> try indirect */
1617 if ((!sg_res_in_use(sfp
)) && (dxfer_len
<= rsv_schp
->bufflen
))
1618 sg_link_reserve(sfp
, srp
, dxfer_len
);
1620 res
= sg_build_indirect(req_schp
, sfp
, dxfer_len
);
1622 sg_remove_scat(req_schp
);
1630 sg_finish_rem_req(Sg_request
* srp
)
1632 Sg_fd
*sfp
= srp
->parentfp
;
1633 Sg_scatter_hold
*req_schp
= &srp
->data
;
1635 SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp
->res_used
));
1637 sg_unlink_reserve(sfp
, srp
);
1639 sg_remove_scat(req_schp
);
1640 sg_remove_request(sfp
, srp
);
1644 sg_build_sgat(Sg_scatter_hold
* schp
, const Sg_fd
* sfp
, int tablesize
)
1646 int sg_bufflen
= tablesize
* sizeof(struct scatterlist
);
1647 gfp_t gfp_flags
= GFP_ATOMIC
| __GFP_NOWARN
;
1650 * TODO: test without low_dma, we should not need it since
1651 * the block layer will bounce the buffer for us
1653 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
1656 gfp_flags
|= GFP_DMA
;
1657 schp
->buffer
= kzalloc(sg_bufflen
, gfp_flags
);
1660 schp
->sglist_len
= sg_bufflen
;
1661 return tablesize
; /* number of scat_gath elements allocated */
1664 #ifdef SG_ALLOW_DIO_CODE
/* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
/* TODO: hopefully we can use the generic block layer code */

/* Pin down user pages and put them into a scatter gather list. Returns <= 0 if
   - mapping of all pages not successful
   (i.e., either completely successful or fails)
*/
1673 st_map_user_pages(struct scatterlist
*sgl
, const unsigned int max_pages
,
1674 unsigned long uaddr
, size_t count
, int rw
)
1676 unsigned long end
= (uaddr
+ count
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
1677 unsigned long start
= uaddr
>> PAGE_SHIFT
;
1678 const int nr_pages
= end
- start
;
1680 struct page
**pages
;
1682 /* User attempted Overflow! */
1683 if ((uaddr
+ count
) < uaddr
)
1687 if (nr_pages
> max_pages
)
1694 if ((pages
= kmalloc(max_pages
* sizeof(*pages
), GFP_ATOMIC
)) == NULL
)
1697 /* Try to fault in all of the necessary pages */
1698 down_read(¤t
->mm
->mmap_sem
);
1699 /* rw==READ means read from drive, write into memory area */
1700 res
= get_user_pages(
1706 0, /* don't force */
1709 up_read(¤t
->mm
->mmap_sem
);
1711 /* Errors and no page mapped should return here */
1715 for (i
=0; i
< nr_pages
; i
++) {
		/* FIXME: flush superfluous for rw==READ,
		 * probably wrong function for rw==WRITE
		 */
1719 flush_dcache_page(pages
[i
]);
1720 /* ?? Is locking needed? I don't think so */
1721 /* if (TestSetPageLocked(pages[i]))
1725 sgl
[0].page
= pages
[0];
1726 sgl
[0].offset
= uaddr
& ~PAGE_MASK
;
1728 sgl
[0].length
= PAGE_SIZE
- sgl
[0].offset
;
1729 count
-= sgl
[0].length
;
1730 for (i
=1; i
< nr_pages
; i
++) {
1731 sgl
[i
].page
= pages
[i
];
1732 sgl
[i
].length
= count
< PAGE_SIZE
? count
: PAGE_SIZE
;
1737 sgl
[0].length
= count
;
1745 for (j
=0; j
< res
; j
++)
1746 page_cache_release(pages
[j
]);
1754 /* And unmap them... */
1756 st_unmap_user_pages(struct scatterlist
*sgl
, const unsigned int nr_pages
,
1761 for (i
=0; i
< nr_pages
; i
++) {
1762 struct page
*page
= sgl
[i
].page
;
1766 /* unlock_page(page); */
1767 /* FIXME: cache flush missing for rw==READ
1768 * FIXME: call the correct reference counting function
1770 page_cache_release(page
);
1776 /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
1780 /* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
1782 sg_build_direct(Sg_request
* srp
, Sg_fd
* sfp
, int dxfer_len
)
1784 #ifdef SG_ALLOW_DIO_CODE
1785 sg_io_hdr_t
*hp
= &srp
->header
;
1786 Sg_scatter_hold
*schp
= &srp
->data
;
1787 int sg_tablesize
= sfp
->parentdp
->sg_tablesize
;
1788 int mx_sc_elems
, res
;
1789 struct scsi_device
*sdev
= sfp
->parentdp
->device
;
1791 if (((unsigned long)hp
->dxferp
&
1792 queue_dma_alignment(sdev
->request_queue
)) != 0)
1795 mx_sc_elems
= sg_build_sgat(schp
, sfp
, sg_tablesize
);
1796 if (mx_sc_elems
<= 0) {
1799 res
= st_map_user_pages(schp
->buffer
, mx_sc_elems
,
1800 (unsigned long)hp
->dxferp
, dxfer_len
,
1801 (SG_DXFER_TO_DEV
== hp
->dxfer_direction
) ? 1 : 0);
1803 sg_remove_scat(schp
);
1806 schp
->k_use_sg
= res
;
1807 schp
->dio_in_use
= 1;
1808 hp
->info
|= SG_INFO_DIRECT_IO
;
1816 sg_build_indirect(Sg_scatter_hold
* schp
, Sg_fd
* sfp
, int buff_size
)
1818 struct scatterlist
*sg
;
1819 int ret_sz
= 0, k
, rem_sz
, num
, mx_sc_elems
;
1820 int sg_tablesize
= sfp
->parentdp
->sg_tablesize
;
1821 int blk_size
= buff_size
;
1822 struct page
*p
= NULL
;
1824 if ((blk_size
< 0) || (!sfp
))
1827 ++blk_size
; /* don't know why */
1828 /* round request up to next highest SG_SECTOR_SZ byte boundary */
1829 blk_size
= (blk_size
+ SG_SECTOR_MSK
) & (~SG_SECTOR_MSK
);
1830 SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1831 buff_size
, blk_size
));
1833 /* N.B. ret_sz carried into this block ... */
1834 mx_sc_elems
= sg_build_sgat(schp
, sfp
, sg_tablesize
);
1835 if (mx_sc_elems
< 0)
1836 return mx_sc_elems
; /* most likely -ENOMEM */
1838 for (k
= 0, sg
= schp
->buffer
, rem_sz
= blk_size
;
1839 (rem_sz
> 0) && (k
< mx_sc_elems
);
1840 ++k
, rem_sz
-= ret_sz
, ++sg
) {
1842 num
= (rem_sz
> SG_SCATTER_SZ
) ? SG_SCATTER_SZ
: rem_sz
;
1843 p
= sg_page_malloc(num
, sfp
->low_dma
, &ret_sz
);
1848 sg
->length
= ret_sz
;
1850 SCSI_LOG_TIMEOUT(5, printk("sg_build_build: k=%d, a=0x%p, len=%d\n",
1852 } /* end of for loop */
1855 SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k
, rem_sz
));
1857 schp
->bufflen
= blk_size
;
1858 if (rem_sz
> 0) /* must have failed */
1865 sg_write_xfer(Sg_request
* srp
)
1867 sg_io_hdr_t
*hp
= &srp
->header
;
1868 Sg_scatter_hold
*schp
= &srp
->data
;
1869 struct scatterlist
*sg
= schp
->buffer
;
1871 int j
, k
, onum
, usglen
, ksglen
, res
;
1872 int iovec_count
= (int) hp
->iovec_count
;
1873 int dxfer_dir
= hp
->dxfer_direction
;
1875 unsigned char __user
*up
;
1876 int new_interface
= ('\0' == hp
->interface_id
) ? 0 : 1;
1878 if ((SG_DXFER_UNKNOWN
== dxfer_dir
) || (SG_DXFER_TO_DEV
== dxfer_dir
) ||
1879 (SG_DXFER_TO_FROM_DEV
== dxfer_dir
)) {
1880 num_xfer
= (int) (new_interface
? hp
->dxfer_len
: hp
->flags
);
1881 if (schp
->bufflen
< num_xfer
)
1882 num_xfer
= schp
->bufflen
;
1884 if ((num_xfer
<= 0) || (schp
->dio_in_use
) ||
1886 && ((SG_FLAG_NO_DXFER
| SG_FLAG_MMAP_IO
) & hp
->flags
)))
1889 SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
1890 num_xfer
, iovec_count
, schp
->k_use_sg
));
1893 if (!access_ok(VERIFY_READ
, hp
->dxferp
, SZ_SG_IOVEC
* onum
))
1898 ksglen
= sg
->length
;
1899 p
= page_address(sg
->page
);
1900 for (j
= 0, k
= 0; j
< onum
; ++j
) {
1901 res
= sg_u_iovec(hp
, iovec_count
, j
, 1, &usglen
, &up
);
1905 for (; p
; ++sg
, ksglen
= sg
->length
,
1906 p
= page_address(sg
->page
)) {
1909 if (ksglen
> usglen
) {
1910 if (usglen
>= num_xfer
) {
1911 if (__copy_from_user(p
, up
, num_xfer
))
1915 if (__copy_from_user(p
, up
, usglen
))
1921 if (ksglen
>= num_xfer
) {
1922 if (__copy_from_user(p
, up
, num_xfer
))
1926 if (__copy_from_user(p
, up
, ksglen
))
1932 if (k
>= schp
->k_use_sg
)
1941 sg_u_iovec(sg_io_hdr_t
* hp
, int sg_num
, int ind
,
1942 int wr_xf
, int *countp
, unsigned char __user
**up
)
1944 int num_xfer
= (int) hp
->dxfer_len
;
1945 unsigned char __user
*p
= hp
->dxferp
;
1949 if (wr_xf
&& ('\0' == hp
->interface_id
))
1950 count
= (int) hp
->flags
; /* holds "old" input_size */
1955 if (__copy_from_user(&iovec
, p
+ ind
*SZ_SG_IOVEC
, SZ_SG_IOVEC
))
1958 count
= (int) iovec
.iov_len
;
1960 if (!access_ok(wr_xf
? VERIFY_READ
: VERIFY_WRITE
, p
, count
))
1970 sg_remove_scat(Sg_scatter_hold
* schp
)
1972 SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp
->k_use_sg
));
1973 if (schp
->buffer
&& (schp
->sglist_len
> 0)) {
1974 struct scatterlist
*sg
= schp
->buffer
;
1976 if (schp
->dio_in_use
) {
1977 #ifdef SG_ALLOW_DIO_CODE
1978 st_unmap_user_pages(sg
, schp
->k_use_sg
, TRUE
);
1983 for (k
= 0; (k
< schp
->k_use_sg
) && sg
->page
;
1985 SCSI_LOG_TIMEOUT(5, printk(
1986 "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
1987 k
, sg
->page
, sg
->length
));
1988 sg_page_free(sg
->page
, sg
->length
);
1991 kfree(schp
->buffer
);
1993 memset(schp
, 0, sizeof (*schp
));
1997 sg_read_xfer(Sg_request
* srp
)
1999 sg_io_hdr_t
*hp
= &srp
->header
;
2000 Sg_scatter_hold
*schp
= &srp
->data
;
2001 struct scatterlist
*sg
= schp
->buffer
;
2003 int j
, k
, onum
, usglen
, ksglen
, res
;
2004 int iovec_count
= (int) hp
->iovec_count
;
2005 int dxfer_dir
= hp
->dxfer_direction
;
2007 unsigned char __user
*up
;
2008 int new_interface
= ('\0' == hp
->interface_id
) ? 0 : 1;
2010 if ((SG_DXFER_UNKNOWN
== dxfer_dir
) || (SG_DXFER_FROM_DEV
== dxfer_dir
)
2011 || (SG_DXFER_TO_FROM_DEV
== dxfer_dir
)) {
2012 num_xfer
= hp
->dxfer_len
;
2013 if (schp
->bufflen
< num_xfer
)
2014 num_xfer
= schp
->bufflen
;
2016 if ((num_xfer
<= 0) || (schp
->dio_in_use
) ||
2018 && ((SG_FLAG_NO_DXFER
| SG_FLAG_MMAP_IO
) & hp
->flags
)))
2021 SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
2022 num_xfer
, iovec_count
, schp
->k_use_sg
));
2025 if (!access_ok(VERIFY_READ
, hp
->dxferp
, SZ_SG_IOVEC
* onum
))
2030 p
= page_address(sg
->page
);
2031 ksglen
= sg
->length
;
2032 for (j
= 0, k
= 0; j
< onum
; ++j
) {
2033 res
= sg_u_iovec(hp
, iovec_count
, j
, 0, &usglen
, &up
);
2037 for (; p
; ++sg
, ksglen
= sg
->length
,
2038 p
= page_address(sg
->page
)) {
2041 if (ksglen
> usglen
) {
2042 if (usglen
>= num_xfer
) {
2043 if (__copy_to_user(up
, p
, num_xfer
))
2047 if (__copy_to_user(up
, p
, usglen
))
2053 if (ksglen
>= num_xfer
) {
2054 if (__copy_to_user(up
, p
, num_xfer
))
2058 if (__copy_to_user(up
, p
, ksglen
))
2064 if (k
>= schp
->k_use_sg
)
2073 sg_read_oxfer(Sg_request
* srp
, char __user
*outp
, int num_read_xfer
)
2075 Sg_scatter_hold
*schp
= &srp
->data
;
2076 struct scatterlist
*sg
= schp
->buffer
;
2079 SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
2081 if ((!outp
) || (num_read_xfer
<= 0))
2084 for (k
= 0; (k
< schp
->k_use_sg
) && sg
->page
; ++k
, ++sg
) {
2086 if (num
> num_read_xfer
) {
2087 if (__copy_to_user(outp
, page_address(sg
->page
),
2092 if (__copy_to_user(outp
, page_address(sg
->page
),
2095 num_read_xfer
-= num
;
2096 if (num_read_xfer
<= 0)
2106 sg_build_reserve(Sg_fd
* sfp
, int req_size
)
2108 Sg_scatter_hold
*schp
= &sfp
->reserve
;
2110 SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size
));
2112 if (req_size
< PAGE_SIZE
)
2113 req_size
= PAGE_SIZE
;
2114 if (0 == sg_build_indirect(schp
, sfp
, req_size
))
2117 sg_remove_scat(schp
);
2118 req_size
>>= 1; /* divide by 2 */
2119 } while (req_size
> (PAGE_SIZE
/ 2));
2123 sg_link_reserve(Sg_fd
* sfp
, Sg_request
* srp
, int size
)
2125 Sg_scatter_hold
*req_schp
= &srp
->data
;
2126 Sg_scatter_hold
*rsv_schp
= &sfp
->reserve
;
2127 struct scatterlist
*sg
= rsv_schp
->buffer
;
2131 SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size
));
2134 for (k
= 0; k
< rsv_schp
->k_use_sg
; ++k
, ++sg
) {
2137 sfp
->save_scat_len
= num
;
2139 req_schp
->k_use_sg
= k
+ 1;
2140 req_schp
->sglist_len
= rsv_schp
->sglist_len
;
2141 req_schp
->buffer
= rsv_schp
->buffer
;
2143 req_schp
->bufflen
= size
;
2144 req_schp
->b_malloc_len
= rsv_schp
->b_malloc_len
;
2150 if (k
>= rsv_schp
->k_use_sg
)
2151 SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
2155 sg_unlink_reserve(Sg_fd
* sfp
, Sg_request
* srp
)
2157 Sg_scatter_hold
*req_schp
= &srp
->data
;
2158 Sg_scatter_hold
*rsv_schp
= &sfp
->reserve
;
2160 SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
2161 (int) req_schp
->k_use_sg
));
2162 if ((rsv_schp
->k_use_sg
> 0) && (req_schp
->k_use_sg
> 0)) {
2163 struct scatterlist
*sg
= rsv_schp
->buffer
;
2165 if (sfp
->save_scat_len
> 0)
2166 (sg
+ (req_schp
->k_use_sg
- 1))->length
=
2167 (unsigned) sfp
->save_scat_len
;
2169 SCSI_LOG_TIMEOUT(1, printk ("sg_unlink_reserve: BAD save_scat_len\n"));
2171 req_schp
->k_use_sg
= 0;
2172 req_schp
->bufflen
= 0;
2173 req_schp
->buffer
= NULL
;
2174 req_schp
->sglist_len
= 0;
2175 sfp
->save_scat_len
= 0;
2180 sg_get_rq_mark(Sg_fd
* sfp
, int pack_id
)
2183 unsigned long iflags
;
2185 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
2186 for (resp
= sfp
->headrp
; resp
; resp
= resp
->nextrp
) {
2187 /* look for requests that are ready + not SG_IO owned */
2188 if ((1 == resp
->done
) && (!resp
->sg_io_owned
) &&
2189 ((-1 == pack_id
) || (resp
->header
.pack_id
== pack_id
))) {
2190 resp
->done
= 2; /* guard against other readers */
2194 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2198 #ifdef CONFIG_SCSI_PROC_FS
2200 sg_get_nth_request(Sg_fd
* sfp
, int nth
)
2203 unsigned long iflags
;
2206 read_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
2207 for (k
= 0, resp
= sfp
->headrp
; resp
&& (k
< nth
);
2208 ++k
, resp
= resp
->nextrp
) ;
2209 read_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2214 /* always adds to end of list */
2216 sg_add_request(Sg_fd
* sfp
)
2219 unsigned long iflags
;
2221 Sg_request
*rp
= sfp
->req_arr
;
2223 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
2226 memset(rp
, 0, sizeof (Sg_request
));
2231 if (0 == sfp
->cmd_q
)
2232 resp
= NULL
; /* command queuing disallowed */
2234 for (k
= 0; k
< SG_MAX_QUEUE
; ++k
, ++rp
) {
2238 if (k
< SG_MAX_QUEUE
) {
2239 memset(rp
, 0, sizeof (Sg_request
));
2241 while (resp
->nextrp
)
2242 resp
= resp
->nextrp
;
2250 resp
->nextrp
= NULL
;
2251 resp
->header
.duration
= jiffies_to_msecs(jiffies
);
2253 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2257 /* Return of 1 for found; 0 for not found */
2259 sg_remove_request(Sg_fd
* sfp
, Sg_request
* srp
)
2261 Sg_request
*prev_rp
;
2263 unsigned long iflags
;
2266 if ((!sfp
) || (!srp
) || (!sfp
->headrp
))
2268 write_lock_irqsave(&sfp
->rq_list_lock
, iflags
);
2269 prev_rp
= sfp
->headrp
;
2270 if (srp
== prev_rp
) {
2271 sfp
->headrp
= prev_rp
->nextrp
;
2272 prev_rp
->parentfp
= NULL
;
2275 while ((rp
= prev_rp
->nextrp
)) {
2277 prev_rp
->nextrp
= rp
->nextrp
;
2278 rp
->parentfp
= NULL
;
2285 write_unlock_irqrestore(&sfp
->rq_list_lock
, iflags
);
2289 #ifdef CONFIG_SCSI_PROC_FS
2291 sg_get_nth_sfp(Sg_device
* sdp
, int nth
)
2294 unsigned long iflags
;
2297 read_lock_irqsave(&sg_dev_arr_lock
, iflags
);
2298 for (k
= 0, resp
= sdp
->headfp
; resp
&& (k
< nth
);
2299 ++k
, resp
= resp
->nextfp
) ;
2300 read_unlock_irqrestore(&sg_dev_arr_lock
, iflags
);
static Sg_fd *
sg_add_sfp(Sg_device * sdp, int dev)
{
	Sg_fd *sfp;
	unsigned long iflags;

	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
	if (!sfp)
		return NULL;

	init_waitqueue_head(&sfp->read_wait);
	rwlock_init(&sfp->rq_list_lock);

	sfp->timeout = SG_DEFAULT_TIMEOUT;
	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
	    sdp->device->host->unchecked_isa_dma : 1;
	sfp->cmd_q = SG_DEF_COMMAND_Q;
	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
	sfp->parentdp = sdp;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	if (!sdp->headfp)
		sdp->headfp = sfp;
	else {			/* add to tail of existing list */
		Sg_fd *pfp = sdp->headfp;

		while (pfp->nextfp)
			pfp = pfp->nextfp;
		pfp->nextfp = sfp;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
	sg_build_reserve(sfp, sg_big_buff);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));
	return sfp;
}
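/*
 * Each new file descriptor gets its own reserve buffer: sg_build_reserve()
 * above sizes it from the current sg_big_buff setting, and the resulting
 * bufflen/k_use_sg are reported through SCSI_LOG_TIMEOUT when scsi logging
 * is enabled.
 */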
static void
__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
	Sg_fd *fp;
	Sg_fd *prev_fp;

	prev_fp = sdp->headfp;
	if (sfp == prev_fp)
		sdp->headfp = prev_fp->nextfp;
	else {
		while ((fp = prev_fp->nextfp)) {
			if (sfp == fp) {
				prev_fp->nextfp = fp->nextfp;
				break;
			}
			prev_fp = fp;
		}
	}
	if (sfp->reserve.bufflen > 0) {
		SCSI_LOG_TIMEOUT(6,
			printk("__sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
			(int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
		sg_remove_scat(&sfp->reserve);
	}
	sfp->parentdp = NULL;
	SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp:    sfp=0x%p\n", sfp));
	kfree(sfp);
}
/* Returns 0 in normal case, 1 when detached and sdp object removed */
static int
sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
	Sg_request *srp;
	Sg_request *tsrp;
	int dirty = 0;
	int res = 0;

	for (srp = sfp->headrp; srp; srp = tsrp) {
		tsrp = srp->nextrp;
		if (sg_srp_done(srp, sfp))
			sg_finish_rem_req(srp);
		else
			++dirty;
	}
	if (0 == dirty) {
		unsigned long iflags;

		write_lock_irqsave(&sg_dev_arr_lock, iflags);
		__sg_remove_sfp(sdp, sfp);
		if (sdp->detached && (NULL == sdp->headfp)) {
			int k, maxd;

			maxd = sg_dev_max;
			for (k = 0; k < maxd; ++k) {
				if (sdp == sg_dev_arr[k])
					break;
			}
			if (k < maxd)
				sg_dev_arr[k] = NULL;
			kfree((char *) sdp);
			res = 1;
		}
		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	} else {
		/* MOD_INC's to inhibit unloading sg and associated adapter driver */
		/* only bump the access_count if we actually succeeded in
		 * throwing another counter on the host module */
		scsi_device_get(sdp->device);	/* XXX: retval ignored? */
		sfp->closed = 1;	/* flag dirty state on this fd */
		SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
				  dirty));
	}
	return res;
}
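/*
 * When requests are still outstanding ("dirty") the fd cannot be torn down
 * yet: the branch above instead marks it closed and takes an extra
 * reference on the scsi_device so the host driver stays pinned, leaving the
 * remaining teardown for later, once those requests finish.
 */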
static int
sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
		if (srp->res_used)
			break;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp ? 1 : 0;
}
/* If retSzp==NULL want exact size or fail */
static struct page *
sg_page_malloc(int rqSz, int lowDma, int *retSzp)
{
	struct page *resp = NULL;
	int page_mask;
	int order, a_size;
	int resSz = rqSz;

	if (rqSz <= 0)
		return resp;

	if (lowDma)
		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_COMP | __GFP_NOWARN;
	else
		page_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
	     order++, a_size <<= 1) ;
	resp = alloc_pages(page_mask, order);
	while ((!resp) && order && retSzp) {
		--order;
		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
		resp = alloc_pages(page_mask, order);	/* try half */
		resSz = a_size;
	}
	if (resp) {
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			memset(page_address(resp), 0, resSz);
		if (retSzp)
			*retSzp = resSz;
	}
	return resp;
}
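/*
 * The loop above rounds the request up to the next power-of-two number of
 * pages: assuming 4 KiB pages, a 70000 byte request walks a_size through
 * 4096..131072 and settles on order 5 (128 KiB). If that allocation fails
 * and the caller passed retSzp, progressively smaller orders are tried and
 * the size actually obtained is returned through *retSzp.
 */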
static void
sg_page_free(struct page *page, int size)
{
	int order, a_size;

	if (!page)
		return;
	for (order = 0, a_size = PAGE_SIZE; a_size < size;
	     order++, a_size <<= 1) ;
	__free_pages(page, order);
}
#ifndef MAINTENANCE_IN_CMD
#define MAINTENANCE_IN_CMD 0xa3
#endif

static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
	INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
	READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
	SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
};

static int
sg_allow_access(unsigned char opcode, char dev_type)
{
	int k;

	if (TYPE_SCANNER == dev_type)	/* TYPE_ROM maybe burner */
		return 1;
	for (k = 0; k < sizeof (allow_ops); ++k) {
		if (opcode == allow_ops[k])
			return 1;
	}
	return 0;
}
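/*
 * allow_ops is the whitelist of non-destructive SCSI opcodes; elsewhere in
 * the driver sg_allow_access() is consulted so that commands outside this
 * list are refused on file descriptors opened read-only (TYPE_SCANNER
 * devices are allowed any opcode).
 */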
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_last_dev(void)
{
	int k;
	unsigned long iflags;

	read_lock_irqsave(&sg_dev_arr_lock, iflags);
	for (k = sg_dev_max - 1; k >= 0; --k)
		if (sg_dev_arr[k] && sg_dev_arr[k]->device)
			break;
	read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	return k + 1;		/* origin 1 */
}
#endif
static Sg_device *
sg_get_dev(int dev)
{
	Sg_device *sdp = NULL;
	unsigned long iflags;

	if (sg_dev_arr && (dev >= 0)) {
		read_lock_irqsave(&sg_dev_arr_lock, iflags);
		if (dev < sg_dev_max)
			sdp = sg_dev_arr[dev];
		read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	}
	return sdp;
}
#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";
static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
			          size_t count, loff_t *off);
static struct file_operations adio_fops = {
	/* .owner, .read and .llseek added in sg_proc_init() */
	.open = sg_proc_single_open_adio,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static struct file_operations dressz_fops = {
	.open = sg_proc_single_open_dressz,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static struct file_operations version_fops = {
	.open = sg_proc_single_open_version,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static struct file_operations devhdr_fops = {
	.open = sg_proc_single_open_devhdr,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static struct file_operations dev_fops = {
	.open = sg_proc_open_dev,
	.release = seq_release,
};
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static struct file_operations devstrs_fops = {
	.open = sg_proc_open_devstrs,
	.release = seq_release,
};
static struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static struct file_operations debug_fops = {
	.open = sg_proc_open_debug,
	.release = seq_release,
};
static struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = sg_proc_seq_show_debug,
};
struct sg_proc_leaf {
	const char * name;
	struct file_operations * fops;
};

static struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};
static int
sg_proc_init(void)
{
	int k, mask;
	int num_leaves =
	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
	struct proc_dir_entry *pdep;
	struct sg_proc_leaf * leaf;

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;
	for (k = 0; k < num_leaves; ++k) {
		leaf = &sg_proc_leaf_arr[k];
		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
		if (pdep) {
			leaf->fops->owner = THIS_MODULE,
			leaf->fops->read = seq_read,
			leaf->fops->llseek = seq_lseek,
			pdep->proc_fops = leaf->fops;
		}
	}
	return 0;
}
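/*
 * sg_proc_init() populates /proc/scsi/sg with the leaves listed above, so
 * with procfs support enabled the driver state can be inspected from a
 * shell, for example:
 *
 *     cat /proc/scsi/sg/version
 *     cat /proc/scsi/sg/device_hdr /proc/scsi/sg/devices
 *     echo 1 > /proc/scsi/sg/allow_dio        (privileged)
 *
 * Only the leaves whose file_operations provide a .write handler
 * (allow_dio and def_reserved_size) are created writable.
 */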
static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves =
	    sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}
static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int num;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
	return count;
}
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int num;
	unsigned long k = ULONG_MAX;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	k = simple_strtoul(buff, NULL, 10);
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}
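/*
 * Writes to /proc/scsi/sg/def_reserved_size therefore only affect file
 * descriptors opened afterwards, and values above 1 MB are rejected with
 * -ERANGE; e.g. "echo 131072 > /proc/scsi/sg/def_reserved_size" requests a
 * 128 KiB reserve buffer on subsequent opens.
 */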
static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}
struct sg_proc_deviter {
	loff_t	index;
	size_t	max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (! it)
		return NULL;

	if (NULL == sg_dev_arr)
		return NULL;
	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}
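/*
 * The three helpers above, together with the sg_proc_seq_show_* functions,
 * implement the usual seq_file iterator protocol: start() allocates and
 * positions a sg_proc_deviter cursor, next() advances it one device at a
 * time, show() prints one record, and stop() frees the cursor. The same
 * start/next/stop trio is shared by the devices, device_strs and debug
 * seq_operations tables.
 */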
static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;

	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			      scsidp->host->host_no, scsidp->channel,
			      scsidp->id, scsidp->lun, (int) scsidp->type,
			      1,
			      (int) scsidp->queue_depth,
			      (int) scsidp->device_busy,
			      (int) scsi_device_online(scsidp));
	else
		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	return 0;
}
static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;

	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_printf(s, "<no active device>\n");
	return 0;
}
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, m, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char * cp;
	unsigned int ms;

	for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k + 1,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) fp->low_dma);
		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan, (int) fp->closed);
		for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = "     mmap>> ";
				else
					cp = "     rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = "     dio>> ";
				else
					cp = "     ";
			}
			seq_printf(s, cp);
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_printf(s, srp->done ?
				   ((1 == srp->done) ? "rcv:" : "fin:")
				   : "act:");
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					(new_interface ? hp->timeout :
						  jiffies_to_msecs(fp->timeout)),
					(ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (0 == m)
			seq_printf(s, "     No requests active\n");
	}
}
static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}
static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;

	if (it && (0 == it->index)) {
		seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
			   "(origin 1)\n", sg_dev_max, (int)it->max);
		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
	}
	sdp = it ? sg_get_dev(it->index) : NULL;
	if (sdp) {
		struct scsi_device *scsidp = sdp->device;

		if (NULL == scsidp) {
			seq_printf(s, "device %d detached ??\n",
				   (int)it->index);
			return 0;
		}

		if (sg_get_nth_sfp(sdp, 0)) {
			seq_printf(s, " >>> device=%s ",
				   sdp->disk->disk_name);
			if (sdp->detached)
				seq_printf(s, "detached pending close ");
			else
				seq_printf
				    (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
				     scsidp->host->host_no,
				     scsidp->channel, scsidp->id,
				     scsidp->lun,
				     scsidp->host->hostt->emulated);
			seq_printf(s, " sg_tablesize=%d excl=%d\n",
				   sdp->sg_tablesize, sdp->exclude);
		}
		sg_proc_debug_helper(s, sdp);
	}
	return 0;
}
#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);