/*
 *  History:
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *           to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 * Original driver (sg.c):
 *        Copyright (C) 1992 Lawrence Foard
 * Version 2 and 3 extensions to driver:
 *        Copyright (C) 1998 - 2005 Douglas Gilbert
 *
 *  Modified  19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Devfs support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 */

static int sg_version_num = 30534;	/* 2 digits for each component */
#define SG_VERSION_STR "3.5.34"
/*
 *  D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
 *      - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
 *        the kernel/module needs to be built with CONFIG_SCSI_LOGGING
 *        (otherwise the macros compile to empty statements).
 */
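/*
 * Illustrative user-space sketch of the v3 interface handled below (not
 * part of the original source; error handling omitted): fill a
 * sg_io_hdr_t, set interface_id to 'S' and issue the SG_IO ioctl on an
 * open /dev/sg<n> file descriptor:
 *
 *	sg_io_hdr_t io;
 *	unsigned char cdb[6] = { 0 };		(TEST UNIT READY, opcode 0x00)
 *	memset(&io, 0, sizeof(io));
 *	io.interface_id = 'S';			(checked in sg_new_write())
 *	io.cmd_len = sizeof(cdb);		(must be >= 6)
 *	io.cmdp = cdb;
 *	io.dxfer_direction = SG_DXFER_NONE;
 *	io.timeout = 5000;			(milliseconds; see msecs_to_jiffies() use)
 *	ioctl(fd, SG_IO, &io);			(fd from open("/dev/sg0", O_RDWR))
 */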
#include <linux/module.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#include "scsi.h"
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>

#include "scsi_logging.h"
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
static char *sg_version_date = "20061027";

static int sg_proc_init(void);
static void sg_proc_cleanup(void);
#endif
#define SG_ALLOW_DIO_DEF 0

#define SG_MAX_DEVS 32768
/*
 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
 * Then when using 32 bit integers x * m may overflow during the calculation.
 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
 * calculates the same, but prevents the overflow when both m and d
 * are "small" numbers (like HZ and USER_HZ).
 * Of course an overflow is unavoidable if the result of muldiv doesn't fit
 * in 32 bits.
 */
#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))

#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
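/*
 * Worked example of the rearrangement above: with HZ = 1000 and
 * USER_HZ = 100, converting x = 3000000 user ticks the naive way needs
 * x * 1000 = 3000000000, which overflows a signed 32 bit int, whereas
 * MULDIV(x, 1000, 100) evaluates as
 *	((3000000 % 100) * 1000) / 100 + (3000000 / 100) * 1000
 *	= 0 + 30000 * 1000 = 30000000
 * and stays comfortably in range.
 */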
int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
   /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
   of this size (or less if there is not enough memory) will be reserved
   for use by this file descriptor. [Deprecated usage: this variable is also
   readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
   the kernel (i.e. it is not a module).] */
static int def_reserved_size = -1;	/* picks up init parameter */
static int sg_allow_dio = SG_ALLOW_DIO_DEF;

static int scatter_elem_sz = SG_SCATTER_SZ;
static int scatter_elem_sz_prev = SG_SCATTER_SZ;

#define SG_SECTOR_SZ 512
static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);

static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock
						   file descriptor list for device */

static struct class_interface sg_interface = {
	.add_dev	= sg_add,
	.remove_dev	= sg_remove,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
	unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
	unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
	unsigned bufflen;	/* Size of (aggregate) data buffer */
	struct page **pages;
	int page_order;
	char dio_in_use;	/* 0->indirect IO (or mmap), 1->dio */
	unsigned char cmd_opcode; /* first byte of command */
} Sg_scatter_hold;
struct sg_device;		/* forward declarations */
struct sg_fd;

typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
	struct sg_fd *parentfp;	/* NULL -> not in use */
	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
	unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
	char res_used;		/* 1 -> using reserve buffer, 0 -> not ... */
	char orphan;		/* 1 -> drop on sight, 0 -> normal */
	char sg_io_owned;	/* 1 -> packet belongs to SG_IO */
	volatile char done;	/* 0->before bh, 1->before read, 2->read */
	struct request *rq;
	struct bio *bio;
	struct execute_work ew;
} Sg_request;
typedef struct sg_fd {		/* holds the state of a file descriptor */
	struct list_head sfd_siblings;
	struct sg_device *parentdp;	/* owning device */
	wait_queue_head_t read_wait;	/* queue read until command done */
	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
	unsigned save_scat_len;	/* original length of trunc. scat. element */
	Sg_request *headrp;	/* head of request slist, NULL->empty */
	struct fasync_struct *async_qp;	/* used by asynchronous notification */
	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
	char low_dma;		/* as in parent but possibly overridden to 1 */
	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
	volatile char closed;	/* 1 -> fd closed but request(s) outstanding */
	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
	char next_cmd_len;	/* 0 -> automatic (def), >0 -> use on next write() */
	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
	char mmap_called;	/* 0 -> mmap() never called on this fd */
	struct kref f_ref;
	struct execute_work ew;
} Sg_fd;
typedef struct sg_device { /* holds the state of each scsi generic device */
	struct scsi_device *device;
	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */
	int sg_tablesize;	/* adapter's max scatter-gather table size */
	u32 index;		/* device index number */
	struct list_head sfds;
	volatile char detached;	/* 0->attached, 1->detached pending removal */
	volatile char exclude;	/* opened for exclusive access */
	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */
	struct gendisk *disk;
	struct cdev * cdev;	/* char_dev [sysfs: /sys/cdev/major/sg<n>] */
	struct kref d_ref;
} Sg_device;
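/*
 * Lifetime summary (derived from the code below): every open fd pins its
 * Sg_fd via f_ref and the owning Sg_device via d_ref; sg_remove_sfp()
 * and sg_device_destroy() run when the respective counts reach zero.
 */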
/* tasklet or soft irq callback */
static void sg_rq_end_io(struct request *rq, int uptodate);
static int sg_start_req(Sg_request *srp, unsigned char *cmd);
static int sg_finish_rem_req(Sg_request * srp);
static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
			   Sg_request * srp);
static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
			const char __user *buf, size_t count, int blocking,
			int read_only, int sg_io_owned, Sg_request **o_srp);
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
			   unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
static void sg_remove_scat(Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_put_dev(Sg_device *sdp);

#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
	struct sg_fd *sfp = (struct sg_fd *)filp->private_data;
	struct request_queue *q = sfp->parentdp->device->request_queue;

	if (sfp->parentdp->device->type == TYPE_SCANNER)
		return 0;

	return blk_verify_command(&q->cmd_filter,
				  cmd, filp->f_mode & FMODE_WRITE);
}
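/*
 * open() semantics implemented below: O_EXCL waits (or fails with
 * -EBUSY under O_NONBLOCK) until no other fd has the device open, and a
 * plain open likewise waits while another fd holds an O_EXCL lock.
 * O_EXCL together with O_RDONLY is rejected with -EPERM.
 */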
static int
sg_open(struct inode *inode, struct file *filp)
{
	int dev = iminor(inode);
	int flags = filp->f_flags;
	struct request_queue *q;
	Sg_device *sdp;
	Sg_fd *sfp;
	int res;
	int retval;

	lock_kernel();
	nonseekable_open(inode, filp);
	SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
	sdp = sg_get_dev(dev);
	if (IS_ERR(sdp)) {
		retval = PTR_ERR(sdp);
		sdp = NULL;
		goto sg_put;
	}

	/* This driver's module count bumped by fops_get in <linux/fs.h> */
	/* Prevent the device driver from vanishing while we sleep */
	retval = scsi_device_get(sdp->device);
	if (retval)
		goto sg_put;

	if (!((flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device))) {
		retval = -ENXIO;
		/* we are in error recovery for this device */
		goto error_out;
	}

	if (flags & O_EXCL) {
		if (O_RDONLY == (flags & O_ACCMODE)) {
			retval = -EPERM; /* Can't lock it with read only access */
			goto error_out;
		}
		if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait,
					   ((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	} else if (sdp->exclude) {	/* some other fd has an exclusive lock on dev */
		if (flags & O_NONBLOCK) {
			retval = -EBUSY;
			goto error_out;
		}
		res = 0;
		__wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
					   res);
		if (res) {
			retval = res;	/* -ERESTARTSYS because signal hit process */
			goto error_out;
		}
	}
	if (sdp->detached) {
		retval = -ENODEV;
		goto error_out;
	}
	if (list_empty(&sdp->sfds)) {	/* no existing opens on this device */
		sdp->sgdebug = 0;
		q = sdp->device->request_queue;
		sdp->sg_tablesize = min(queue_max_hw_segments(q),
					queue_max_phys_segments(q));
	}
	if ((sfp = sg_add_sfp(sdp, dev)))
		filp->private_data = sfp;
	else {
		if (flags & O_EXCL) {
			sdp->exclude = 0;	/* undo if error */
			wake_up_interruptible(&sdp->o_excl_wait);
		}
		retval = -ENOMEM;
		goto error_out;
	}
	retval = 0;
error_out:
	if (retval)
		scsi_device_put(sdp->device);
sg_put:
	if (sdp)
		sg_put_dev(sdp);
	unlock_kernel();
	return retval;
}
/* Following function was formerly called 'sg_close' */
static int
sg_release(struct inode *inode, struct file *filp)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));

	sfp->closed = 1;

	sdp->exclude = 0;
	wake_up_interruptible(&sdp->o_excl_wait);

	kref_put(&sfp->f_ref, sg_remove_sfp);
	return 0;
}
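/*
 * read() dispatch: requests issued through the v3 sg_io_hdr interface
 * carry a non-zero interface_id ('S') and are handed to sg_new_read();
 * everything else is reported via the original struct sg_header layout
 * built further down.
 */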
static ssize_t
sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int req_pack_id = -1;
	sg_io_hdr_t *hp;
	struct sg_header *old_hdr = NULL;
	int retval = 0;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (!old_hdr)
			return -ENOMEM;
		if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		if (old_hdr->reply_len < 0) {
			if (count >= SZ_SG_IO_HDR) {
				sg_io_hdr_t *new_hdr;
				new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
				if (!new_hdr) {
					retval = -ENOMEM;
					goto free_old_hdr;
				}
				retval = __copy_from_user
				    (new_hdr, buf, SZ_SG_IO_HDR);
				req_pack_id = new_hdr->pack_id;
				kfree(new_hdr);
				if (retval) {
					retval = -EFAULT;
					goto free_old_hdr;
				}
			}
		} else
			req_pack_id = old_hdr->pack_id;
	}
	srp = sg_get_rq_mark(sfp, req_pack_id);
	if (!srp) {		/* now wait on packet to arrive */
		if (sdp->detached) {
			retval = -ENODEV;
			goto free_old_hdr;
		}
		if (filp->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto free_old_hdr;
		}
		while (1) {
			retval = 0; /* following macro beats race condition */
			__wait_event_interruptible(sfp->read_wait,
				(sdp->detached ||
				(srp = sg_get_rq_mark(sfp, req_pack_id))),
				retval);
			if (sdp->detached) {
				retval = -ENODEV;
				goto free_old_hdr;
			}
			if (0 == retval)
				break;

			/* -ERESTARTSYS as signal hit process */
			goto free_old_hdr;
		}
	}
	if (srp->header.interface_id != '\0') {
		retval = sg_new_read(sfp, buf, count, srp);
		goto free_old_hdr;
	}

	hp = &srp->header;
	if (old_hdr == NULL) {
		old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
		if (! old_hdr) {
			retval = -ENOMEM;
			goto free_old_hdr;
		}
	}
	memset(old_hdr, 0, SZ_SG_HEADER);
	old_hdr->reply_len = (int) hp->timeout;
	old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
	old_hdr->pack_id = hp->pack_id;
	old_hdr->twelve_byte =
	    ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
	old_hdr->target_status = hp->masked_status;
	old_hdr->host_status = hp->host_status;
	old_hdr->driver_status = hp->driver_status;
	if ((CHECK_CONDITION & hp->masked_status) ||
	    (DRIVER_SENSE & hp->driver_status))
		memcpy(old_hdr->sense_buffer, srp->sense_b,
		       sizeof (old_hdr->sense_buffer));
	switch (hp->host_status) {
	/* This setup of 'result' is for backward compatibility and is best
	   ignored by the user who should use target, host + driver status */
	case DID_OK:
	case DID_PASSTHROUGH:
	case DID_SOFT_ERROR:
		old_hdr->result = 0;
		break;
	case DID_NO_CONNECT:
	case DID_BUS_BUSY:
	case DID_TIME_OUT:
		old_hdr->result = EBUSY;
		break;
	case DID_BAD_TARGET:
	case DID_ABORT:
	case DID_PARITY:
	case DID_RESET:
	case DID_BAD_INTR:
		old_hdr->result = EIO;
		break;
	case DID_ERROR:
		old_hdr->result = (srp->sense_b[0] == 0 &&
				   hp->masked_status == GOOD) ? 0 : EIO;
		break;
	default:
		old_hdr->result = EIO;
		break;
	}

	/* Now copy the result back to the user buffer.  */
	if (count >= SZ_SG_HEADER) {
		if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		buf += SZ_SG_HEADER;
		if (count > old_hdr->reply_len)
			count = old_hdr->reply_len;
		if (count > SZ_SG_HEADER) {
			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
				retval = -EFAULT;
				goto free_old_hdr;
			}
		}
	} else
		count = (old_hdr->result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);
	retval = count;
free_old_hdr:
	kfree(old_hdr);
	return retval;
}
static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
	sg_io_hdr_t *hp = &srp->header;
	int err = 0;
	int len;

	if (count < SZ_SG_IO_HDR) {
		err = -EINVAL;
		goto err_out;
	}
	hp->sb_len_wr = 0;
	if ((hp->mx_sb_len > 0) && hp->sbp) {
		if ((CHECK_CONDITION & hp->masked_status) ||
		    (DRIVER_SENSE & hp->driver_status)) {
			int sb_len = SCSI_SENSE_BUFFERSIZE;
			sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
			len = 8 + (int) srp->sense_b[7];	/* Additional sense length field */
			len = (len > sb_len) ? sb_len : len;
			if (copy_to_user(hp->sbp, srp->sense_b, len)) {
				err = -EFAULT;
				goto err_out;
			}
			hp->sb_len_wr = len;
		}
	}
	if (hp->masked_status || hp->host_status || hp->driver_status)
		hp->info |= SG_INFO_CHECK;
	if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
		err = -EFAULT;
		goto err_out;
	}
err_out:
	err = sg_finish_rem_req(srp);
	return (0 == err) ? count : err;
}
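/*
 * v1/v2 write() layout: an old-interface request arrives as
 * [struct sg_header][SCSI command][data-out bytes]; reply_len bounds the
 * data a later read() may return (and, per the "structure abuse" notes
 * below, is also reused as the timeout field). A negative reply_len
 * selects the newer sg_io_hdr interface via sg_new_write().
 */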
static ssize_t
sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
{
	int mxsize, cmd_size, k;
	int input_size, blocking;
	unsigned char opcode;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	struct sg_header old_hdr;
	sg_io_hdr_t *hp;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
				   sdp->disk->disk_name, (int) count));
	if (sdp->detached)
		return -ENODEV;
	if (!((filp->f_flags & O_NONBLOCK) ||
	      scsi_block_when_processing_errors(sdp->device)))
		return -ENXIO;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	if (count < SZ_SG_HEADER)
		return -EIO;
	if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
		return -EFAULT;
	blocking = !(filp->f_flags & O_NONBLOCK);
	if (old_hdr.reply_len < 0)
		return sg_new_write(sfp, filp, buf, count,
				    blocking, 0, 0, NULL);
	if (count < (SZ_SG_HEADER + 6))
		return -EIO;	/* The minimum scsi command length is 6 bytes. */

	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
		return -EDOM;
	}
	buf += SZ_SG_HEADER;
	__get_user(opcode, buf);
	if (sfp->next_cmd_len > 0) {
		if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
			SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
			sfp->next_cmd_len = 0;
			sg_remove_request(sfp, srp);
			return -EIO;
		}
		cmd_size = sfp->next_cmd_len;
		sfp->next_cmd_len = 0;	/* reset so only this write() affected */
	} else {
		cmd_size = COMMAND_SIZE(opcode);	/* based on SCSI command group */
		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
			cmd_size = 12;
	}
	SCSI_LOG_TIMEOUT(4, printk(
		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
	/* Determine buffer size.  */
	input_size = count - cmd_size;
	mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
	mxsize -= SZ_SG_HEADER;
	input_size -= SZ_SG_HEADER;
	if (input_size < 0) {
		sg_remove_request(sfp, srp);
		return -EIO;	/* User did not pass enough bytes for this command. */
	}
	hp = &srp->header;
	hp->interface_id = '\0';	/* indicator of old interface tunnelled */
	hp->cmd_len = (unsigned char) cmd_size;
	hp->iovec_count = 0;
	hp->mx_sb_len = 0;
	if (input_size > 0)
		hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
		    SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
	else
		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
	hp->dxfer_len = mxsize;
	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
		hp->dxferp = (char __user *)buf + cmd_size;
	else
		hp->dxferp = NULL;
	hp->sbp = NULL;
	hp->timeout = old_hdr.reply_len;	/* structure abuse ... */
	hp->flags = input_size;	/* structure abuse ... */
	hp->pack_id = old_hdr.pack_id;
	hp->usr_ptr = NULL;
	if (__copy_from_user(cmnd, buf, cmd_size))
		return -EFAULT;
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
		static char cmd[TASK_COMM_LEN];
		if (strcmp(current->comm, cmd) && printk_ratelimit()) {
			printk(KERN_WARNING
			       "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
			       "guessing data in;\n" KERN_WARNING "   "
			       "program %s not setting count and/or reply_len properly\n",
			       old_hdr.reply_len - (int)SZ_SG_HEADER,
			       input_size, (unsigned int) cmnd[0],
			       current->comm);
			strcpy(cmd, current->comm);
		}
	}
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
	return (k < 0) ? k : count;
}
static ssize_t
sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
		 size_t count, int blocking, int read_only, int sg_io_owned,
		 Sg_request **o_srp)
{
	int k;
	Sg_request *srp;
	sg_io_hdr_t *hp;
	unsigned char cmnd[MAX_COMMAND_SIZE];
	int timeout;
	unsigned long ul_timeout;

	if (count < SZ_SG_IO_HDR)
		return -EINVAL;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT; /* protects following copy_from_user()s + get_user()s */

	sfp->cmd_q = 1;	/* when sg_io_hdr seen, set command queuing on */
	if (!(srp = sg_add_request(sfp))) {
		SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
		return -EDOM;
	}
	srp->sg_io_owned = sg_io_owned;
	hp = &srp->header;
	if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (hp->interface_id != 'S') {
		sg_remove_request(sfp, srp);
		return -ENOSYS;
	}
	if (hp->flags & SG_FLAG_MMAP_IO) {
		if (hp->dxfer_len > sfp->reserve.bufflen) {
			sg_remove_request(sfp, srp);
			return -ENOMEM;	/* MMAP_IO size must fit in reserve buffer */
		}
		if (hp->flags & SG_FLAG_DIRECT_IO) {
			sg_remove_request(sfp, srp);
			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
		}
		if (sg_res_in_use(sfp)) {
			sg_remove_request(sfp, srp);
			return -EBUSY;	/* reserve buffer already being used */
		}
	}
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
		return -EMSGSIZE;
	}
	if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;	/* protects following copy_from_user()s + get_user()s */
	}
	if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (read_only && sg_allow_access(file, cmnd)) {
		sg_remove_request(sfp, srp);
		return -EPERM;
	}
	k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
	if (k < 0)
		return k;
	if (o_srp)
		*o_srp = srp;
	return count;
}
static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
		unsigned char *cmnd, int timeout, int blocking)
{
	int k, data_dir;
	Sg_device *sdp = sfp->parentdp;
	sg_io_hdr_t *hp = &srp->header;

	srp->data.cmd_opcode = cmnd[0];	/* hold opcode of command */
	hp->status = 0;
	hp->masked_status = 0;
	hp->msg_status = 0;
	hp->info = 0;
	hp->host_status = 0;
	hp->driver_status = 0;
	hp->resid = 0;
	SCSI_LOG_TIMEOUT(4, printk("sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
			  (int) cmnd[0], (int) hp->cmd_len));

	k = sg_start_req(srp, cmnd);
	if (k) {
		SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		return k;	/* probably out of space --> ENOMEM */
	}
	if (sdp->detached) {
		sg_finish_rem_req(srp);
		return -ENODEV;
	}

	switch (hp->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		data_dir = DMA_FROM_DEVICE;
		break;
	case SG_DXFER_TO_DEV:
		data_dir = DMA_TO_DEVICE;
		break;
	case SG_DXFER_UNKNOWN:
		data_dir = DMA_BIDIRECTIONAL;
		break;
	default:
		data_dir = DMA_NONE;
		break;
	}
	hp->duration = jiffies_to_msecs(jiffies);

	srp->rq->timeout = timeout;
	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
			      srp->rq, 1, sg_rq_end_io);
	return 0;
}
static int
sg_ioctl(struct inode *inode, struct file *filp,
	 unsigned int cmd_in, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int __user *ip = p;
	int result, val, read_only;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;

	SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
				   sdp->disk->disk_name, (int) cmd_in));
	read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));

	switch (cmd_in) {
	case SG_IO:
		{
			int blocking = 1;	/* ignore O_NONBLOCK flag */

			if (sdp->detached)
				return -ENODEV;
			if (!scsi_block_when_processing_errors(sdp->device))
				return -ENXIO;
			if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
				return -EFAULT;
			result =
			    sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
					 blocking, read_only, 1, &srp);
			if (result < 0)
				return result;
			while (1) {
				result = 0;	/* following macro to beat race condition */
				__wait_event_interruptible(sfp->read_wait,
					(srp->done || sdp->detached),
					result);
				if (sdp->detached)
					return -ENODEV;
				write_lock_irq(&sfp->rq_list_lock);
				if (srp->done) {
					srp->done = 2;
					write_unlock_irq(&sfp->rq_list_lock);
					break;
				}
				srp->orphan = 1;
				write_unlock_irq(&sfp->rq_list_lock);
				return result;	/* -ERESTARTSYS because signal hit process */
			}
			result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
			return (result < 0) ? result : 0;
		}
	case SG_SET_TIMEOUT:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EIO;
		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
			val = MULDIV (INT_MAX, USER_HZ, HZ);
		sfp->timeout_user = val;
		sfp->timeout = MULDIV (val, HZ, USER_HZ);

		return 0;
	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
				/* strange ..., for backward compatibility */
		return sfp->timeout_user;
	case SG_SET_FORCE_LOW_DMA:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val) {
			sfp->low_dma = 1;
			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
				val = (int) sfp->reserve.bufflen;
				sg_remove_scat(&sfp->reserve);
				sg_build_reserve(sfp, val);
			}
		} else {
			if (sdp->detached)
				return -ENODEV;
			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
		}
		return 0;
	case SG_GET_LOW_DMA:
		return put_user((int) sfp->low_dma, ip);
	case SG_GET_SCSI_ID:
		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
			return -EFAULT;
		else {
			sg_scsi_id_t __user *sg_idp = p;

			if (sdp->detached)
				return -ENODEV;
			__put_user((int) sdp->device->host->host_no,
				   &sg_idp->host_no);
			__put_user((int) sdp->device->channel,
				   &sg_idp->channel);
			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
			__put_user((int) sdp->device->lun, &sg_idp->lun);
			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
			__put_user((short) sdp->device->host->cmd_per_lun,
				   &sg_idp->h_cmd_per_lun);
			__put_user((short) sdp->device->queue_depth,
				   &sg_idp->d_queue_depth);
			__put_user(0, &sg_idp->unused[0]);
			__put_user(0, &sg_idp->unused[1]);
			return 0;
		}
	case SG_SET_FORCE_PACK_ID:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->force_packid = val ? 1 : 0;
		return 0;
	case SG_GET_PACK_ID:
		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
			return -EFAULT;
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned)) {
				read_unlock_irqrestore(&sfp->rq_list_lock,
						       iflags);
				__put_user(srp->header.pack_id, ip);
				return 0;
			}
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		__put_user(-1, ip);
		return 0;
	case SG_GET_NUM_WAITING:
		read_lock_irqsave(&sfp->rq_list_lock, iflags);
		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
			if ((1 == srp->done) && (!srp->sg_io_owned))
				++val;
		}
		read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
		return put_user(val, ip);
	case SG_GET_SG_TABLESIZE:
		return put_user(sdp->sg_tablesize, ip);
	case SG_SET_RESERVED_SIZE:
		result = get_user(val, ip);
		if (result)
			return result;
		if (val < 0)
			return -EINVAL;
		val = min_t(int, val,
			    queue_max_sectors(sdp->device->request_queue) * 512);
		if (val != sfp->reserve.bufflen) {
			if (sg_res_in_use(sfp) || sfp->mmap_called)
				return -EBUSY;
			sg_remove_scat(&sfp->reserve);
			sg_build_reserve(sfp, val);
		}
		return 0;
	case SG_GET_RESERVED_SIZE:
		val = min_t(int, sfp->reserve.bufflen,
			    queue_max_sectors(sdp->device->request_queue) * 512);
		return put_user(val, ip);
	case SG_SET_COMMAND_Q:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->cmd_q = val ? 1 : 0;
		return 0;
	case SG_GET_COMMAND_Q:
		return put_user((int) sfp->cmd_q, ip);
	case SG_SET_KEEP_ORPHAN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->keep_orphan = val;
		return 0;
	case SG_GET_KEEP_ORPHAN:
		return put_user((int) sfp->keep_orphan, ip);
	case SG_NEXT_CMD_LEN:
		result = get_user(val, ip);
		if (result)
			return result;
		sfp->next_cmd_len = (val > 0) ? val : 0;
		return 0;
	case SG_GET_VERSION_NUM:
		return put_user(sg_version_num, ip);
	case SG_GET_ACCESS_COUNT:
		/* faked - we don't have a real access count anymore */
		val = (sdp->device ? 1 : 0);
		return put_user(val, ip);
	case SG_GET_REQUEST_TABLE:
		if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
			return -EFAULT;
		else {
			sg_req_info_t *rinfo;
			unsigned int ms;

			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
					GFP_KERNEL);
			if (!rinfo)
				return -ENOMEM;
			read_lock_irqsave(&sfp->rq_list_lock, iflags);
			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
			     ++val, srp = srp ? srp->nextrp : srp) {
				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
				if (srp) {
					rinfo[val].req_state = srp->done + 1;
					rinfo[val].problem =
					    srp->header.masked_status &
					    srp->header.host_status &
					    srp->header.driver_status;
					if (srp->done)
						rinfo[val].duration =
							srp->header.duration;
					else {
						ms = jiffies_to_msecs(jiffies);
						rinfo[val].duration =
						    (ms > srp->header.duration) ?
						    (ms - srp->header.duration) : 0;
					}
					rinfo[val].orphan = srp->orphan;
					rinfo[val].sg_io_owned =
							srp->sg_io_owned;
					rinfo[val].pack_id =
							srp->header.pack_id;
					rinfo[val].usr_ptr =
							srp->header.usr_ptr;
				}
			}
			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
			result = __copy_to_user(p, rinfo,
						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
			result = result ? -EFAULT : 0;
			kfree(rinfo);
			return result;
		}
	case SG_EMULATED_HOST:
		if (sdp->detached)
			return -ENODEV;
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SG_SCSI_RESET:
		if (sdp->detached)
			return -ENODEV;
		if (filp->f_flags & O_NONBLOCK) {
			if (scsi_host_in_recovery(sdp->device->host))
				return -EBUSY;
		} else if (!scsi_block_when_processing_errors(sdp->device))
			return -EBUSY;
		result = get_user(val, ip);
		if (result)
			return result;
		if (SG_SCSI_RESET_NOTHING == val)
			return 0;
		switch (val) {
		case SG_SCSI_RESET_DEVICE:
			val = SCSI_TRY_RESET_DEVICE;
			break;
		case SG_SCSI_RESET_TARGET:
			val = SCSI_TRY_RESET_TARGET;
			break;
		case SG_SCSI_RESET_BUS:
			val = SCSI_TRY_RESET_BUS;
			break;
		case SG_SCSI_RESET_HOST:
			val = SCSI_TRY_RESET_HOST;
			break;
		default:
			return -EINVAL;
		}
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return (scsi_reset_provider(sdp->device, val) ==
			SUCCESS) ? 0 : -EIO;
	case SCSI_IOCTL_SEND_COMMAND:
		if (sdp->detached)
			return -ENODEV;
		if (read_only) {
			unsigned char opcode = WRITE_6;
			Scsi_Ioctl_Command __user *siocp = p;

			if (copy_from_user(&opcode, siocp->data, 1))
				return -EFAULT;
			if (sg_allow_access(filp, &opcode))
				return -EPERM;
		}
		return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
	case SG_SET_DEBUG:
		result = get_user(val, ip);
		if (result)
			return result;
		sdp->sgdebug = (char) val;
		return 0;
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_TRANSFORM:
		if (sdp->detached)
			return -ENODEV;
		return scsi_ioctl(sdp->device, cmd_in, p);
	case BLKSECTGET:
		return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
				ip);
	case BLKTRACESETUP:
		return blk_trace_setup(sdp->device->request_queue,
				       sdp->disk->disk_name,
				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
				       NULL,
				       (char *)arg);
	case BLKTRACESTART:
		return blk_trace_startstop(sdp->device->request_queue, 1);
	case BLKTRACESTOP:
		return blk_trace_startstop(sdp->device->request_queue, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(sdp->device->request_queue);
	default:
		if (read_only)
			return -EPERM;	/* don't know so take safe approach */
		return scsi_ioctl(sdp->device, cmd_in, p);
	}
}
#ifdef CONFIG_COMPAT
static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
	Sg_device *sdp;
	Sg_fd *sfp;
	struct scsi_device *sdev;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;

	sdev = sdp->device;
	if (sdev->host->hostt->compat_ioctl) {
		int ret;

		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);

		return ret;
	}

	return -ENOIOCTLCMD;
}
#endif
static unsigned int
sg_poll(struct file *filp, poll_table * wait)
{
	unsigned int res = 0;
	Sg_device *sdp;
	Sg_fd *sfp;
	Sg_request *srp;
	int count = 0;
	unsigned long iflags;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
	    || sfp->closed)
		return POLLERR;
	poll_wait(filp, &sfp->read_wait, wait);
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}
static int
sg_fasync(int fd, struct file *filp, int mode)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));

	return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
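/*
 * mmap() note: sg_mmap() installs no pages up front, it only checks that
 * the mapping fits inside the per-fd reserve buffer; sg_vma_fault() then
 * resolves each faulting offset to the matching reserve-buffer page.
 */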
static int
sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	Sg_fd *sfp;
	unsigned long offset, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k, length;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return VM_FAULT_SIGBUS;
	rsv_schp = &sfp->reserve;
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rsv_schp->bufflen)
		return VM_FAULT_SIGBUS;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	sa = vma->vm_start;
	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ? len : length;
		if (offset < len) {
			struct page *page = nth_page(rsv_schp->pages[k],
						     offset >> PAGE_SHIFT);
			get_page(page);	/* increment page count */
			vmf->page = page;
			return 0; /* success */
		}
		sa += len;
		offset -= len;
	}

	return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct sg_mmap_vm_ops = {
	.fault = sg_vma_fault,
};

static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k, length;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	sa = vma->vm_start;
	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ? len : length;
		sa += len;
	}

	sfp->mmap_called = 1;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}
static void sg_rq_end_io_usercontext(struct work_struct *work)
{
	struct sg_request *srp = container_of(work, struct sg_request, ew.work);
	struct sg_fd *sfp = srp->parentfp;

	sg_finish_rem_req(srp);
	kref_put(&sfp->f_ref, sg_remove_sfp);
}

/*
 * This function is a "bottom half" handler that is called by the mid
 * level when a command is completed (or has failed).
 */
static void sg_rq_end_io(struct request *rq, int uptodate)
{
	struct sg_request *srp = rq->end_io_data;
	Sg_device *sdp;
	Sg_fd *sfp;
	unsigned long iflags;
	unsigned int ms;
	char *sense;
	int result, resid, done = 1;

	if (WARN_ON(srp->done != 0))
		return;

	sfp = srp->parentfp;
	if (WARN_ON(sfp == NULL))
		return;

	sdp = sfp->parentdp;
	if (unlikely(sdp->detached))
		printk(KERN_INFO "sg_rq_end_io: device detached\n");

	sense = rq->sense;
	result = rq->errors;
	resid = rq->resid_len;

	SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
		sdp->disk->disk_name, srp->header.pack_id, result));
	srp->header.resid = resid;
	ms = jiffies_to_msecs(jiffies);
	srp->header.duration = (ms > srp->header.duration) ?
		(ms - srp->header.duration) : 0;
	if (0 != result) {
		struct scsi_sense_hdr sshdr;

		srp->header.status = 0xff & result;
		srp->header.masked_status = status_byte(result);
		srp->header.msg_status = msg_byte(result);
		srp->header.host_status = host_byte(result);
		srp->header.driver_status = driver_byte(result);
		if ((sdp->sgdebug > 0) &&
		    ((CHECK_CONDITION == srp->header.masked_status) ||
		     (COMMAND_TERMINATED == srp->header.masked_status)))
			__scsi_print_sense("sg_cmd_done", sense,
					   SCSI_SENSE_BUFFERSIZE);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(result) != 0
		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
		    && sshdr.sense_key == UNIT_ATTENTION
		    && sdp->device->removable) {
			/* Detected possible disc change. Set the bit - this */
			/* may be used if there are filesystems using this device */
			sdp->device->changed = 1;
		}
	}
	/* Rely on write phase to clean out srp status values, so no "else" */

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	if (unlikely(srp->orphan)) {
		if (sfp->keep_orphan)
			srp->sg_io_owned = 0;
		else
			done = 0;
	}
	srp->done = done;
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (likely(done)) {
		/* Now wake up any sg_read() that is waiting for this
		 * packet.
		 */
		wake_up_interruptible(&sfp->read_wait);
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
		kref_put(&sfp->f_ref, sg_remove_sfp);
	} else {
		INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
		schedule_work(&srp->ew.work);
	}
}
static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};

static struct class *sg_sysfs_class;

static int sg_sysfs_valid = 0;
static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	struct request_queue *q = scsidp->request_queue;
	Sg_device *sdp;
	unsigned long iflags;
	int error;
	u32 k;

	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
		printk(KERN_WARNING "idr expansion Sg_device failure\n");
		error = -ENOMEM;
		goto out;
	}

	write_lock_irqsave(&sg_index_lock, iflags);

	error = idr_get_new(&sg_index_idr, sdp, &k);
	if (error) {
		write_unlock_irqrestore(&sg_index_lock, iflags);
		printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
		       error);
		goto out;
	}

	if (unlikely(k >= SG_MAX_DEVS))
		goto overflow;

	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->disk = disk;
	sdp->device = scsidp;
	INIT_LIST_HEAD(&sdp->sfds);
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = min(queue_max_hw_segments(q),
				queue_max_phys_segments(q));
	sdp->index = k;
	kref_init(&sdp->d_ref);

	write_unlock_irqrestore(&sg_index_lock, iflags);

	error = 0;
out:
	if (error) {
		kfree(sdp);
		return ERR_PTR(error);
	}
	return sdp;

overflow:
	idr_remove(&sg_index_idr, k);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	sdev_printk(KERN_WARNING, scsidp,
		    "Unable to attach sg device type=%d, minor "
		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
	error = -ENODEV;
	goto out;
}
static int
sg_add(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev * cdev = NULL;
	int error;
	unsigned long iflags;

	disk = alloc_disk(1);
	if (!disk) {
		printk(KERN_WARNING "alloc_disk failed\n");
		return -ENOMEM;
	}
	disk->major = SCSI_GENERIC_MAJOR;

	error = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_WARNING "cdev_alloc failed\n");
		goto out;
	}
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;

	sdp = sg_alloc(disk, scsidp);
	if (IS_ERR(sdp)) {
		printk(KERN_WARNING "sg_alloc failed\n");
		error = PTR_ERR(sdp);
		goto out;
	}

	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
	if (error)
		goto cdev_add_err;

	sdp->cdev = cdev;
	if (sg_sysfs_valid) {
		struct device *sg_class_member;

		sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
						MKDEV(SCSI_GENERIC_MAJOR,
						      sdp->index),
						sdp, "%s", disk->disk_name);
		if (IS_ERR(sg_class_member)) {
			printk(KERN_ERR "sg_add: "
			       "device_create failed\n");
			error = PTR_ERR(sg_class_member);
			goto cdev_add_err;
		}
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
		if (error)
			printk(KERN_ERR "sg_add: unable to make symlink "
					"'generic' back to sg%d\n", sdp->index);
	} else
		printk(KERN_WARNING "sg_add: sg_sys Invalid\n");

	sdev_printk(KERN_NOTICE, scsidp,
		    "Attached scsi generic sg%d type %d\n", sdp->index,
		    scsidp->type);

	dev_set_drvdata(cl_dev, sdp);

	return 0;

cdev_add_err:
	write_lock_irqsave(&sg_index_lock, iflags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	kfree(sdp);

out:
	put_disk(disk);
	if (cdev)
		cdev_del(cdev);
	return error;
}
static void sg_device_destroy(struct kref *kref)
{
	struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
	unsigned long flags;

	/* CAUTION!  Note that the device can still be found via idr_find()
	 * even though the refcount is 0.  Therefore, do idr_remove() BEFORE
	 * any other cleanup.
	 */

	write_lock_irqsave(&sg_index_lock, flags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, flags);

	SCSI_LOG_TIMEOUT(3,
		printk("sg_device_destroy: %s\n",
			sdp->disk->disk_name));

	put_disk(sdp->disk);
	kfree(sdp);
}
static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	Sg_device *sdp = dev_get_drvdata(cl_dev);
	unsigned long iflags;
	Sg_fd *sfp;

	if (!sdp || sdp->detached)
		return;

	SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));

	/* Need a write lock to set sdp->detached. */
	write_lock_irqsave(&sg_index_lock, iflags);
	sdp->detached = 1;
	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
		wake_up_interruptible(&sfp->read_wait);
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
	}
	write_unlock_irqrestore(&sg_index_lock, iflags);

	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
	device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
	cdev_del(sdp->cdev);
	sdp->cdev = NULL;

	sg_put_dev(sdp);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
		   S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
		"size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
static int __init
init_sg(void)
{
	int rc;

	if (scatter_elem_sz < PAGE_SIZE) {
		scatter_elem_sz = PAGE_SIZE;
		scatter_elem_sz_prev = scatter_elem_sz;
	}
	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;
	else
		def_reserved_size = sg_big_buff;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				    SG_MAX_DEVS, "sg");
	if (rc)
		return rc;
	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
	if ( IS_ERR(sg_sysfs_class) ) {
		rc = PTR_ERR(sg_sysfs_class);
		goto err_out;
	}
	sg_sysfs_valid = 1;
	rc = scsi_register_interface(&sg_interface);
	if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
		sg_proc_init();
#endif				/* CONFIG_SCSI_PROC_FS */
		return 0;
	}
	class_destroy(sg_sysfs_class);
err_out:
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
	return rc;
}
static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	sg_proc_cleanup();
#endif				/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	sg_sysfs_valid = 0;
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	idr_destroy(&sg_index_idr);
}
static int sg_start_req(Sg_request *srp, unsigned char *cmd)
{
	int res;
	struct request *rq;
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	unsigned int iov_count = hp->iovec_count;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	struct request_queue *q = sfp->parentdp->device->request_queue;
	struct rq_map_data *md, map_data;
	int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;

	SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
				   dxfer_len));

	rq = blk_get_request(q, rw, GFP_ATOMIC);
	if (!rq)
		return -ENOMEM;

	memcpy(rq->cmd, cmd, hp->cmd_len);

	rq->cmd_len = hp->cmd_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	srp->rq = rq;
	rq->end_io_data = srp;
	rq->sense = srp->sense_b;
	rq->retries = SG_DEFAULT_RETRIES;

	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
		return 0;

	if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
	    dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
	    !sfp->parentdp->device->host->unchecked_isa_dma &&
	    blk_rq_aligned(q, hp->dxferp, dxfer_len))
		md = NULL;
	else
		md = &map_data;

	if (md) {
		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
			sg_link_reserve(sfp, srp, dxfer_len);
		else {
			res = sg_build_indirect(req_schp, sfp, dxfer_len);
			if (res)
				return res;
		}

		md->pages = req_schp->pages;
		md->page_order = req_schp->page_order;
		md->nr_entries = req_schp->k_use_sg;
		md->offset = 0;
		md->null_mapped = hp->dxferp ? 0 : 1;
	}

	if (iov_count) {
		int len, size = sizeof(struct sg_iovec) * iov_count;
		struct iovec *iov;

		iov = kmalloc(size, GFP_ATOMIC);
		if (!iov)
			return -ENOMEM;

		if (copy_from_user(iov, hp->dxferp, size)) {
			kfree(iov);
			return -EFAULT;
		}

		len = iov_length(iov, iov_count);
		if (hp->dxfer_len < len) {
			iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
			len = hp->dxfer_len;
		}

		res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
					  iov_count,
					  len, GFP_ATOMIC);
		kfree(iov);
	} else
		res = blk_rq_map_user(q, rq, md, hp->dxferp,
				      hp->dxfer_len, GFP_ATOMIC);

	if (!res) {
		srp->bio = rq->bio;

		if (!md) {
			req_schp->dio_in_use = 1;
			hp->info |= SG_INFO_DIRECT_IO;
		}
	}
	return res;
}
static int sg_finish_rem_req(Sg_request * srp)
{
	int ret = 0;

	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(req_schp);

	if (srp->rq) {
		if (srp->bio)
			ret = blk_rq_unmap_user(srp->bio);

		blk_put_request(srp->rq);
	}

	sg_remove_request(sfp, srp);

	return ret;
}
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int sg_bufflen = tablesize * sizeof(struct page *);
	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

	schp->pages = kzalloc(sg_bufflen, gfp_flags);
	if (!schp->pages)
		return -ENOMEM;
	schp->sglist_len = sg_bufflen;
	return tablesize;	/* number of scat_gath elements allocated */
}
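/*
 * Allocation strategy: sg_build_indirect() fills the scatter list with
 * blocks of 2^order pages; if alloc_pages() fails it frees what it
 * gathered so far and retries the whole loop with a smaller order, so
 * big requests degrade to many small pieces instead of failing outright.
 */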
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size, order;
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

	if (blk_size < 0)
		return -EFAULT;
	if (0 == blk_size)
		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
				   buff_size, blk_size));

	/* N.B. ret_sz carried into this block ... */
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems < 0)
		return mx_sc_elems;	/* most likely -ENOMEM */

	num = scatter_elem_sz;
	if (unlikely(num != scatter_elem_sz_prev)) {
		if (num < PAGE_SIZE) {
			scatter_elem_sz = PAGE_SIZE;
			scatter_elem_sz_prev = PAGE_SIZE;
		} else
			scatter_elem_sz_prev = num;
	}

	if (sfp->low_dma)
		gfp_mask |= GFP_DMA;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		gfp_mask |= __GFP_ZERO;

	order = get_order(num);
retry:
	ret_sz = 1 << (PAGE_SHIFT + order);

	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
	     k++, rem_sz -= ret_sz) {

		num = (rem_sz > scatter_elem_sz_prev) ?
			scatter_elem_sz_prev : rem_sz;

		schp->pages[k] = alloc_pages(gfp_mask, order);
		if (!schp->pages[k])
			goto out;

		if (num == scatter_elem_sz_prev) {
			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
				scatter_elem_sz = ret_sz;
				scatter_elem_sz_prev = ret_sz;
			}
		}

		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
				 "ret_sz=%d\n", k, num, ret_sz));
	}		/* end of for loop */

	schp->page_order = order;
	schp->k_use_sg = k;
	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
			 "rem_sz=%d\n", k, rem_sz));

	schp->bufflen = blk_size;
	if (rem_sz > 0)	/* must have failed */
		return -ENOMEM;
	return 0;
out:
	for (i = 0; i < k; i++)
		__free_pages(schp->pages[i], order);

	if (--order >= 0)
		goto retry;

	return -ENOMEM;
}
static void
sg_remove_scat(Sg_scatter_hold * schp)
{
	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
	if (schp->pages && schp->sglist_len > 0) {
		if (!schp->dio_in_use) {
			int k;

			for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
				SCSI_LOG_TIMEOUT(5, printk(
				    "sg_remove_scat: k=%d, pg=0x%p\n",
				    k, schp->pages[k]));
				__free_pages(schp->pages[k], schp->page_order);
			}

			kfree(schp->pages);
		}
	}
	memset(schp, 0, sizeof (*schp));
}
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
	Sg_scatter_hold *schp = &srp->data;
	int k, num;

	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
				   num_read_xfer));
	if ((!outp) || (num_read_xfer <= 0))
		return 0;

	num = 1 << (PAGE_SHIFT + schp->page_order);
	for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
		if (num > num_read_xfer) {
			if (__copy_to_user(outp, page_address(schp->pages[k]),
					   num_read_xfer))
				return -EFAULT;
			break;
		} else {
			if (__copy_to_user(outp, page_address(schp->pages[k]),
					   num))
				return -EFAULT;
			num_read_xfer -= num;
			if (num_read_xfer <= 0)
				break;
			outp += num;
		}
	}

	return 0;
}
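/*
 * Reserve buffer sizing: sg_build_reserve() tries the requested size and
 * keeps halving it on allocation failure until a build succeeds or the
 * size would fall below a page.
 */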
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
	Sg_scatter_hold *schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
	do {
		if (req_size < PAGE_SIZE)
			req_size = PAGE_SIZE;
		if (0 == sg_build_indirect(schp, sfp, req_size))
			return;
		else
			sg_remove_scat(schp);
		req_size >>= 1;	/* divide by 2 */
	} while (req_size > (PAGE_SIZE / 2));
}
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	int k, num, rem;

	srp->res_used = 1;
	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
	rem = size;

	num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg; k++) {
		if (rem <= num) {
			req_schp->k_use_sg = k + 1;
			req_schp->sglist_len = rsv_schp->sglist_len;
			req_schp->pages = rsv_schp->pages;

			req_schp->bufflen = size;
			req_schp->page_order = rsv_schp->page_order;
			break;
		} else
			rem -= num;
	}

	if (k >= rsv_schp->k_use_sg)
		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
}
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
				   (int) req_schp->k_use_sg));
	req_schp->k_use_sg = 0;
	req_schp->bufflen = 0;
	req_schp->pages = NULL;
	req_schp->page_order = 0;
	req_schp->sglist_len = 0;
	sfp->save_scat_len = 0;
	srp->res_used = 0;
}

static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
	Sg_request *resp;
	unsigned long iflags;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
		/* look for requests that are ready + not SG_IO owned */
		if ((1 == resp->done) && (!resp->sg_io_owned) &&
		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
			resp->done = 2;	/* guard against other readers */
			break;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}
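
/*
 * Note on the done states tested above (as used by this driver): 0 means
 * the command is still in flight, 1 means the response is ready to be
 * read, and 2 marks a response already claimed, so concurrent readers on
 * the same fd cannot return it twice.
 */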

/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
	int k;
	unsigned long iflags;
	Sg_request *resp;
	Sg_request *rp = sfp->req_arr;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	resp = sfp->headrp;
	if (!resp) {
		memset(rp, 0, sizeof (Sg_request));
		rp->parentfp = sfp;
		resp = rp;
		sfp->headrp = resp;
	} else {
		if (0 == sfp->cmd_q)
			resp = NULL;	/* command queuing disallowed */
		else {
			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
				if (!rp->parentfp)
					break;
			}
			if (k < SG_MAX_QUEUE) {
				memset(rp, 0, sizeof (Sg_request));
				rp->parentfp = sfp;
				while (resp->nextrp)
					resp = resp->nextrp;
				resp->nextrp = rp;
				resp = rp;
			} else
				resp = NULL;
		}
	}
	if (resp) {
		resp->nextrp = NULL;
		resp->header.duration = jiffies_to_msecs(jiffies);
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}
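
/*
 * Requests come from the fixed req_arr[SG_MAX_QUEUE] pool; a slot with a
 * NULL parentfp is free.  Consequence: with command queuing enabled at
 * most SG_MAX_QUEUE requests may be outstanding per fd, and with cmd_q
 * off a second sg_add_request() simply returns NULL.
 */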

/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
	Sg_request *prev_rp;
	Sg_request *rp;
	unsigned long iflags;
	int res = 0;

	if ((!sfp) || (!srp) || (!sfp->headrp))
		return res;
	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	prev_rp = sfp->headrp;
	if (srp == prev_rp) {
		sfp->headrp = prev_rp->nextrp;
		prev_rp->parentfp = NULL;
		res = 1;
	} else {
		while ((rp = prev_rp->nextrp)) {
			if (srp == rp) {
				prev_rp->nextrp = rp->nextrp;
				rp->parentfp = NULL;
				res = 1;
				break;
			}
			prev_rp = rp;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return res;
}

static Sg_fd *
sg_add_sfp(Sg_device * sdp, int dev)
{
	Sg_fd *sfp;
	unsigned long iflags;
	int bufflen;

	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
	if (!sfp)
		return NULL;

	init_waitqueue_head(&sfp->read_wait);
	rwlock_init(&sfp->rq_list_lock);

	kref_init(&sfp->f_ref);
	sfp->timeout = SG_DEFAULT_TIMEOUT;
	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
	    sdp->device->host->unchecked_isa_dma : 1;
	sfp->cmd_q = SG_DEF_COMMAND_Q;
	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
	sfp->parentdp = sdp;
	write_lock_irqsave(&sg_index_lock, iflags);
	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
	if (unlikely(sg_big_buff != def_reserved_size))
		sg_big_buff = def_reserved_size;

	bufflen = min_t(int, sg_big_buff,
			queue_max_sectors(sdp->device->request_queue) * 512);
	sg_build_reserve(sfp, bufflen);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));

	kref_get(&sdp->d_ref);
	__module_get(THIS_MODULE);
	return sfp;
}
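
/*
 * Reserve sizing example (illustrative figures): if the request queue
 * reports max_sectors of 256, the cap is 256 * 512 = 128 KiB; with
 * sg_big_buff at its usual 32 KiB default, the smaller value (32 KiB)
 * is reserved for this fd.
 */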

static void sg_remove_sfp_usercontext(struct work_struct *work)
{
	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
	struct sg_device *sdp = sfp->parentdp;

	/* Cleanup any responses which were never read(). */
	while (sfp->headrp)
		sg_finish_rem_req(sfp->headrp);

	if (sfp->reserve.bufflen > 0) {
		SCSI_LOG_TIMEOUT(6,
			printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
			       (int) sfp->reserve.bufflen,
			       (int) sfp->reserve.k_use_sg));
		sg_remove_scat(&sfp->reserve);
	}

	SCSI_LOG_TIMEOUT(6,
		printk("sg_remove_sfp: %s, sfp=0x%p\n",
		       sdp->disk->disk_name,
		       sfp));
	kfree(sfp);

	scsi_device_put(sdp->device);
	sg_put_dev(sdp);
	module_put(THIS_MODULE);
}

static void sg_remove_sfp(struct kref *kref)
{
	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
	struct sg_device *sdp = sfp->parentdp;
	unsigned long iflags;

	write_lock_irqsave(&sg_index_lock, iflags);
	list_del(&sfp->sfd_siblings);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	wake_up_interruptible(&sdp->o_excl_wait);

	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
	schedule_work(&sfp->ew.work);
}
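
/*
 * The final kref_put() on an sg_fd may run in atomic context (e.g. from
 * command completion), so the heavyweight teardown is deferred to a
 * workqueue and performed in sg_remove_sfp_usercontext() above.
 */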

static int
sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
		if (srp->res_used)
			break;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp ? 1 : 0;
}

#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
	int *k = data;

	if (*k < id)
		*k = id;

	return 0;
}

static int
sg_last_dev(void)
{
	int k = -1;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;	/* origin 1 */
}
#endif

/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
	return idr_find(&sg_index_idr, dev);
}

static Sg_device *sg_get_dev(int dev)
{
	struct sg_device *sdp;
	unsigned long flags;

	read_lock_irqsave(&sg_index_lock, flags);
	sdp = sg_lookup_dev(dev);
	if (!sdp)
		sdp = ERR_PTR(-ENXIO);
	else if (sdp->detached) {
		/* If sdp->detached, then the refcount may already be 0, in
		 * which case it would be a bug to do kref_get().
		 */
		sdp = ERR_PTR(-ENODEV);
	} else
		kref_get(&sdp->d_ref);
	read_unlock_irqrestore(&sg_index_lock, flags);

	return sdp;
}

static void sg_put_dev(struct sg_device *sdp)
{
	kref_put(&sdp->d_ref, sg_device_destroy);
}

#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static struct file_operations adio_fops = {
	/* .owner, .read and .llseek added in sg_proc_init() */
	.open = sg_proc_single_open_adio,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static struct file_operations dressz_fops = {
	.open = sg_proc_single_open_dressz,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static struct file_operations version_fops = {
	.open = sg_proc_single_open_version,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static struct file_operations devhdr_fops = {
	.open = sg_proc_single_open_devhdr,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void * dev_seq_start(struct seq_file *s, loff_t *pos);
static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static struct file_operations dev_fops = {
	.open = sg_proc_open_dev,
	.release = seq_release,
};
static struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static struct file_operations devstrs_fops = {
	.open = sg_proc_open_devstrs,
	.release = seq_release,
};
static struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static struct file_operations debug_fops = {
	.open = sg_proc_open_debug,
	.release = seq_release,
};
static struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_debug,
};


struct sg_proc_leaf {
	const char * name;
	struct file_operations * fops;
};

static struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};

static int
sg_proc_init(void)
{
	int k, mask;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
	struct sg_proc_leaf * leaf;

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;
	for (k = 0; k < num_leaves; ++k) {
		leaf = &sg_proc_leaf_arr[k];
		mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		leaf->fops->owner = THIS_MODULE;
		leaf->fops->read = seq_read;
		leaf->fops->llseek = seq_lseek;
		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
	}
	return 0;
}
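
/*
 * Resulting layout (from sg_proc_leaf_arr above): the entries appear as
 * /proc/scsi/sg/allow_dio, debug, def_reserved_size, device_hdr,
 * devices, device_strs and version.  Only leaves whose fops provide a
 * .write method are created writable (mode 0644 rather than 0444).
 */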

static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}

static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}

static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int num;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
	return count;
}

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int num;
	unsigned long k = ULONG_MAX;
	char buff[11];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	num = (count < 10) ? count : 10;
	if (copy_from_user(buff, buffer, num))
		return -EFAULT;
	buff[num] = '\0';
	k = simple_strtoul(buff, NULL, 10);
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}
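
/*
 * Example usage (illustrative shell session; requires CAP_SYS_ADMIN and
 * CAP_SYS_RAWIO):
 *
 *     echo 65536 > /proc/scsi/sg/def_reserved_size
 *
 * Values above 1048576 are rejected with -ERANGE by the check above, and
 * the new size only affects file descriptors opened afterwards.
 */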

static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}

struct sg_proc_deviter {
	loff_t	index;
	size_t	max;
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (! it)
		return NULL;

	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}

static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			      scsidp->host->host_no, scsidp->channel,
			      scsidp->id, scsidp->lun, (int) scsidp->type,
			      1,	/* opens */
			      (int) scsidp->queue_depth,
			      (int) scsidp->device_busy,
			      (int) scsi_device_online(scsidp));
	else
		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
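
/*
 * A /proc/scsi/sg/devices line might look like (illustrative values):
 *
 *     0    0    0    0    0    1    1    0    1
 *
 * i.e. host, chan, id, lun, type, opens, qdepth, busy, online, matching
 * the header emitted by sg_proc_seq_show_devhdr().
 */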

static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_printf(s, "<no active device>\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
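
/*
 * Example /proc/scsi/sg/device_strs line (illustrative strings):
 *
 *     ATA         SAMSUNG HD501LJ     CR10
 *
 * i.e. the 8-char vendor, 16-char model and 4-char revision fields from
 * the device's INQUIRY data, tab separated.
 */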

/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, m, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char * cp;
	unsigned int ms;

	k = 0;
	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
		k++;
		read_lock(&fp->rq_list_lock); /* irqs already disabled */
		seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) fp->low_dma);
		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan, (int) fp->closed);
		for (m = 0, srp = fp->headrp;
		     srp != NULL;
		     ++m, srp = srp->nextrp) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = "     mmap>> ";
				else
					cp = "     rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = "     dio>> ";
				else
					cp = "     ";
			}
			seq_printf(s, cp);
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_printf(s, srp->done ?
				   ((1 == srp->done) ? "rcv:" : "fin:")
				   : "act:");
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					(new_interface ? hp->timeout :
						jiffies_to_msecs(fp->timeout)),
					(ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (0 == m)
			seq_printf(s, "     No requests active\n");
		read_unlock(&fp->rq_list_lock);
	}
}
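
/*
 * Sample /proc/scsi/sg/debug fragment for an idle fd (illustrative):
 *
 *    FD(1): timeout=60000ms bufflen=32768 (res)sgat=1 low_dma=0
 *    cmd_q=1 f_packid=0 k_orphan=0 closed=0
 *      No requests active
 */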

static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}

static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	unsigned long iflags;

	if (it && (0 == it->index)) {
		seq_printf(s, "max_active_device=%d(origin 1)\n",
			   (int)it->max);
		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
	}

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && !list_empty(&sdp->sfds)) {
		struct scsi_device *scsidp = sdp->device;

		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
		if (sdp->detached)
			seq_printf(s, "detached pending close ");
		else
			seq_printf
			    (s, "scsi%d chan=%d id=%d lun=%d em=%d",
			     scsidp->host->host_no,
			     scsidp->channel, scsidp->id,
			     scsidp->lun,
			     scsidp->host->hostt->emulated);
		seq_printf(s, " sg_tablesize=%d excl=%d\n",
			   sdp->sg_tablesize, sdp->exclude);
		sg_proc_debug_helper(s, sdp);
	}
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}
#endif				/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);