/*
 *  Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
 *  to allow user process control of SCSI devices.
 *  Development Sponsored by Killy Corp. NY NY
 *
 *  Borrows code from st driver.
 */
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include <scsi/scsi_ioctl.h>
#include <scsi/sg.h>
int sg_big_buff = SG_BIG_BUFF; /* for now, sg_big_buff is read-only through sysctl */
static int sg_init(void);
static int sg_attach(Scsi_Device *);
static int sg_detect(Scsi_Device *);
static void sg_detach(Scsi_Device *);
struct Scsi_Device_Template sg_template = {NULL, NULL, "sg", NULL, 0xff,
                                           SCSI_GENERIC_MAJOR, 0, 0, 0, 0,
                                           NULL, sg_attach, sg_detach};
static char *big_buff = NULL;
static struct wait_queue *big_wait;   /* wait for buffer available */
static int big_inuse = 0;
struct scsi_generic
{
    Scsi_Device *device;             /* the attached SCSI device */
    int users;                       /* how many people have it open? */
    struct wait_queue *generic_wait; /* wait for device to be available */
    struct wait_queue *read_wait;    /* wait for response */
    struct wait_queue *write_wait;   /* wait for free buffer */
    int timeout;                     /* current default value for device */
    int buff_len;                    /* length of current buffer */
    char *buff;                      /* the buffer */
    struct sg_header header;         /* header of pending command */
    char exclude;                    /* opened for exclusive access */
    char pending;                    /* don't accept writes now */
    char complete;                   /* command complete, allow a read */
};
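/*
 * Life cycle of a request, as implemented by sg_write(), sg_command_done()
 * and sg_read() below: sg_write() sets 'pending' when it queues a command,
 * sg_command_done() sets 'complete' once the low-level driver has finished,
 * and sg_read() clears both after copying the reply back to user space,
 * waking any writer sleeping on write_wait.
 */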
static struct scsi_generic *scsi_generics = NULL;
static void sg_free(char *buff, int size);
static int sg_ioctl(struct inode * inode, struct file * file,
                    unsigned int cmd_in, unsigned long arg)
{
    int result;
    int dev = MINOR(inode->i_rdev);

    if ((dev < 0) || (dev >= sg_template.dev_max))
        return -ENXIO;

    /*
     * If we are in the middle of error recovery, then don't allow any
     * access to this device.  Also, error recovery *may* have taken the
     * device offline, in which case all further access is prohibited.
     */
    if (!scsi_block_when_processing_errors(scsi_generics[dev].device))
        return -ENXIO;

    switch (cmd_in)
    {
    case SG_SET_TIMEOUT:
        result = verify_area(VERIFY_READ, (const void *) arg, sizeof(int));
        if (result)
            return result;
        get_user(scsi_generics[dev].timeout, (int *) arg);
        return 0;
    case SG_GET_TIMEOUT:
        return scsi_generics[dev].timeout;
    case SG_EMULATED_HOST:
        return put_user(scsi_generics[dev].device->host->hostt->emulated,
                        (int *) arg);
    case SCSI_IOCTL_SEND_COMMAND:
        /*
         * Allow SCSI_IOCTL_SEND_COMMAND without checking suser() since the
         * user already has read/write access to the generic device and so
         * can execute arbitrary SCSI commands.
         */
        return scsi_ioctl_send_command(scsi_generics[dev].device,
                                       (void *) arg);
    default:
        return scsi_ioctl(scsi_generics[dev].device, cmd_in, (void *) arg);
    }
}
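/*
 * Illustrative user-space sketch of the timeout ioctls handled above; it is
 * not compiled into the driver (hence the #if 0) and the device node name is
 * only an example.  Timeouts are expressed in jiffies, i.e. units of 1/HZ
 * seconds.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
    int fd = open("/dev/sg0", O_RDWR);  /* any sg device node */
    int t = 6000;                       /* 60 seconds when HZ is 100 */

    ioctl(fd, SG_SET_TIMEOUT, &t);      /* driver copies the int from *arg */
    t = ioctl(fd, SG_GET_TIMEOUT, 0);   /* value is the ioctl return code */
    return 0;
}
#endif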
static int sg_open(struct inode * inode, struct file * filp)
{
    int dev = MINOR(inode->i_rdev);
    int flags = filp->f_flags;

    if (dev >= sg_template.dev_max || !scsi_generics[dev].device)
        return -ENXIO;

    if (!scsi_block_when_processing_errors(scsi_generics[dev].device))
        return -ENXIO;

    if (O_RDWR != (flags & O_ACCMODE))
        return -EACCES;

    /*
     * If we want exclusive access, then wait until the device is not
     * busy, and then set the flag to prevent anyone else from using it.
     */
    if (flags & O_EXCL)
    {
        while (scsi_generics[dev].users)
        {
            if (flags & O_NONBLOCK)
                return -EBUSY;
            interruptible_sleep_on(&scsi_generics[dev].generic_wait);
            if (signal_pending(current))
                return -ERESTARTSYS;
        }
        scsi_generics[dev].exclude = 1;
    }
    else
        /*
         * Wait until nobody has an exclusive open on
         * this device.
         */
        while (scsi_generics[dev].exclude)
        {
            if (flags & O_NONBLOCK)
                return -EBUSY;
            interruptible_sleep_on(&scsi_generics[dev].generic_wait);
            if (signal_pending(current))
                return -ERESTARTSYS;
        }

    /*
     * OK, we should have grabbed the device.  Mark the thing so
     * that other processes know that we have it, and initialize the
     * state variables to known values.
     */
    if (!scsi_generics[dev].users
        && scsi_generics[dev].pending
        && scsi_generics[dev].complete)
    {
        if (scsi_generics[dev].buff != NULL)
            sg_free(scsi_generics[dev].buff, scsi_generics[dev].buff_len);
        scsi_generics[dev].buff = NULL;
        scsi_generics[dev].pending = 0;
    }
    if (!scsi_generics[dev].users)
        scsi_generics[dev].timeout = SG_DEFAULT_TIMEOUT;
    if (scsi_generics[dev].device->host->hostt->module)
        __MOD_INC_USE_COUNT(scsi_generics[dev].device->host->hostt->module);
    if (sg_template.module)
        __MOD_INC_USE_COUNT(sg_template.module);
    scsi_generics[dev].users++;
    return 0;
}
static int sg_close(struct inode * inode, struct file * filp)
{
    int dev = MINOR(inode->i_rdev);

    scsi_generics[dev].users--;
    if (scsi_generics[dev].device->host->hostt->module)
        __MOD_DEC_USE_COUNT(scsi_generics[dev].device->host->hostt->module);
    if (sg_template.module)
        __MOD_DEC_USE_COUNT(sg_template.module);
    scsi_generics[dev].exclude = 0;
    wake_up(&scsi_generics[dev].generic_wait);
    return 0;
}
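/*
 * Illustrative user-space sketch of the open() semantics implemented above:
 * the node must be opened read-write, and O_EXCL requests exclusive access
 * (combine with O_NONBLOCK to fail immediately instead of sleeping while the
 * device is busy).  Not compiled into the driver; the node name is only an
 * example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
    int fd;

    if (open("/dev/sg0", O_RDONLY) < 0)     /* read-only access is refused */
        perror("read-only open");

    fd = open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);
    if (fd < 0)                             /* EBUSY if already in use */
        perror("exclusive open");
    return 0;
}
#endif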
static char *sg_malloc(int size)
{
    if (size <= 4096)
        return (char *) scsi_malloc(size);
    if (size <= SG_BIG_BUFF)
    {
        while (big_inuse)
        {
            interruptible_sleep_on(&big_wait);
            if (signal_pending(current))
                return NULL;
        }
        big_inuse = 1;
        return big_buff;
    }
    return NULL;
}
static void sg_free(char *buff, int size)
{
    if (buff == big_buff)
    {
        big_inuse = 0;
        wake_up(&big_wait);
        return;
    }
    scsi_free(buff, size);
}
/*
 * Read back the results of a previous command.  We use the pending and
 * complete semaphores to tell us whether the buffer is available for us
 * and whether the command is actually done.
 */
static ssize_t sg_read(struct file *filp, char *buf,
                       size_t count, loff_t *ppos)
{
    struct inode *inode = filp->f_dentry->d_inode;
    int dev = MINOR(inode->i_rdev);
    int i;
    struct scsi_generic *device = &scsi_generics[dev];

    /*
     * If we are in the middle of error recovery, don't let anyone
     * else try and use this device.  Also, if error recovery fails, it
     * may try and take the device offline, in which case all further
     * access to the device is prohibited.
     */
    if (!scsi_block_when_processing_errors(scsi_generics[dev].device))
        return -ENXIO;

    if (ppos != &filp->f_pos) {
        /* FIXME: Hmm.  Seek to the right place, or fail?  */
    }

    if ((i = verify_area(VERIFY_WRITE, buf, count)))
        return i;

    /*
     * Wait until the command is actually done.
     */
    while (!device->pending || !device->complete)
    {
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
        interruptible_sleep_on(&device->read_wait);
        if (signal_pending(current))
            return -ERESTARTSYS;
    }

    /*
     * Now copy the result back to the user buffer.
     */
    device->header.pack_len = device->header.reply_len;

    if (count >= sizeof(struct sg_header))
    {
        copy_to_user(buf, &device->header, sizeof(struct sg_header));
        buf += sizeof(struct sg_header);
        if (count > device->header.pack_len)
            count = device->header.pack_len;
        if (count > sizeof(struct sg_header)) {
            copy_to_user(buf, device->buff, count - sizeof(struct sg_header));
        }
    }
    else
        count = device->header.result == 0 ? 0 : -EIO;

    /*
     * Clean up, and release the device so that we can send another
     * command.
     */
    sg_free(device->buff, device->buff_len);
    device->buff = NULL;
    device->pending = 0;
    wake_up(&device->write_wait);
    return count;
}
/*
 * This function is called by the interrupt handler when we
 * actually have a command that is complete.  Change the
 * flags to indicate that we have a result.
 */
static void sg_command_done(Scsi_Cmnd * SCpnt)
{
    int dev = MINOR(SCpnt->request.rq_dev);
    struct scsi_generic *device = &scsi_generics[dev];

    if (!device->pending)
    {
        printk("unexpected done for sg %d\n", dev);
        scsi_release_command(SCpnt);
        return;
    }

    /*
     * See if the command completed normally, or whether something went
     * wrong.
     */
    memcpy(device->header.sense_buffer, SCpnt->sense_buffer,
           sizeof(SCpnt->sense_buffer));
    switch (host_byte(SCpnt->result)) {
    case DID_OK:
        device->header.result = 0;
        break;
    case DID_NO_CONNECT:
    case DID_BUS_BUSY:
    case DID_TIME_OUT:
        device->header.result = EBUSY;
        break;
    case DID_BAD_TARGET:
    case DID_ABORT:
    case DID_PARITY:
    case DID_RESET:
    case DID_BAD_INTR:
        device->header.result = EIO;
        break;
    case DID_ERROR:
        /*
         * There really should be DID_UNDERRUN and DID_OVERRUN error values,
         * and a means for callers of scsi_do_cmd to indicate whether an
         * underrun or overrun should signal an error.  Until that can be
         * implemented, this kludge allows for returning useful error values
         * except in cases that return DID_ERROR that might be due to an
         * underrun.
         */
        if (SCpnt->sense_buffer[0] == 0 &&
            status_byte(SCpnt->result) == GOOD)
            device->header.result = 0;
        else device->header.result = EIO;
        break;
    }

    /*
     * Now wake up the process that is waiting for the
     * result.
     */
    device->complete = 1;
    scsi_release_command(SCpnt);
    wake_up(&scsi_generics[dev].read_wait);
}
static ssize_t sg_write(struct file *filp, const char *buf,
                        size_t count, loff_t *ppos)
{
    struct inode *inode = filp->f_dentry->d_inode;
    int bsize, size, amt, i;
    int input_size;
    unsigned char cmnd[MAX_COMMAND_SIZE];
    kdev_t devt = inode->i_rdev;
    int dev = MINOR(devt);
    struct scsi_generic * device = &scsi_generics[dev];
    unsigned char opcode;
    Scsi_Cmnd * SCpnt;
    unsigned long flags;

    /*
     * If we are in the middle of error recovery, don't let anyone
     * else try and use this device.  Also, if error recovery fails, it
     * may try and take the device offline, in which case all further
     * access to the device is prohibited.
     */
    if (!scsi_block_when_processing_errors(scsi_generics[dev].device))
        return -ENXIO;

    if (ppos != &filp->f_pos) {
        /* FIXME: Hmm.  Seek to the right place, or fail?  */
    }

    if ((i = verify_area(VERIFY_READ, buf, count)))
        return i;

    /*
     * The minimum scsi command length is 6 bytes.  If we get anything
     * less than this, it is clearly bogus.
     */
    if (count < (sizeof(struct sg_header) + 6))
        return -EIO;

    /*
     * If we still have a result pending from a previous command,
     * wait until the result has been read by the user before sending
     * another command.
     */
    while (device->pending)
    {
        if (filp->f_flags & O_NONBLOCK)
            return -EAGAIN;
#ifdef DEBUG
        printk("sg_write: sleeping on pending request\n");
#endif
        interruptible_sleep_on(&device->write_wait);
        if (signal_pending(current))
            return -ERESTARTSYS;
    }

    /*
     * Mark the device flags for the new state: a command is now pending
     * and no result is available yet.
     */
    device->pending = 1;
    device->complete = 0;
    copy_from_user(&device->header, buf, sizeof(struct sg_header));

    device->header.pack_len = count;
    buf += sizeof(struct sg_header);

    /*
     * Now we need to grab the command itself from the user's buffer.
     */
    get_user(opcode, buf);
    size = COMMAND_SIZE(opcode);
    if (opcode >= 0xc0 && device->header.twelve_byte) size = 12;

    /*
     * Determine buffer size.
     */
    input_size = device->header.pack_len - size;
    if (input_size > device->header.reply_len)
        bsize = input_size;
    else
        bsize = device->header.reply_len;

    /*
     * Don't include the command header itself in the size.
     */
    bsize -= sizeof(struct sg_header);
    input_size -= sizeof(struct sg_header);

    /*
     * Verify that the user has actually passed enough bytes for this command.
     */
    if (count < (sizeof(struct sg_header) + input_size + size))
    {
        device->pending = 0;
        wake_up(&device->write_wait);
        return -EIO;
    }

    /*
     * Allocate a buffer that is large enough to hold the data
     * that has been requested.  Round up to an even number of sectors,
     * since scsi_malloc allocates in chunks of 512 bytes.
     */
    amt = bsize;
    if (!bsize)
        bsize++;
    bsize = (bsize + 511) & ~511;
    /*
     * If we cannot allocate the buffer, report an error.
     */
    if ((bsize < 0) || !(device->buff = sg_malloc(device->buff_len = bsize)))
    {
        device->pending = 0;
        wake_up(&device->write_wait);
        return -ENOMEM;
    }

#ifdef DEBUG
    printk("allocating device\n");
#endif

    /*
     * Grab a device pointer for the device we want to talk to.  If we
     * don't want to block, just return with the appropriate message.
     */
    if (!(SCpnt = scsi_allocate_device(NULL, device->device,
                                       !(filp->f_flags & O_NONBLOCK))))
    {
        device->pending = 0;
        wake_up(&device->write_wait);
        sg_free(device->buff, device->buff_len);
        device->buff = NULL;
        return -EAGAIN;
    }

#ifdef DEBUG
    printk("device allocated\n");
#endif

    SCpnt->request.rq_dev = devt;
    SCpnt->request.rq_status = RQ_ACTIVE;
    SCpnt->sense_buffer[0] = 0;
    SCpnt->cmd_len = size;

    /*
     * Now copy the SCSI command from the user's address space.
     */
    copy_from_user(cmnd, buf, size);
    buf += size;

    /*
     * If we are writing data, copy the data we are writing.  The pack_len
     * field also includes the length of the header and the command,
     * so we need to subtract these off.
     */
    if (input_size > 0) copy_from_user(device->buff, buf, input_size);

    /*
     * Set the LUN field in the command structure.
     */
    cmnd[1] = (cmnd[1] & 0x1f) | (device->device->lun << 5);

    /*
     * Now pass the actual command down to the low-level driver.  We
     * do not do any more here - when the interrupt arrives, we will
     * then do the post-processing.
     */
    spin_lock_irqsave(&io_request_lock, flags);
    scsi_do_cmd(SCpnt, (void *) cmnd,
                (void *) device->buff, amt,
                sg_command_done, device->timeout, SG_DEFAULT_RETRIES);
    spin_unlock_irqrestore(&io_request_lock, flags);

#ifdef DEBUG
    printk("done cmd\n");
#endif

    return count;
}
static unsigned int sg_poll(struct file *file, poll_table * wait)
{
    int dev = MINOR(file->f_dentry->d_inode->i_rdev);
    struct scsi_generic *device = &scsi_generics[dev];
    unsigned int mask = 0;

    poll_wait(file, &scsi_generics[dev].read_wait, wait);
    poll_wait(file, &scsi_generics[dev].write_wait, wait);
    if (device->pending && device->complete)
        mask |= POLLIN | POLLRDNORM;
    if (!device->pending)
        mask |= POLLOUT | POLLWRNORM;
    return mask;
}
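/*
 * Illustrative user-space sketch: poll() reports POLLIN once a reply is
 * ready to be read and POLLOUT when a new command may be written, matching
 * the mask computed above.  Not compiled into the driver; the helper name
 * is only for the sketch.
 */
#if 0
#include <poll.h>

static int sg_wait_for_reply(int fd)
{
    struct pollfd pfd;

    pfd.fd = fd;
    pfd.events = POLLIN;
    pfd.revents = 0;
    return poll(&pfd, 1, -1);   /* block until the pending command completes */
}
#endif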
static struct file_operations sg_fops = {
    NULL,            /* lseek */
    sg_read,         /* read */
    sg_write,        /* write */
    NULL,            /* readdir */
    sg_poll,         /* poll */
    sg_ioctl,        /* ioctl */
    NULL,            /* mmap */
    sg_open,         /* open */
    NULL,            /* flush */
    sg_close,        /* release */
    NULL             /* fsync */
};
static int sg_detect(Scsi_Device * SDp)
{
    switch (SDp->type) {
        case TYPE_DISK:
        case TYPE_MOD:
        case TYPE_ROM:
        case TYPE_WORM:
        case TYPE_TAPE: break;
        default:
            printk("Detected scsi generic sg%c at scsi%d, channel %d, id %d, lun %d\n",
                   'a' + sg_template.dev_noticed,
                   SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
    }
    sg_template.dev_noticed++;
    return 1;
}
/* Driver initialization */
static int sg_init(void)
{
    static int sg_registered = 0;

    if (sg_template.dev_noticed == 0) return 0;

    if (!sg_registered) {
        if (register_chrdev(SCSI_GENERIC_MAJOR, "sg", &sg_fops))
        {
            printk("Unable to get major %d for generic SCSI device\n",
                   SCSI_GENERIC_MAJOR);
            return 1;
        }
        sg_registered++;
    }

    /* If we have already been through here, return */
    if (scsi_generics) return 0;

#ifdef DEBUG
    printk("sg: Init generic device.\n");
#endif

    big_buff = (char *) scsi_init_malloc(SG_BIG_BUFF, GFP_ATOMIC | GFP_DMA);

    scsi_generics = (struct scsi_generic *)
        scsi_init_malloc((sg_template.dev_noticed + SG_EXTRA_DEVS)
                         * sizeof(struct scsi_generic), GFP_ATOMIC);
    memset(scsi_generics, 0, (sg_template.dev_noticed + SG_EXTRA_DEVS)
           * sizeof(struct scsi_generic));

    sg_template.dev_max = sg_template.dev_noticed + SG_EXTRA_DEVS;
    return 0;
}
static int sg_attach(Scsi_Device * SDp)
{
    struct scsi_generic * gpnt;
    int i;

    if (sg_template.nr_dev >= sg_template.dev_max)
        return 1;

    for (gpnt = scsi_generics, i = 0; i < sg_template.dev_max; i++, gpnt++)
        if (!gpnt->device) break;

    if (i >= sg_template.dev_max) panic("scsi_devices corrupt (sg)");

    scsi_generics[i].device = SDp;
    scsi_generics[i].users = 0;
    scsi_generics[i].generic_wait = NULL;
    scsi_generics[i].read_wait = NULL;
    scsi_generics[i].write_wait = NULL;
    scsi_generics[i].buff = NULL;
    scsi_generics[i].exclude = 0;
    scsi_generics[i].pending = 0;
    scsi_generics[i].timeout = SG_DEFAULT_TIMEOUT;
    sg_template.nr_dev++;
    return 0;
}
static void sg_detach(Scsi_Device * SDp)
{
    struct scsi_generic * gpnt;
    int i;

    for (gpnt = scsi_generics, i = 0; i < sg_template.dev_max; i++, gpnt++)
        if (gpnt->device == SDp)
        {
            gpnt->device = NULL;
            sg_template.nr_dev--;
            /*
             * Avoid the associated /dev/sg? device number being incremented
             * each time the module is inserted/removed, <dan@lectra.fr>
             */
            sg_template.dev_noticed--;
            return;
        }
}
int init_module(void)
{
    sg_template.module = &__this_module;
    return scsi_register_module(MODULE_SCSI_DEV, &sg_template);
}

void cleanup_module(void)
{
    scsi_unregister_module(MODULE_SCSI_DEV, &sg_template);
    unregister_chrdev(SCSI_GENERIC_MAJOR, "sg");

    if (scsi_generics != NULL) {
        scsi_init_free((char *) scsi_generics,
                       (sg_template.dev_noticed + SG_EXTRA_DEVS)
                       * sizeof(struct scsi_generic));
    }
    sg_template.dev_max = 0;
    scsi_init_free(big_buff, SG_BIG_BUFF);
}
/*
 * Overrides for Emacs so that we almost follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-brace-imaginary-offset: 0
 * c-argdecl-indent: 4
 * c-continued-statement-offset: 4
 * c-continued-brace-offset: 0
 * indent-tabs-mode: nil
 * End:
 */