/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP
/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to
 *              actually process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq, 0);
	return 0;
}
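/*
 * Illustrative sketch (not part of the original source): a caller with a
 * fully initialised struct scsi_request would inject it like this, with
 * at_head = 1 meaning head-of-queue insertion:
 *
 *	sreq->sr_data_direction = DMA_NONE;
 *	scsi_insert_special_req(sreq, 1);
 *
 * Within this file, scsi_do_req() below is the expected caller.
 */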
/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 *
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * We are inserting the command into the ml queue.  First, we
	 * cancel the timer, so it doesn't time out.
	 */
	scsi_delete_timer(cmd);

	/*
	 * Next, set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Register the fact that we own the thing for now.
	 */
	cmd->state = SCSI_STATE_MLQUEUE;
	cmd->owner = SCSI_OWNER_MIDLEVEL;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Insert this command at the head of the queue for its device.
	 * It will go before all other commands that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although this *doesn't* plug the queue, it does call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
	return 0;
}
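/*
 * Illustrative sketch (not part of the original source): this path is
 * typically reached when a low-level driver's queuecommand() refuses a
 * command.  foo_queuecommand() and foo_fifo_full() are hypothetical:
 *
 *	static int foo_queuecommand(struct scsi_cmnd *cmd,
 *				    void (*done)(struct scsi_cmnd *))
 *	{
 *		if (foo_fifo_full())
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *	}
 *
 * The mid-layer then calls scsi_queue_insert(cmd, SCSI_MLQUEUE_HOST_BUSY),
 * which sets host_blocked as above and requeues the command.
 */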
/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by the
	 * scsi_hosts[host].queuecommand() function; it needs to also call
	 * the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
static void scsi_wait_done(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	struct request_queue *q = cmd->device->request_queue;
	unsigned long flags;

	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (req->waiting)
		complete(req->waiting);
}
void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);

	sreq->sr_request->waiting = &wait;
	sreq->sr_request->rq_status = RQ_SCSI_BUSY;
	scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
			timeout, retries);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	__scsi_release_request(sreq);
}
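/*
 * Example (illustrative sketch, not part of the original source): issuing
 * a 6-byte INQUIRY synchronously through scsi_wait_req().  The "sdev"
 * pointer and surrounding error handling are assumed to be supplied by
 * the caller:
 *
 *	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *	unsigned char buf[36];
 *	struct scsi_request *sreq;
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (!sreq)
 *		return -ENOMEM;
 *	sreq->sr_data_direction = DMA_FROM_DEVICE;
 *	scsi_wait_req(sreq, cmd, buf, sizeof(buf), 5 * HZ, 3);
 *	if (sreq->sr_result == 0)
 *		... buf now holds the INQUIRY data ...
 *	scsi_release_request(sreq);
 *
 * scsi_test_unit_ready() and __scsi_mode_sense() below follow this exact
 * pattern.
 */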
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->owner = SCSI_OWNER_MIDLEVEL;
	cmd->serial_number = 0;
	cmd->serial_number_at_timeout = 0;
	cmd->abort_reason = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;
	cmd->internal_timeout = NORMAL_TIMEOUT;
	cmd->abort_reason = 0;

	return 1;
}
/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(&sdev->sdev_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(&sdev->sdev_lock, flags);
}
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	current_sdev->sdev_target->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (current_sdev->sdev_target->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &current_sdev->same_target_siblings,
			same_target_siblings) {
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	cmd->request->flags &= ~REQ_DONTPREP;
	blk_insert_request(q, cmd->request, 1, cmd, 1);

	scsi_run_queue(q);
}
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands called from interrupt
 *              handler or a bottom-half handler.
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, 0 for I/O error.
 *              bytes    - number of bytes of I/O we want to mark complete.
 *		requeue  - indicates whether we should requeue leftovers.
 *		frequeue - indicates that if we release the command block
 *			   that the queue request function should be called.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue)
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);

			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	if (sgl)
		memset(sgl, 0, sgp->size);
	return sgl;
}
static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index > SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
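/*
 * Worked example (added for clarity): a command with use_sg == 100 falls
 * into the "65 ... 128" case in scsi_alloc_sgtable() above, so
 * cmd->sglist_len becomes 4 and the table is served from the "sgpool-128"
 * mempool; the matching scsi_free_sgtable(sgl, 4) returns it to the same
 * pool.  The pool index, not the segment count, is what cmd->sglist_len
 * must remember.
 */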
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = (driver_byte(result) & DRIVER_SENSE) ?
			      (CHECK_CONDITION << 1) : (result & 0xff);
		if (result) {
			clear_errors = 0;
			if (cmd->sense_buffer[0] & 0x70) {
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len -= cmd->bufflen;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

		/*
		 * If the command completed without error, then either finish
		 * off the rest of the command, or start a new one.
		 */
		if (result == 0 || cmd == NULL)
			return;
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (driver_byte(result) != 0) {
		if ((cmd->sense_buffer[0] & 0x7f) == 0x70) {
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (cmd->sense_buffer[12] == 0x04 &&
			    cmd->sense_buffer[13] == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			if ((cmd->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
				if (cmd->device->removable) {
					/* detected disc change.  set a bit
					 * and quietly refuse further access.
					 */
					cmd->device->changed = 1;
					cmd = scsi_end_request(cmd, 0,
							this_count, 1);
					return;
				} else {
					/*
					 * Must have been a power glitch, or a
					 * bus reset.  Could not have been a
					 * media change, so we just retry the
					 * request and see what happens.
					 */
					scsi_requeue_command(q, cmd);
					return;
				}
			}
		}
		/*
		 * If we had an ILLEGAL REQUEST returned, then we may have
		 * performed an unsupported command.  The only thing this
		 * should be would be a ten byte read where only a six byte
		 * read was supported.  Also, on a system where READ CAPACITY
		 * failed, we may have read past the end of the disk.
		 */
		switch (cmd->sense_buffer[2]) {
		case ILLEGAL_REQUEST:
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				cmd = scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			printk(KERN_INFO "Device %s not ready.\n",
			       req->rq_disk ? req->rq_disk->disk_name : "");
			cmd = scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
			       cmd->device->host->host_no, (int) cmd->device->channel,
			       (int) cmd->device->id, (int) cmd->device->lun);
			__scsi_print_command(cmd->data_cmnd);
			scsi_print_sense("", cmd);
			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	}			/* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		printk("SCSI error : <%d %d %d %d> return code = 0x%x\n",
		       cmd->device->host->host_no,
		       cmd->device->channel,
		       cmd->device->id,
		       cmd->device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			scsi_print_sense("", cmd);
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
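/*
 * Worked example (added for clarity; numbers are hypothetical): for a
 * 64 KiB read that fails with a medium error after 48 KiB, the caller
 * would pass good_bytes == 49152.  scsi_end_request() above first
 * completes those 96 good sectors; the failing chunk (hard_cur_sectors
 * << 9 bytes) is then marked !uptodate, and the remainder of the request
 * is requeued at the head of the queue by scsi_requeue_command().
 */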
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt)) {
		req->flags |= REQ_SPECIAL;
		return BLKPREP_DEFER;
	}

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		return BLKPREP_KILL;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if(unlikely(specials_only)) {
			if(specials_only == SDEV_QUIESCE)
				return BLKPREP_DEFER;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		return BLKPREP_KILL;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 *     (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		if (ret)	/* BLKPREP_KILL return also releases the command */
			return ret;

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		drv = *(struct scsi_driver **)req->rq_disk->private_data;
		if (unlikely(!drv->init_command(cmd))) {
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			return BLKPREP_KILL;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
}
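/*
 * Summary note (added): BLKPREP_OK hands the request to scsi_request_fn()
 * with REQ_DONTPREP set, so we are not called for it again; BLKPREP_DEFER
 * leaves the request queued to be prepped again later (the plugging above
 * guarantees the queue is restarted); and BLKPREP_KILL makes the block
 * layer fail the request outright.
 */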
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d (%d:%d) unblocking device at"
				       " zero depth\n", sdev->host->host_no,
				       sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (test_bit(SHOST_RECOVERY, &shost->shost_state))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
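/*
 * Illustrative note (added): if a refused command set host_blocked =
 * max_host_blocked = 7 in scsi_queue_insert(), this function keeps
 * replugging the queue until seven idle queue runs have decremented the
 * counter to zero (or a successful completion clears it elsewhere in the
 * mid-layer), implementing the user-specifiable stall described above
 * scsi_queue_insert().
 */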
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd;
	struct request *req;

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			blkdev_dequeue_request(req);
			req->flags |= REQ_QUIET;
			while (end_that_request_first(req, 0, req->nr_sectors))
				;
			end_that_request_last(req);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (sdev->sdev_target->starget_sdev_user &&
			    sdev->sdev_target->starget_sdev_user != sdev)
				goto not_ready;
			sdev->sdev_target->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if(rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if(sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;

	host_dev = scsi_get_device(shost);
	if (PCI_DMA_BUS_IS_PHYS && host_dev && host_dev->dma_mask)
		return *host_dev->dma_mask;

	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	return BLK_BOUNCE_ANY;
}
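/*
 * Example (added note): an adapter with unchecked_isa_dma set gets
 * BLK_BOUNCE_ISA here, so blk_queue_bounce_limit() in scsi_alloc_queue()
 * below makes the block layer bounce any buffer above the 16 MB ISA DMA
 * limit; a PCI HBA with a 32-bit dma_mask instead returns 0xffffffff,
 * and only highmem pages above 4 GB are bounced.
 */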
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
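/*
 * Usage sketch (not part of the original source): a low-level driver
 * performing internal recovery would bracket it like this, where
 * foo_reset_adapter() is a hypothetical driver function:
 *
 *	scsi_block_requests(shost);
 *	foo_reset_adapter(shost);
 *	scsi_unblock_requests(shost);
 *
 * As the notes above warn, nothing times this out; a missing unblock
 * call stalls the host forever.
 */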
int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}
void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
/**
 *	__scsi_mode_sense - issue a mode sense, falling back from 10 to
 *		six bytes if necessary.
 *	@sreq:	SCSI request to fill in with the MODE_SENSE
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns zero on success, or the SCSI result code if unsuccessful;
 *	on success the header offset (either 4 or 8 depending on whether
 *	a six or ten byte command was issued) is returned in
 *	data->header_length.
 **/
int
__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data) {
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

 retry:
	use_10_for_ms = sreq->sr_device->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	sreq->sr_cmd_len = 0;
	sreq->sr_sense_buffer[0] = 0;
	sreq->sr_sense_buffer[2] = 0;
	sreq->sr_data_direction = DMA_FROM_DEVICE;

	memset(buffer, 0, len);

	scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */
	if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
	    (driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
	    sreq->sr_sense_buffer[2] == ILLEGAL_REQUEST &&
	    (sreq->sr_sense_buffer[4] & 0x40) == 0x40 &&
	    sreq->sr_sense_buffer[5] == 0 &&
	    sreq->sr_sense_buffer[6] == 0) {
		sreq->sr_device->use_10_for_ms = 0;
		goto retry;
	}

	if(scsi_status_is_good(sreq->sr_result)) {
		data->header_length = header_length;
		if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	return sreq->sr_result;
}
/**
 *	scsi_mode_sense - issue a mode sense, falling back from 10 to
 *		six bytes if necessary.
 *	@sdev:	scsi device to send command to.
 *	@dbd:	set if mode sense will disable block descriptors in the return
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *
 *	Returns zero on success, or the SCSI result code if unsuccessful;
 *	on success the header offset (either 4 or 8 depending on whether
 *	a six or ten byte command was issued) is returned in
 *	data->header_length.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	int ret;

	if (!sreq)
		return -1;

	ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
				timeout, retries, data);

	scsi_release_request(sreq);

	return ret;
}
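/*
 * Example (illustrative sketch): reading the caching mode page (0x08).
 * "sdev" and the buffer are assumed to be provided by the caller:
 *
 *	struct scsi_mode_data data;
 *	unsigned char buf[255];
 *
 *	if (scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
 *			    5 * HZ, 3, &data) == 0)
 *		... the page starts at buf[data.header_length +
 *		    data.block_descriptor_length] ...
 */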
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	struct scsi_request *sreq;
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_NONE;
	scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);

	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
	    (sreq->sr_sense_buffer[2] & 0x0f) == UNIT_ATTENTION &&
	    sdev->removable) {
		sdev->changed = 1;
		sreq->sr_result = 0;
	}
	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
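/*
 * Usage sketch (added; the polling loop is hypothetical): callers
 * typically poll this helper while a unit spins up:
 *
 *	int tries = 10;
 *
 *	while (scsi_test_unit_ready(sdev, 5 * HZ, 3) && --tries)
 *		msleep(1000);
 */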
/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	dev_printk(KERN_ERR, &sdev->sdev_gendev,
		   "Illegal state transition %s->%s\n",
		   scsi_device_state_name(oldstate),
		   scsi_device_state_name(state));
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		schedule_timeout(HZ/5);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if(scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
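/*
 * Usage sketch (added): the intended pairing of the two helpers above,
 * e.g. around a hypothetical firmware download:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		foo_download_firmware(sdev);
 *		scsi_device_resume(sdev);
 *	}
 *
 * While quiesced, only special (mid-layer injected) requests flow; user
 * I/O is deferred by scsi_prep_fn() returning BLKPREP_DEFER.
 */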