[SCSI] remove scsi_eh_eflags_ macros
[linux-2.6/suspend2-2.6.18.git] drivers/scsi/scsi_lib.c
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE		32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP

/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq);
	return 0;
}

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
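
/*
 * Illustrative sketch (not part of the original file): scsi_queue_insert()
 * is typically reached when a LLD's queuecommand hands back one of the
 * SCSI_MLQUEUE_* reason codes, which scsi_dispatch_cmd() forwards here.
 * A hypothetical driver might do:
 *
 *	static int example_queuecommand(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		if (example_hw_fifo_full())	// hypothetical helper
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		... issue the command to the hardware ...
 *		return 0;
 *	}
 *
 * The HOST_BUSY return then arrives at scsi_queue_insert(cmd,
 * SCSI_MLQUEUE_HOST_BUSY), which sets host_blocked and requeues the
 * request at the head of the queue as shown above.
 */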

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by the
	 * scsi_hosts[host].queuecommand() function; it needs to also call
	 * the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

static void scsi_wait_done(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	struct request_queue *q = cmd->device->request_queue;
	unsigned long flags;

	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (req->waiting)
		complete(req->waiting);
}

/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
	BUG_ON(!req->waiting);

	complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);

	sreq->sr_request->waiting = &wait;
	sreq->sr_request->rq_status = RQ_SCSI_BUSY;
	sreq->sr_request->end_io = scsi_wait_req_end_io;
	scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
			timeout, retries);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	__scsi_release_request(sreq);
}
EXPORT_SYMBOL(scsi_wait_req);
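
/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * a synchronous INQUIRY via scsi_wait_req() looks much like
 * scsi_test_unit_ready() further down in this file:
 *
 *	unsigned char cmd[] = { INQUIRY, 0, 0, 0, 36, 0 };
 *	unsigned char buf[36];
 *	struct scsi_request *sreq;
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (!sreq)
 *		return -ENOMEM;
 *	sreq->sr_data_direction = DMA_FROM_DEVICE;
 *	scsi_wait_req(sreq, cmd, buf, sizeof(buf), 5 * HZ, 3);
 *	if (scsi_status_is_good(sreq->sr_result))
 *		... use buf ...
 *	scsi_release_request(sreq);
 */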

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	unsigned long flags;

	cmd->request->flags &= ~REQ_DONTPREP;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}
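
/*
 * Worked example (illustrative): for a 16-sector READ that hit a medium
 * error after the first 8 sectors, scsi_io_completion() below calls
 * scsi_end_request(cmd, 1, 8 << 9, 0).  end_that_request_chunk()
 * completes the first 4096 bytes and returns non-zero because sectors
 * remain; with requeue == 0 the command is handed back (return cmd) so
 * the sense handling in scsi_io_completion() can decide how to finish
 * or requeue the bad region.
 */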

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	if (sgl)
		memset(sgl, 0, sgp->size);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index > SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
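
/*
 * Example of the pool selection above (illustrative): a command with
 * use_sg == 20 falls in the "17 ... 32" case, so sglist_len becomes 2
 * and the allocation is served from scsi_sg_pools[2], i.e. the
 * "sgpool-32" mempool of 32-entry scatterlists.
 */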

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

		/*
		 * If the command completed without error, then either finish off the
		 * rest of the command, or start a new one.
		 */
		if (result == 0 || cmd == NULL) {
			return;
		}
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				cmd = scsi_end_request(cmd, 0,
						this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				cmd = scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			printk(KERN_INFO "Device %s not ready.\n",
			       req->rq_disk ? req->rq_disk->disk_name : "");
			cmd = scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
			       cmd->device->host->host_no,
			       (int)cmd->device->channel,
			       (int)cmd->device->id, (int)cmd->device->lun);
			__scsi_print_command(cmd->data_cmnd);
			scsi_print_sense("", cmd);
			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	} /* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
		       "= 0x%x\n", cmd->device->host->host_no,
		       cmd->device->channel,
		       cmd->device->id,
		       cmd->device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			scsi_print_sense("", cmd);
	}
	/*
	 * Mark a single buffer as not uptodate.  Queue the remainder.
	 * We sometimes get this cruft in the event that a medium error
	 * isn't properly reported.
	 */
	block_bytes = req->hard_cur_sectors << 9;
	if (!block_bytes)
		block_bytes = req->data_len;
	cmd = scsi_end_request(cmd, 0, block_bytes, 1);
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt))
		return BLKPREP_DEFER;

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}
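
/*
 * Note on the mapping above (illustrative): cmd->use_sg starts as
 * req->nr_phys_segments, an upper bound.  blk_rq_map_sg() may merge
 * physically contiguous segments when clustering is enabled, so a
 * request counted as, say, 4 physical segments whose pages happen to
 * be adjacent can map to a smaller count; use_sg is then trimmed to
 * the real count before dispatch.
 */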

static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}

static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		return BLKPREP_KILL;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if(unlikely(specials_only)) {
			if(specials_only == SDEV_QUIESCE ||
					specials_only == SDEV_BLOCK)
				return BLKPREP_DEFER;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		return BLKPREP_KILL;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 *     (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).  This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		if (ret)	/* BLKPREP_KILL return also releases the command */
			return ret;

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		drv = *(struct scsi_driver **)req->rq_disk->private_data;
		if (unlikely(!drv->init_command(cmd))) {
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			return BLKPREP_KILL;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d (%d:%d) unblocking device at"
				       " zero depth\n", sdev->host->host_no,
				       sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (test_bit(SHOST_RECOVERY, &shost->shost_state))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
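
/*
 * Worked example of the *_blocked countdown above (illustrative): after
 * a SCSI_MLQUEUE_HOST_BUSY requeue sets host_blocked = max_host_blocked
 * (7 by default, if memory serves), the host queue stays plugged; once
 * host_busy drops to 0, each scsi_host_queue_ready() call decrements
 * host_blocked, and only the call that reaches zero lets a command
 * through, giving the HBA a user-tunable breather before retrying.
 */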

/*
 * Kill requests for a dead device
 */
static void scsi_kill_requests(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(req);
		req->flags |= REQ_QUIET;
		while (end_that_request_first(req, 0, req->nr_sectors))
			;
		end_that_request_last(req);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		scsi_kill_requests(q);
		return;
	}

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			blkdev_dequeue_request(req);
			req->flags |= REQ_QUIET;
			while (end_that_request_first(req, 0, req->nr_sectors))
				;
			end_that_request_last(req);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if(rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if(sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
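
/*
 * Example outcomes (illustrative): an old ISA adapter with
 * unchecked_isa_dma set is limited to BLK_BOUNCE_ISA (the low 16MB);
 * a PCI host whose driver set its mask with e.g.
 * pci_set_dma_mask(pdev, 0xffffffffULL) gets a 4GB bounce limit, so
 * pages above that are bounced by the block layer.
 */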

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);

int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	__scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sreq:	SCSI request to fill in with the MODE_SENSE
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns the command result; on success the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was issued)
 *	is stored in @data->header_length.
 **/
int
__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data) {
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

 retry:
	use_10_for_ms = sreq->sr_device->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	sreq->sr_cmd_len = 0;
	memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
	sreq->sr_data_direction = DMA_FROM_DEVICE;

	memset(buffer, 0, len);

	scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
	    (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
		struct scsi_sense_hdr sshdr;

		if (scsi_request_normalize_sense(sreq, &sshdr)) {
			if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
			    (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sreq->sr_device->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(sreq->sr_result)) {
		data->header_length = header_length;
		if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	return sreq->sr_result;
}
EXPORT_SYMBOL(__scsi_mode_sense);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sdev:	scsi device to send command to.
 *	@dbd:	set if mode sense will disable block descriptors in the return
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns the command result; on success the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was issued)
 *	is stored in @data->header_length.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	int ret;

	if (!sreq)
		return -1;

	ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
				timeout, retries, data);

	scsi_release_request(sreq);

	return ret;
}
EXPORT_SYMBOL(scsi_mode_sense);
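
/*
 * Illustrative usage (assumed, not from this file): upper-level drivers
 * such as sd use scsi_mode_sense() along these lines to read the caching
 * mode page and locate the data past the header and block descriptors:
 *
 *	struct scsi_mode_data data;
 *	unsigned char buffer[128];
 *
 *	if (scsi_status_is_good(scsi_mode_sense(sdev, 0, 0x08, buffer,
 *				sizeof(buffer), timeout, 3, &data))) {
 *		unsigned char *page = buffer + data.header_length +
 *				      data.block_descriptor_length;
 *		int wce = page[2] & 0x04;	// WCE bit
 *	}
 *
 * The timeout is in jiffies; sd uses its own SD_TIMEOUT constant.
 */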

int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	struct scsi_request *sreq;
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_NONE;
	scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);

	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
		struct scsi_sense_hdr sshdr;

		if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			sreq->sr_result = 0;
		}
	}
	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
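
/*
 * Example transitions (illustrative), per the tables above: a device in
 * SDEV_RUNNING may move to SDEV_QUIESCE, SDEV_OFFLINE, SDEV_BLOCK or
 * SDEV_CANCEL, but SDEV_DEL is only reachable from SDEV_CANCEL, so
 *
 *	scsi_device_set_state(sdev, SDEV_DEL);
 *
 * on a running device fails with -EINVAL and logs an "Illegal state
 * transition" message naming both states.
 */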

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
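
/*
 * Illustrative pairing (assumed usage): callers bracket an operation
 * that must not race with user I/O, e.g.:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		... issue special (REQ_SPECIAL) requests only ...
 *		scsi_device_resume(sdev);
 *	}
 *
 * While quiesced, scsi_prep_fn() above defers REQ_CMD/REQ_BLOCK_PC
 * requests with BLKPREP_DEFER and lets only special requests through.
 */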

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if(scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
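
/*
 * Illustrative sketch (assumed usage, not from this file): a LLD that
 * notices a transient transport outage can park the device from its
 * interrupt handler and release it when the link returns:
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);	// queue now stopped
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	...
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);	// queue restarted
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *
 * Both routines document that host_lock is held on entry.
 */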

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);