[PATCH] libata: implement and use ata_eh_dev_action()
drivers/scsi/libata-eh.c
/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

static void __ata_port_freeze(struct ata_port *ap);
static void ata_eh_finish(struct ata_port *ap);
static void ata_ering_record(struct ata_ering *ering, int is_io,
                             unsigned int err_mask)
{
        struct ata_ering_entry *ent;

        WARN_ON(!err_mask);

        ering->cursor++;
        ering->cursor %= ATA_ERING_SIZE;

        ent = &ering->ring[ering->cursor];
        ent->is_io = is_io;
        ent->err_mask = err_mask;
        ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
        struct ata_ering_entry *ent = &ering->ring[ering->cursor];

        if (!ent->err_mask)
                return NULL;
        return ent;
}

static int ata_ering_map(struct ata_ering *ering,
                         int (*map_fn)(struct ata_ering_entry *, void *),
                         void *arg)
{
        int idx, rc = 0;
        struct ata_ering_entry *ent;

        idx = ering->cursor;
        do {
                ent = &ering->ring[idx];
                if (!ent->err_mask)
                        break;
                rc = map_fn(ent, arg);
                if (rc)
                        break;
                idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
        } while (idx != ering->cursor);

        return rc;
}
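
/*
 * Illustrative sketch (not part of this patch): ata_ering_map() walks
 * the ring from the newest entry backwards and stops early when the
 * callback returns non-zero.  count_timeouts_cb() below is a
 * hypothetical callback which tallies timeout errors:
 *
 *      static int count_timeouts_cb(struct ata_ering_entry *ent, void *arg)
 *      {
 *              int *nr_timeouts = arg;
 *
 *              if (ent->err_mask & AC_ERR_TIMEOUT)
 *                      (*nr_timeouts)++;
 *              return 0;
 *      }
 *
 *      int nr_timeouts = 0;
 *      ata_ering_map(&dev->ering, count_timeouts_cb, &nr_timeouts);
 */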
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->ap->eh_context;

        return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_device *dev,
                                struct ata_eh_info *ehi, unsigned int action)
{
        int i;

        if (!dev) {
                ehi->action &= ~action;
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        ehi->dev_action[i] &= ~action;
        } else {
                /* doesn't make sense for port-wide EH actions */
                WARN_ON(!(action & ATA_EH_PERDEV_MASK));

                /* break ehi->action into ehi->dev_action */
                if (ehi->action & action) {
                        for (i = 0; i < ATA_MAX_DEVICES; i++)
                                ehi->dev_action[i] |= ehi->action & action;
                        ehi->action &= ~action;
                }

                /* turn off the specified per-dev action */
                ehi->dev_action[dev->devno] &= ~action;
        }
}
/**
 * ata_scsi_timed_out - SCSI layer time out callback
 * @cmd: timed out SCSI command
 *
 * Handles SCSI layer timeout.  We race with normal completion of
 * the qc for @cmd.  If the qc is already gone, we lose and let
 * the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 * timed out and EH should be invoked.  Prevent ata_qc_complete()
 * from finishing it by setting EH_SCHEDULED and return
 * EH_NOT_HANDLED.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Called from timer context
 *
 * RETURNS:
 * EH_HANDLED or EH_NOT_HANDLED
 */
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct ata_port *ap = ata_shost_to_port(host);
        unsigned long flags;
        struct ata_queued_cmd *qc;
        enum scsi_eh_timer_return ret;

        DPRINTK("ENTER\n");

        if (ap->ops->error_handler) {
                ret = EH_NOT_HANDLED;
                goto out;
        }

        ret = EH_HANDLED;
        spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
                qc->err_mask |= AC_ERR_TIMEOUT;
                ret = EH_NOT_HANDLED;
        }
        spin_unlock_irqrestore(ap->lock, flags);

 out:
        DPRINTK("EXIT, ret=%d\n", ret);
        return ret;
}
/**
 * ata_scsi_error - SCSI layer error handler callback
 * @host: SCSI host on which error occurred
 *
 * Handles SCSI-layer-thrown error events.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
        struct ata_port *ap = ata_shost_to_port(host);
        spinlock_t *ap_lock = ap->lock;
        int i, repeat_cnt = ATA_EH_MAX_REPEAT;
        unsigned long flags;

        DPRINTK("ENTER\n");

        /* synchronize with port task */
        ata_port_flush_task(ap);

        /* synchronize with host_set lock and sort out timeouts */

        /* For new EH, all qcs are finished in one of three ways -
         * normal completion, error completion, and SCSI timeout.
         * Both completions can race against SCSI timeout.  When normal
         * completion wins, the qc never reaches EH.  When error
         * completion wins, the qc has ATA_QCFLAG_FAILED set.
         *
         * When SCSI timeout wins, things are a bit more complex.
         * Normal or error completion can occur after the timeout but
         * before this point.  In such cases, both types of
         * completions are honored.  A scmd is determined to have
         * timed out iff its associated qc is active and not failed.
         */
        if (ap->ops->error_handler) {
                struct scsi_cmnd *scmd, *tmp;
                int nr_timedout = 0;

                spin_lock_irqsave(ap_lock, flags);

                list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
                        struct ata_queued_cmd *qc;

                        for (i = 0; i < ATA_MAX_QUEUE; i++) {
                                qc = __ata_qc_from_tag(ap, i);
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
                        }

                        if (i < ATA_MAX_QUEUE) {
                                /* the scmd has an associated qc */
                                if (!(qc->flags & ATA_QCFLAG_FAILED)) {
                                        /* which hasn't failed yet, timeout */
                                        qc->err_mask |= AC_ERR_TIMEOUT;
                                        qc->flags |= ATA_QCFLAG_FAILED;
                                        nr_timedout++;
                                }
                        } else {
                                /* Normal completion occurred after
                                 * SCSI timeout but before this point.
                                 * Successfully complete it.
                                 */
                                scmd->retries = scmd->allowed;
                                scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
                        }
                }

                /* If we have timed out qcs, they belong to EH from
                 * this point but the state of the controller is
                 * unknown.  Freeze the port to make sure the IRQ
                 * handler doesn't diddle with those qcs.  This must
                 * be done atomically w.r.t. setting QCFLAG_FAILED.
                 */
                if (nr_timedout)
                        __ata_port_freeze(ap);

                spin_unlock_irqrestore(ap_lock, flags);
        } else
                spin_unlock_wait(ap_lock);

 repeat:
        /* invoke error handler */
        if (ap->ops->error_handler) {
                /* fetch & clear EH info */
                spin_lock_irqsave(ap_lock, flags);

                memset(&ap->eh_context, 0, sizeof(ap->eh_context));
                ap->eh_context.i = ap->eh_info;
                memset(&ap->eh_info, 0, sizeof(ap->eh_info));

                ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
                ap->flags &= ~ATA_FLAG_EH_PENDING;

                spin_unlock_irqrestore(ap_lock, flags);

                /* invoke EH.  if unloading, just finish failed qcs */
                if (!(ap->flags & ATA_FLAG_UNLOADING))
                        ap->ops->error_handler(ap);
                else
                        ata_eh_finish(ap);

                /* Exception might have happened after ->error_handler
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
                spin_lock_irqsave(ap_lock, flags);

                if (ap->flags & ATA_FLAG_EH_PENDING) {
                        if (--repeat_cnt) {
                                ata_port_printk(ap, KERN_INFO,
                                        "EH pending after completion, "
                                        "repeating EH (cnt=%d)\n", repeat_cnt);
                                spin_unlock_irqrestore(ap_lock, flags);
                                goto repeat;
                        }
                        ata_port_printk(ap, KERN_ERR, "EH pending after %d "
                                        "tries, giving up\n", ATA_EH_MAX_REPEAT);
                }

                /* this run is complete, make sure EH info is clear */
                memset(&ap->eh_info, 0, sizeof(ap->eh_info));

                /* Clear host_eh_scheduled while holding ap_lock such
                 * that if exception occurs after this point but
                 * before EH completion, SCSI midlayer will
                 * re-initiate EH.
                 */
                host->host_eh_scheduled = 0;

                spin_unlock_irqrestore(ap_lock, flags);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        }

        /* finish or retry handled scmd's and clean up */
        WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

        scsi_eh_flush_done_q(&ap->eh_done_q);

        /* clean up */
        spin_lock_irqsave(ap_lock, flags);

        if (ap->flags & ATA_FLAG_LOADING) {
                ap->flags &= ~ATA_FLAG_LOADING;
        } else {
                if (ap->flags & ATA_FLAG_SCSI_HOTPLUG)
                        queue_work(ata_aux_wq, &ap->hotplug_task);
                if (ap->flags & ATA_FLAG_RECOVERED)
                        ata_port_printk(ap, KERN_INFO, "EH complete\n");
        }

        ap->flags &= ~(ATA_FLAG_SCSI_HOTPLUG | ATA_FLAG_RECOVERED);

        /* tell wait_eh that we're done */
        ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
        wake_up_all(&ap->eh_wait_q);

        spin_unlock_irqrestore(ap_lock, flags);

        DPRINTK("EXIT\n");
}
/**
 * ata_port_wait_eh - Wait for the currently pending EH to complete
 * @ap: Port to wait EH for
 *
 * Wait until the currently pending EH is complete.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
        unsigned long flags;
        DEFINE_WAIT(wait);

 retry:
        spin_lock_irqsave(ap->lock, flags);

        while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(ap->lock, flags);
                schedule();
                spin_lock_irqsave(ap->lock, flags);
        }
        finish_wait(&ap->eh_wait_q, &wait);

        spin_unlock_irqrestore(ap->lock, flags);

        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->host)) {
                msleep(10);
                goto retry;
        }
}
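
/*
 * Illustrative sketch (not part of this patch): a sleeping-context
 * caller that needs a full EH pass to finish before proceeding (e.g.
 * a warm-plug or detach path) can pair this with
 * ata_port_schedule_eh():
 *
 *      spin_lock_irqsave(ap->lock, flags);
 *      ata_port_schedule_eh(ap);
 *      spin_unlock_irqrestore(ap->lock, flags);
 *
 *      ata_port_wait_eh(ap);
 */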
/**
 * ata_qc_timeout - Handle timeout of queued command
 * @qc: Command that timed out
 *
 * Some part of the kernel (currently, only the SCSI layer)
 * has noticed that the active command on port @ap has not
 * completed after a specified length of time.  Handle this
 * condition by disabling DMA (if necessary) and completing
 * transactions, with error if necessary.
 *
 * This also handles the case of the "lost interrupt", where
 * for some reason (possibly hardware bug, possibly driver bug)
 * an interrupt was not delivered to the driver, even though the
 * transaction completed successfully.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        u8 host_stat = 0, drv_stat;
        unsigned long flags;

        DPRINTK("ENTER\n");

        ap->hsm_task_state = HSM_ST_IDLE;

        spin_lock_irqsave(ap->lock, flags);

        switch (qc->tf.protocol) {

        case ATA_PROT_DMA:
        case ATA_PROT_ATAPI_DMA:
                host_stat = ap->ops->bmdma_status(ap);

                /* before we do anything else, clear DMA-Start bit */
                ap->ops->bmdma_stop(qc);

                /* fall through */

        default:
                ata_altstatus(ap);
                drv_stat = ata_chk_status(ap);

                /* ack bmdma irq events */
                ap->ops->irq_clear(ap);

                ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
                               "stat 0x%x host_stat 0x%x\n",
                               qc->tf.command, drv_stat, host_stat);

                /* complete taskfile transaction */
                qc->err_mask |= AC_ERR_TIMEOUT;
                break;
        }

        spin_unlock_irqrestore(ap->lock, flags);

        ata_eh_qc_complete(qc);

        DPRINTK("EXIT\n");
}

/**
 * ata_eng_timeout - Handle timeout of queued command
 * @ap: Port on which timed-out command is active
 *
 * Some part of the kernel (currently, only the SCSI layer)
 * has noticed that the active command on port @ap has not
 * completed after a specified length of time.  Handle this
 * condition by disabling DMA (if necessary) and completing
 * transactions, with error if necessary.
 *
 * This also handles the case of the "lost interrupt", where
 * for some reason (possibly hardware bug, possibly driver bug)
 * an interrupt was not delivered to the driver, even though the
 * transaction completed successfully.
 *
 * TODO: kill this function once old EH is gone.
 *
 * LOCKING:
 * Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
{
        DPRINTK("ENTER\n");

        ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));

        DPRINTK("EXIT\n");
}

/**
 * ata_qc_schedule_eh - schedule qc for error handling
 * @qc: command to schedule error handling for
 *
 * Schedule error handling for @qc.  EH will kick in as soon as
 * other commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        WARN_ON(!ap->ops->error_handler);

        qc->flags |= ATA_QCFLAG_FAILED;
        qc->ap->flags |= ATA_FLAG_EH_PENDING;

        /* The following will fail if timeout has already expired.
         * ata_scsi_error() takes care of such scmds on EH entry.
         * Note that ATA_QCFLAG_FAILED is unconditionally set after
         * this function completes.
         */
        scsi_req_abort_cmd(qc->scsicmd);
}

/**
 * ata_port_schedule_eh - schedule error handling without a qc
 * @ap: ATA port to schedule EH for
 *
 * Schedule error handling for @ap.  EH will kick in as soon as
 * all commands are drained.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        ap->flags |= ATA_FLAG_EH_PENDING;
        scsi_schedule_eh(ap->host);

        DPRINTK("port EH scheduled\n");
}
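
/*
 * Illustrative sketch (not part of this patch): an LLDD that detects
 * an exceptional condition with no owning qc can record it in
 * ap->eh_info and kick EH, all while holding the host_set lock:
 *
 *      ap->eh_info.err_mask |= AC_ERR_OTHER;
 *      ap->eh_info.action |= ATA_EH_SOFTRESET;
 *      ata_port_schedule_eh(ap);
 */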
/**
 * ata_port_abort - abort all qc's on the port
 * @ap: ATA port to abort qc's for
 *
 * Abort all active qc's of @ap and schedule EH.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
        int tag, nr_aborted = 0;

        WARN_ON(!ap->ops->error_handler);

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

                if (qc) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
                        nr_aborted++;
                }
        }

        if (!nr_aborted)
                ata_port_schedule_eh(ap);

        return nr_aborted;
}
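
/*
 * Illustrative sketch (not part of this patch): an interrupt handler
 * which sees a non-fatal error condition can fail all in-flight qcs
 * and let EH sort things out.  PORT_IRQ_ERROR is a hypothetical
 * controller-specific status bit:
 *
 *      if (irq_stat & PORT_IRQ_ERROR)
 *              ata_port_abort(ap);
 */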
/**
 * __ata_port_freeze - freeze port
 * @ap: ATA port to freeze
 *
 * This function is called when HSM violation or some other
 * condition disrupts normal operation of the port.  Frozen port
 * is not allowed to perform any operation until the port is
 * thawed, which usually follows a successful reset.
 *
 * ap->ops->freeze() callback can be used for freezing the port
 * hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 * port cannot be frozen hardware-wise, the interrupt handler
 * must ack and clear interrupts unconditionally while the port
 * is frozen.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
        WARN_ON(!ap->ops->error_handler);

        if (ap->ops->freeze)
                ap->ops->freeze(ap);

        ap->flags |= ATA_FLAG_FROZEN;

        DPRINTK("ata%u port frozen\n", ap->id);
}

/**
 * ata_port_freeze - abort & freeze port
 * @ap: ATA port to freeze
 *
 * Abort and freeze @ap.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
        int nr_aborted;

        WARN_ON(!ap->ops->error_handler);

        nr_aborted = ata_port_abort(ap);
        __ata_port_freeze(ap);

        return nr_aborted;
}
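
/*
 * Illustrative sketch (not part of this patch): on a fatal condition
 * such as an HSM violation, an interrupt handler would mark the
 * active qc and freeze the port so nothing touches the hardware
 * until a reset (ata_port_freeze() aborts all active qcs and sets
 * ATA_FLAG_FROZEN):
 *
 *      qc->err_mask |= AC_ERR_HSM;
 *      ata_port_freeze(ap);
 */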
/**
 * ata_eh_freeze_port - EH helper to freeze port
 * @ap: ATA port to freeze
 *
 * Freeze @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);
        __ata_port_freeze(ap);
        spin_unlock_irqrestore(ap->lock, flags);
}
/**
 * ata_eh_thaw_port - EH helper to thaw port
 * @ap: ATA port to thaw
 *
 * Thaw frozen port @ap.
 *
 * LOCKING:
 * None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
        unsigned long flags;

        if (!ap->ops->error_handler)
                return;

        spin_lock_irqsave(ap->lock, flags);

        ap->flags &= ~ATA_FLAG_FROZEN;

        if (ap->ops->thaw)
                ap->ops->thaw(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("ata%u port thawed\n", ap->id);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
        /* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct scsi_cmnd *scmd = qc->scsicmd;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        qc->scsidone = ata_eh_scsidone;
        __ata_qc_complete(qc);
        WARN_ON(ata_tag_valid(qc->tag));
        spin_unlock_irqrestore(ap->lock, flags);

        scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;

        scmd->retries = scmd->allowed;
        __ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
        struct scsi_cmnd *scmd = qc->scsicmd;

        if (!qc->err_mask && scmd->retries)
                scmd->retries--;
        __ata_eh_qc_complete(qc);
}
/**
 * ata_eh_detach_dev - detach ATA device
 * @dev: ATA device to detach
 *
 * Detach @dev.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_detach_dev(struct ata_device *dev)
{
        struct ata_port *ap = dev->ap;
        unsigned long flags;

        ata_dev_disable(dev);

        spin_lock_irqsave(ap->lock, flags);

        dev->flags &= ~ATA_DFLAG_DETACH;

        if (ata_scsi_offline_dev(dev)) {
                dev->flags |= ATA_DFLAG_DETACHED;
                ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
        }

        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_about_to_do - about to perform eh_action
 * @ap: target ATA port
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action about to be performed
 *
 * Called just before performing EH actions to clear related bits
 * in @ap->eh_info such that eh actions are not unnecessarily
 * repeated.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
                               unsigned int action)
{
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);
        ata_eh_clear_action(dev, &ap->eh_info, action);
        ap->flags |= ATA_FLAG_RECOVERED;
        spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_eh_done - EH action complete
 * @ap: target ATA port
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
 * Called right after performing EH actions to clear related bits
 * in @ap->eh_context.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
                        unsigned int action)
{
        ata_eh_clear_action(dev, &ap->eh_context.i, action);
}
/**
 * ata_err_string - convert err_mask to descriptive string
 * @err_mask: error mask to convert to string
 *
 * Convert @err_mask to descriptive string.  Errors are
 * prioritized according to severity and only the most severe
 * error is reported.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
        if (err_mask & AC_ERR_HOST_BUS)
                return "host bus error";
        if (err_mask & AC_ERR_ATA_BUS)
                return "ATA bus error";
        if (err_mask & AC_ERR_TIMEOUT)
                return "timeout";
        if (err_mask & AC_ERR_HSM)
                return "HSM violation";
        if (err_mask & AC_ERR_SYSTEM)
                return "internal error";
        if (err_mask & AC_ERR_MEDIA)
                return "media error";
        if (err_mask & AC_ERR_INVALID)
                return "invalid argument";
        if (err_mask & AC_ERR_DEV)
                return "device error";
        return "unknown error";
}

/**
 * ata_read_log_page - read a specific log page
 * @dev: target device
 * @page: page to read
 * @buf: buffer to store read page
 * @sectors: number of sectors to read
 *
 * Read log page using READ_LOG_EXT command.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
                                      u8 page, void *buf, unsigned int sectors)
{
        struct ata_taskfile tf;
        unsigned int err_mask;

        DPRINTK("read log page - page %d\n", page);

        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_READ_LOG_EXT;
        tf.lbal = page;
        tf.nsect = sectors;
        tf.hob_nsect = sectors >> 8;
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     buf, sectors * ATA_SECT_SIZE);

        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
}

/**
 * ata_eh_read_log_10h - Read log page 10h for NCQ error details
 * @dev: Device to read log page 10h from
 * @tag: Resulting tag of the failed command
 * @tf: Resulting taskfile registers of the failed command
 *
 * Read log page 10h to obtain NCQ error details and clear error
 * condition.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
                               int *tag, struct ata_taskfile *tf)
{
        u8 *buf = dev->ap->sector_buf;
        unsigned int err_mask;
        u8 csum;
        int i;

        err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
        if (err_mask)
                return -EIO;

        csum = 0;
        for (i = 0; i < ATA_SECT_SIZE; i++)
                csum += buf[i];
        if (csum)
                ata_dev_printk(dev, KERN_WARNING,
                        "invalid checksum 0x%x on log page 10h\n", csum);

        if (buf[0] & 0x80)
                return -ENOENT;

        *tag = buf[0] & 0x1f;

        tf->command = buf[2];
        tf->feature = buf[3];
        tf->lbal = buf[4];
        tf->lbam = buf[5];
        tf->lbah = buf[6];
        tf->device = buf[7];
        tf->hob_lbal = buf[8];
        tf->hob_lbam = buf[9];
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];

        return 0;
}
/**
 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 * @dev: device to perform REQUEST_SENSE to
 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *
 * Perform ATAPI REQUEST_SENSE after the device reported CHECK
 * CONDITION.  This function is an EH helper.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
                                           unsigned char *sense_buf)
{
        struct ata_port *ap = dev->ap;
        struct ata_taskfile tf;
        u8 cdb[ATAPI_CDB_LEN];

        DPRINTK("ATAPI request sense\n");

        ata_tf_init(dev, &tf);

        /* FIXME: is this needed? */
        memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

        /* XXX: why tf_read here? */
        ap->ops->tf_read(ap, &tf);

        /* fill these in, for the case where they are -not- overwritten */
        sense_buf[0] = 0x70;
        sense_buf[2] = tf.feature >> 4;

        memset(cdb, 0, ATAPI_CDB_LEN);
        cdb[0] = REQUEST_SENSE;
        cdb[4] = SCSI_SENSE_BUFFERSIZE;

        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;

        /* is it pointless to prefer PIO for "safety reasons"? */
        if (ap->flags & ATA_FLAG_PIO_DMA) {
                tf.protocol = ATA_PROT_ATAPI_DMA;
                tf.feature |= ATAPI_PKT_DMA;
        } else {
                tf.protocol = ATA_PROT_ATAPI;
                tf.lbam = (8 * 1024) & 0xff;
                tf.lbah = (8 * 1024) >> 8;
        }

        return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
                                 sense_buf, SCSI_SENSE_BUFFERSIZE);
}
/**
 * ata_eh_analyze_serror - analyze SError for a failed port
 * @ap: ATA port to analyze SError for
 *
 * Analyze SError if available and further determine cause of
 * failure.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_analyze_serror(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        u32 serror = ehc->i.serror;
        unsigned int err_mask = 0, action = 0;

        if (serror & SERR_PERSISTENT) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_HARDRESET;
        }
        if (serror &
            (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
                err_mask |= AC_ERR_ATA_BUS;
                action |= ATA_EH_SOFTRESET;
        }
        if (serror & SERR_PROTOCOL) {
                err_mask |= AC_ERR_HSM;
                action |= ATA_EH_SOFTRESET;
        }
        if (serror & SERR_INTERNAL) {
                err_mask |= AC_ERR_SYSTEM;
                action |= ATA_EH_SOFTRESET;
        }
        if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
                ata_ehi_hotplugged(&ehc->i);

        ehc->i.err_mask |= err_mask;
        ehc->i.action |= action;
}
/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @ap: ATA port to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all an LLDD has to
 * do is set AC_ERR_DEV in ehi->err_mask; this function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_analyze_ncq_error(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_device *dev = ap->device;
        struct ata_queued_cmd *qc;
        struct ata_taskfile tf;
        int tag, rc;

        /* if frozen, we can't do much */
        if (ap->flags & ATA_FLAG_FROZEN)
                return;

        /* is it NCQ device error? */
        if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
                return;

        /* has LLDD analyzed already? */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask)
                        return;
        }

        /* okay, this error is ours */
        rc = ata_eh_read_log_10h(dev, &tag, &tf);
        if (rc) {
                ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
                                "(errno=%d)\n", rc);
                return;
        }

        if (!(ap->sactive & (1 << tag))) {
                ata_port_printk(ap, KERN_ERR, "log page 10h reported "
                                "inactive tag %d\n", tag);
                return;
        }

        /* we've got the perpetrator, condemn it */
        qc = __ata_qc_from_tag(ap, tag);
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->err_mask |= AC_ERR_DEV;
        ehc->i.err_mask &= ~AC_ERR_DEV;
}
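
/*
 * Illustrative sketch (not part of this patch): on an NCQ device
 * error, an LLDD's interrupt handler only needs to flag the device
 * error and route the failed qcs to EH; this function then recovers
 * the failed tag and result TF from log page 10h:
 *
 *      struct ata_eh_info *ehi = &ap->eh_info;
 *
 *      ehi->err_mask |= AC_ERR_DEV;
 *      ata_port_abort(ap);
 */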
/**
 * ata_eh_analyze_tf - analyze taskfile of a failed qc
 * @qc: qc to analyze
 * @tf: Taskfile registers to analyze
 *
 * Analyze taskfile of @qc and further determine cause of
 * failure.  This function also requests ATAPI sense data if
 * available.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                                      const struct ata_taskfile *tf)
{
        unsigned int tmp, action = 0;
        u8 stat = tf->command, err = tf->feature;

        if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
                qc->err_mask |= AC_ERR_HSM;
                return ATA_EH_SOFTRESET;
        }

        if (!(qc->err_mask & AC_ERR_DEV))
                return 0;

        switch (qc->dev->class) {
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
                if (err & ATA_UNC)
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
                break;

        case ATA_DEV_ATAPI:
                tmp = atapi_eh_request_sense(qc->dev,
                                             qc->scsicmd->sense_buffer);
                if (!tmp) {
                        /* ATA_QCFLAG_SENSE_VALID is used to tell
                         * atapi_qc_complete() that sense data is
                         * already valid.
                         *
                         * TODO: interpret sense data and set
                         * appropriate err_mask.
                         */
                        qc->flags |= ATA_QCFLAG_SENSE_VALID;
                } else
                        qc->err_mask |= tmp;
        }

        if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
                action |= ATA_EH_SOFTRESET;

        return action;
}

static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
{
        if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
                return 1;

        if (ent->is_io) {
                if (ent->err_mask & AC_ERR_HSM)
                        return 1;
                if ((ent->err_mask &
                     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
                        return 2;
        }

        return 0;
}

struct speed_down_needed_arg {
        u64 since;
        int nr_errors[3];
};

static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
{
        struct speed_down_needed_arg *arg = void_arg;

        if (ent->timestamp < arg->since)
                return -1;

        arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
        return 0;
}
/**
 * ata_eh_speed_down_needed - Determine whether speed down is necessary
 * @dev: Device of interest
 *
 * This function examines the error ring of @dev and determines
 * whether speed down is necessary.  Speed down is necessary if
 * there have been more than 3 Cat-1 errors or 10 Cat-2 errors
 * during the last 15 minutes.
 *
 * Cat-1 errors are ATA_BUS and TIMEOUT for any command, and HSM
 * violation for known supported commands.
 *
 * Cat-2 errors are unclassified DEV errors for known supported
 * commands.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 1 if speed down is necessary, 0 otherwise
 */
static int ata_eh_speed_down_needed(struct ata_device *dev)
{
        const u64 interval = 15LLU * 60 * HZ;
        static const int err_limits[3] = { -1, 3, 10 };
        struct speed_down_needed_arg arg;
        struct ata_ering_entry *ent;
        int err_cat;
        u64 j64;

        ent = ata_ering_top(&dev->ering);
        if (!ent)
                return 0;

        err_cat = ata_eh_categorize_ering_entry(ent);
        if (err_cat == 0)
                return 0;

        memset(&arg, 0, sizeof(arg));

        j64 = get_jiffies_64();
        if (j64 >= interval)
                arg.since = j64 - interval;
        else
                arg.since = 0;

        ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);

        return arg.nr_errors[err_cat] > err_limits[err_cat];
}
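
/*
 * Worked example: with err_limits[] above, a device whose newest ering
 * entry is Cat-1 and which logged four Cat-1 errors (e.g. four command
 * timeouts) within the last 15 minutes yields
 * arg.nr_errors[1] == 4 > err_limits[1] == 3, so this function
 * returns 1 and the caller lowers the transfer speed.
 */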
/**
 * ata_eh_speed_down - record error and speed down if necessary
 * @dev: Failed device
 * @is_io: Did the device fail during normal IO?
 * @err_mask: err_mask of the error
 *
 * Record error and examine error history to determine whether
 * adjusting transmission speed is necessary.  It also sets
 * transmission limits appropriately if such adjustment is
 * necessary.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Determined recovery action (0 if no action is needed).
 */
static int ata_eh_speed_down(struct ata_device *dev, int is_io,
                             unsigned int err_mask)
{
        if (!err_mask)
                return 0;

        /* record error and determine whether speed down is necessary */
        ata_ering_record(&dev->ering, is_io, err_mask);

        if (!ata_eh_speed_down_needed(dev))
                return 0;

        /* speed down SATA link speed if possible */
        if (sata_down_spd_limit(dev->ap) == 0)
                return ATA_EH_HARDRESET;

        /* lower transfer mode */
        if (ata_down_xfermask_limit(dev, 0) == 0)
                return ATA_EH_SOFTRESET;

        ata_dev_printk(dev, KERN_ERR,
                       "speed down requested but no transfer mode left\n");
        return 0;
}
/**
 * ata_eh_autopsy - analyze error and determine recovery action
 * @ap: ATA port to perform autopsy on
 *
 * Analyze why @ap failed and determine which recovery action is
 * needed.  This function also sets more detailed AC_ERR_* values
 * and fills sense data for ATAPI CHECK SENSE.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_eh_autopsy(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        unsigned int action = ehc->i.action;
        struct ata_device *failed_dev = NULL;
        unsigned int all_err_mask = 0;
        int tag, is_io = 0;
        u32 serror;
        int rc;

        DPRINTK("ENTER\n");

        /* obtain and analyze SError */
        rc = sata_scr_read(ap, SCR_ERROR, &serror);
        if (rc == 0) {
                ehc->i.serror |= serror;
                ata_eh_analyze_serror(ap);
        } else if (rc != -EOPNOTSUPP)
                action |= ATA_EH_HARDRESET;

        /* analyze NCQ failure */
        ata_eh_analyze_ncq_error(ap);

        /* any real error trumps AC_ERR_OTHER */
        if (ehc->i.err_mask & ~AC_ERR_OTHER)
                ehc->i.err_mask &= ~AC_ERR_OTHER;

        all_err_mask |= ehc->i.err_mask;

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                /* inherit upper level err_mask */
                qc->err_mask |= ehc->i.err_mask;

                /* analyze TF */
                action |= ata_eh_analyze_tf(qc, &qc->result_tf);

                /* DEV errors are probably spurious in case of ATA_BUS error */
                if (qc->err_mask & AC_ERR_ATA_BUS)
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
                                          AC_ERR_INVALID);

                /* any real error trumps unknown error */
                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;

                /* SENSE_VALID trumps dev/unknown error and revalidation */
                if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
                        qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
                        action &= ~ATA_EH_REVALIDATE;
                }

                /* accumulate error info */
                failed_dev = qc->dev;
                all_err_mask |= qc->err_mask;
                if (qc->flags & ATA_QCFLAG_IO)
                        is_io = 1;
        }

        /* enforce default EH actions */
        if (ap->flags & ATA_FLAG_FROZEN ||
            all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
                action |= ATA_EH_SOFTRESET;
        else if (all_err_mask)
                action |= ATA_EH_REVALIDATE;

        /* if we have offending qcs and the associated failed device */
        if (failed_dev) {
                /* speed down */
                action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);

                /* perform per-dev EH action only on the offending device */
                ehc->i.dev_action[failed_dev->devno] |=
                        action & ATA_EH_PERDEV_MASK;
                action &= ~ATA_EH_PERDEV_MASK;
        }

        /* record autopsy result */
        ehc->i.dev = failed_dev;
        ehc->i.action = action;

        DPRINTK("EXIT\n");
}
/**
 * ata_eh_report - report error handling to user
 * @ap: ATA port EH is going on
 *
 * Report EH to user.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_report(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        const char *frozen, *desc;
        int tag, nr_failed = 0;

        desc = NULL;
        if (ehc->i.desc[0] != '\0')
                desc = ehc->i.desc;

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
                if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
                        continue;

                nr_failed++;
        }

        if (!nr_failed && !ehc->i.err_mask)
                return;

        frozen = "";
        if (ap->flags & ATA_FLAG_FROZEN)
                frozen = " frozen";

        if (ehc->i.dev) {
                ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
                               "SAct 0x%x SErr 0x%x action 0x%x%s\n",
                               ehc->i.err_mask, ap->sactive, ehc->i.serror,
                               ehc->i.action, frozen);
                if (desc)
                        ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
        } else {
                ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
                                "SAct 0x%x SErr 0x%x action 0x%x%s\n",
                                ehc->i.err_mask, ap->sactive, ehc->i.serror,
                                ehc->i.action, frozen);
                if (desc)
                        ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
        }

        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
                        continue;

                ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
                               "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
                               qc->tag, qc->tf.command, qc->err_mask,
                               qc->result_tf.command, qc->result_tf.feature,
                               ata_err_string(qc->err_mask));
        }
}
static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
                        unsigned int *classes)
{
        int i, rc;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                classes[i] = ATA_DEV_UNKNOWN;

        rc = reset(ap, classes);
        if (rc)
                return rc;

        /* If any class isn't ATA_DEV_UNKNOWN, consider classification
         * is complete and convert all ATA_DEV_UNKNOWN to
         * ATA_DEV_NONE.
         */
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (classes[i] != ATA_DEV_UNKNOWN)
                        break;

        if (i < ATA_MAX_DEVICES)
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        if (classes[i] == ATA_DEV_UNKNOWN)
                                classes[i] = ATA_DEV_NONE;

        return 0;
}

static int ata_eh_followup_srst_needed(int rc, int classify,
                                       const unsigned int *classes)
{
        if (rc == -EAGAIN)
                return 1;
        if (rc != 0)
                return 0;
        if (classify && classes[0] == ATA_DEV_UNKNOWN)
                return 1;
        return 0;
}
static int ata_eh_reset(struct ata_port *ap, int classify,
                        ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
                        ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        unsigned int *classes = ehc->classes;
        int tries = ATA_EH_RESET_TRIES;
        int verbose = !(ap->flags & ATA_FLAG_LOADING);
        unsigned int action;
        ata_reset_fn_t reset;
        int i, did_followup_srst, rc;

        /* Determine which reset to use and record in ehc->i.action.
         * prereset() may examine and modify it.
         */
        action = ehc->i.action;
        ehc->i.action &= ~ATA_EH_RESET_MASK;
        if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
                                         !(action & ATA_EH_HARDRESET))))
                ehc->i.action |= ATA_EH_SOFTRESET;
        else
                ehc->i.action |= ATA_EH_HARDRESET;

        if (prereset) {
                rc = prereset(ap);
                if (rc) {
                        ata_port_printk(ap, KERN_ERR,
                                        "prereset failed (errno=%d)\n", rc);
                        return rc;
                }
        }

        /* prereset() might have modified ehc->i.action */
        if (ehc->i.action & ATA_EH_HARDRESET)
                reset = hardreset;
        else if (ehc->i.action & ATA_EH_SOFTRESET)
                reset = softreset;
        else {
                /* prereset told us not to reset, bang classes and return */
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        classes[i] = ATA_DEV_NONE;
                return 0;
        }

        /* did prereset() screw up?  if so, fix up to avoid oopsing */
        if (!reset) {
                ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
                                "invalid reset type\n");
                if (softreset)
                        reset = softreset;
                else
                        reset = hardreset;
        }

 retry:
        /* shut up during boot probing */
        if (verbose)
                ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
                                reset == softreset ? "soft" : "hard");

        /* reset */
        ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
        ehc->i.flags |= ATA_EHI_DID_RESET;

        rc = ata_do_reset(ap, reset, classes);

        did_followup_srst = 0;
        if (reset == hardreset &&
            ata_eh_followup_srst_needed(rc, classify, classes)) {
                /* okay, let's do follow-up softreset */
                did_followup_srst = 1;
                reset = softreset;

                if (!reset) {
                        ata_port_printk(ap, KERN_ERR,
                                        "follow-up softreset required "
                                        "but no softreset available\n");
                        return -EINVAL;
                }

                ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
                rc = ata_do_reset(ap, reset, classes);

                if (rc == 0 && classify &&
                    classes[0] == ATA_DEV_UNKNOWN) {
                        ata_port_printk(ap, KERN_ERR,
                                        "classification failed\n");
                        return -EINVAL;
                }
        }

        if (rc && --tries) {
                const char *type;

                if (reset == softreset) {
                        if (did_followup_srst)
                                type = "follow-up soft";
                        else
                                type = "soft";
                } else
                        type = "hard";

                ata_port_printk(ap, KERN_WARNING,
                                "%sreset failed, retrying in 5 secs\n", type);
                ssleep(5);

                if (reset == hardreset)
                        sata_down_spd_limit(ap);
                if (hardreset)
                        reset = hardreset;
                goto retry;
        }

        if (rc == 0) {
                /* After the reset, the device state is PIO 0 and the
                 * controller state is undefined.  Record the mode.
                 */
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        ap->device[i].pio_mode = XFER_PIO_0;

                if (postreset)
                        postreset(ap, classes);

                /* reset successful, schedule revalidation */
                ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
                ehc->i.action |= ATA_EH_REVALIDATE;
        }

        return rc;
}
static int ata_eh_revalidate_and_attach(struct ata_port *ap,
                                        struct ata_device **r_failed_dev)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_device *dev;
        unsigned long flags;
        int i, rc = 0;

        DPRINTK("ENTER\n");

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                unsigned int action;

                dev = &ap->device[i];
                action = ata_eh_dev_action(dev);

                if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
                        if (ata_port_offline(ap)) {
                                rc = -EIO;
                                break;
                        }

                        ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
                        rc = ata_dev_revalidate(dev,
                                        ehc->i.flags & ATA_EHI_DID_RESET);
                        if (rc)
                                break;

                        ata_eh_done(ap, dev, ATA_EH_REVALIDATE);

                        /* schedule the scsi_rescan_device() here */
                        queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
                } else if (dev->class == ATA_DEV_UNKNOWN &&
                           ehc->tries[dev->devno] &&
                           ata_class_enabled(ehc->classes[dev->devno])) {
                        dev->class = ehc->classes[dev->devno];

                        rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
                        if (rc == 0)
                                rc = ata_dev_configure(dev, 1);

                        if (rc) {
                                dev->class = ATA_DEV_UNKNOWN;
                                break;
                        }

                        spin_lock_irqsave(ap->lock, flags);
                        ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
                        spin_unlock_irqrestore(ap->lock, flags);
                }
        }

        if (rc)
                *r_failed_dev = dev;

        DPRINTK("EXIT\n");
        return rc;
}

static int ata_port_nr_enabled(struct ata_port *ap)
{
        int i, cnt = 0;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (ata_dev_enabled(&ap->device[i]))
                        cnt++;
        return cnt;
}

static int ata_port_nr_vacant(struct ata_port *ap)
{
        int i, cnt = 0;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (ap->device[i].class == ATA_DEV_UNKNOWN)
                        cnt++;
        return cnt;
}

static int ata_eh_skip_recovery(struct ata_port *ap)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        int i;

        if (ap->flags & ATA_FLAG_FROZEN || ata_port_nr_enabled(ap))
                return 0;

        /* skip if class codes for all vacant slots are ATA_DEV_NONE */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                if (dev->class == ATA_DEV_UNKNOWN &&
                    ehc->classes[dev->devno] != ATA_DEV_NONE)
                        return 0;
        }

        return 1;
}
/**
 * ata_eh_recover - recover host port after error
 * @ap: host port to recover
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 *
 * This is the alpha and omega, eum and yang, heart and soul of
 * libata exception handling.  On entry, actions required to
 * recover the port and hotplug requests are recorded in
 * eh_context.  This function executes all the operations with
 * appropriate retries and fallbacks to resurrect failed
 * devices, detach goners and greet newcomers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
                          ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
                          ata_postreset_fn_t postreset)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_device *dev;
        int down_xfermask, i, rc;

        DPRINTK("ENTER\n");

        /* prep for recovery */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];

                ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

                /* process hotplug request */
                if (dev->flags & ATA_DFLAG_DETACH)
                        ata_eh_detach_dev(dev);

                if (!ata_dev_enabled(dev) &&
                    ((ehc->i.probe_mask & (1 << dev->devno)) &&
                     !(ehc->did_probe_mask & (1 << dev->devno)))) {
                        ata_eh_detach_dev(dev);
                        ata_dev_init(dev);
                        ehc->did_probe_mask |= (1 << dev->devno);
                        ehc->i.action |= ATA_EH_SOFTRESET;
                }
        }

 retry:
        down_xfermask = 0;
        rc = 0;

        /* if UNLOADING, finish immediately */
        if (ap->flags & ATA_FLAG_UNLOADING)
                goto out;

        /* skip EH if possible. */
        if (ata_eh_skip_recovery(ap))
                ehc->i.action = 0;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ehc->classes[i] = ATA_DEV_UNKNOWN;

        /* reset */
        if (ehc->i.action & ATA_EH_RESET_MASK) {
                ata_eh_freeze_port(ap);

                rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
                                  softreset, hardreset, postreset);
                if (rc) {
                        ata_port_printk(ap, KERN_ERR,
                                        "reset failed, giving up\n");
                        goto out;
                }

                ata_eh_thaw_port(ap);
        }

        /* revalidate existing devices and attach new ones */
        rc = ata_eh_revalidate_and_attach(ap, &dev);
        if (rc)
                goto dev_fail;

        /* configure transfer mode if the port has been reset */
        if (ehc->i.flags & ATA_EHI_DID_RESET) {
                rc = ata_set_mode(ap, &dev);
                if (rc) {
                        down_xfermask = 1;
                        goto dev_fail;
                }
        }

        goto out;

 dev_fail:
        switch (rc) {
        case -ENODEV:
                /* device missing, schedule probing */
                ehc->i.probe_mask |= (1 << dev->devno);
                /* fall through */
        case -EINVAL:
                ehc->tries[dev->devno] = 0;
                break;
        case -EIO:
                sata_down_spd_limit(ap);
                /* fall through */
        default:
                ehc->tries[dev->devno]--;
                if (down_xfermask &&
                    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
                        ehc->tries[dev->devno] = 0;
        }

        if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
                /* disable device if it has used up all its chances */
                ata_dev_disable(dev);

                /* detach if offline */
                if (ata_port_offline(ap))
                        ata_eh_detach_dev(dev);

                /* probe if requested */
                if ((ehc->i.probe_mask & (1 << dev->devno)) &&
                    !(ehc->did_probe_mask & (1 << dev->devno))) {
                        ata_eh_detach_dev(dev);
                        ata_dev_init(dev);

                        ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
                        ehc->did_probe_mask |= (1 << dev->devno);
                        ehc->i.action |= ATA_EH_SOFTRESET;
                }
        } else {
                /* soft didn't work?  be haaaaard */
                if (ehc->i.flags & ATA_EHI_DID_RESET)
                        ehc->i.action |= ATA_EH_HARDRESET;
                else
                        ehc->i.action |= ATA_EH_SOFTRESET;
        }

        if (ata_port_nr_enabled(ap)) {
                ata_port_printk(ap, KERN_WARNING, "failed to recover some "
                                "devices, retrying in 5 secs\n");
                ssleep(5);
        } else {
                /* no device left, repeat fast */
                msleep(500);
        }

        goto retry;

 out:
        if (rc) {
                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        ata_dev_disable(&ap->device[i]);
        }

        DPRINTK("EXIT, rc=%d\n", rc);
        return rc;
}
/**
 * ata_eh_finish - finish up EH
 * @ap: host port to finish EH for
 *
 * Recovery is complete.  Clean up EH states and retry or finish
 * failed qcs.
 *
 * LOCKING:
 * None.
 */
static void ata_eh_finish(struct ata_port *ap)
{
        int tag;

        /* retry or finish qcs */
        for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
                struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;

                if (qc->err_mask) {
                        /* FIXME: Once EH migration is complete,
                         * generate sense data in this function,
                         * considering both err_mask and tf.
                         */
                        if (qc->err_mask & AC_ERR_INVALID)
                                ata_eh_qc_complete(qc);
                        else
                                ata_eh_qc_retry(qc);
                } else {
                        if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
                                ata_eh_qc_complete(qc);
                        } else {
                                /* feed zero TF to sense generation */
                                memset(&qc->result_tf, 0, sizeof(qc->result_tf));
                                ata_eh_qc_retry(qc);
                        }
                }
        }
}
/**
 * ata_do_eh - do standard error handling
 * @ap: host port to handle error for
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 *
 * Perform standard error handling sequence.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
               ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
               ata_postreset_fn_t postreset)
{
        if (!(ap->flags & ATA_FLAG_LOADING)) {
                ata_eh_autopsy(ap);
                ata_eh_report(ap);
        }

        ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
        ata_eh_finish(ap);
}
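
/*
 * Illustrative sketch (not part of this patch): a typical LLDD wires
 * its ->error_handler to ata_do_eh() with whichever reset methods it
 * supports, for example:
 *
 *      static void my_error_handler(struct ata_port *ap)
 *      {
 *              ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *                        sata_std_hardreset, ata_std_postreset);
 *      }
 *
 * my_error_handler() is a hypothetical name; any of the four methods
 * may be NULL if the controller cannot perform that step.
 */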