[PATCH] libata-dev: handle DRQ=1 ERR=1 (revised)
linux-2.6/mini2440.git: drivers/scsi/libata-core.c
/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 *                Please ALWAYS copy linux-ide@vger.kernel.org
 *                on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
static unsigned int ata_dev_init_params(struct ata_port *ap,
                                        struct ata_device *dev,
                                        u16 heads,
                                        u16 sectors);
static void ata_set_mode(struct ata_port *ap);
static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
                                         struct ata_device *dev);
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;                  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
                                            bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}
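/*
 * Added illustrative sketch, not part of the original file: how a caller
 * might drive ata_tf_to_fis(). The taskfile contents are made up; only the
 * 20-byte buffer size and the 0x27 FIS-type byte come from the function
 * above. Compiled out on purpose.
 */
#if 0
static void example_tf_to_fis(const struct ata_taskfile *tf)
{
        u8 fis[20];                     /* Register - Host to Device FIS */

        ata_tf_to_fis(tf, fis, 0);      /* pmp 0: no port multiplier */
        /* fis[0] is now 0x27 and fis[1] has bit 7 set (Command FIS) */
}
#endif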
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */

        tf->lbal        = fis[4];
        tf->lbam        = fis[5];
        tf->lbah        = fis[6];
        tf->device      = fis[7];

        tf->hob_lbal    = fis[8];
        tf->hob_lbam    = fis[9];
        tf->hob_lbah    = fis[10];

        tf->nsect       = fis[12];
        tf->hob_nsect   = fis[13];
}
static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};
/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @qc: command to examine and configure
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &qc->tf;
        struct ata_device *dev = qc->dev;
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}
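/*
 * Added note, not in the original file: ata_rw_cmds[] is indexed as
 * base + fua + lba48 + write, where base is 0 (PIO multi), 8 (PIO) or
 * 16 (DMA), FUA adds 4, LBA48 adds 2 and write adds 1. A compiled-out
 * sketch of the arithmetic:
 */
#if 0
static void example_rw_cmd_index(void)
{
        /* DMA, FUA, LBA48, write: 16 + 4 + 2 + 1 == 23 */
        WARN_ON(ata_rw_cmds[16 + 4 + 2 + 1] != ATA_CMD_WRITE_FUA_EXT);
        /* FUA without LBA48 (16 + 4 + 0 + 1 == 21) hits a zero hole,
         * so ata_rwcmd_protocol() returns -1 for that combination. */
        WARN_ON(ata_rw_cmds[16 + 4 + 0 + 1] != 0);
}
#endif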
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
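/*
 * Added round-trip sketch, not in the original file: packing the three
 * per-type masks into one xfer_mask and unpacking them again is lossless
 * as long as each mask fits in its ATA_BITS_* field. Compiled out.
 */
#if 0
static void example_xfermask_roundtrip(void)
{
        unsigned int xfer_mask, pio, mwdma, udma;

        xfer_mask = ata_pack_xfermask(0x1f /* PIO0-4 */,
                                      0x07 /* MWDMA0-2 */,
                                      0x3f /* UDMA0-5 */);
        ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
        WARN_ON(pio != 0x1f || mwdma != 0x07 || udma != 0x3f);
}
#endif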
static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};
/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask. Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}
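/*
 * Added sketch, not in the original file: how the three table helpers
 * relate. Starting from a mask whose highest set bit is UDMA5, mask2mode
 * picks XFER_UDMA_5, mode2mask recovers just that single bit, and
 * mode2shift reports which field of the packed mask the mode lives in.
 */
#if 0
static void example_xfer_tbl(void)
{
        u8 mode = ata_xfer_mask2mode(ata_pack_xfermask(0, 0, 0x3f));

        WARN_ON(mode != XFER_UDMA_5);
        WARN_ON(ata_xfer_mode2mask(mode) != (0x20 << ATA_SHIFT_UDMA));
        WARN_ON(ata_xfer_mode2shift(mode) != ATA_SHIFT_UDMA);
}
#endif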
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}
static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
        if (ata_dev_enabled(dev)) {
                printk(KERN_WARNING "ata%u: dev %u disabled\n",
                       ap->id, dev->devno);
                dev->class++;
        }
}
/**
 * ata_pio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
                                   unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        outb(0xaa, ioaddr->nsect_addr);
        outb(0x55, ioaddr->lbal_addr);

        outb(0x55, ioaddr->nsect_addr);
        outb(0xaa, ioaddr->lbal_addr);

        nsect = inb(ioaddr->nsect_addr);
        lbal = inb(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}
/**
 * ata_mmio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
                                    unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
        writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

        writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
        writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

        nsect = readb((void __iomem *) ioaddr->nsect_addr);
        lbal = readb((void __iomem *) ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}
/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * Dispatch ATA device presence detection, depending
 * on whether we are using PIO or MMIO to talk to the
 * ATA shadow registers.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_devchk(struct ata_port *ap,
                               unsigned int device)
{
        if (ap->flags & ATA_FLAG_MMIO)
                return ata_mmio_devchk(ap, device);
        return ata_pio_devchk(ap, device);
}
/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those. It's sufficient for uniqueness.
         */
        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}
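/*
 * Added reference sketch, not in the original file: the post-reset
 * signature values checked above, collected in one place.
 *
 *      lbam  lbah      device type
 *      0x00  0x00      ATA (PATA signature)
 *      0x3c  0xc3      ATA (SATA signature)
 *      0x14  0xeb      ATAPI (PATA signature)
 *      0x69  0x96      ATAPI (SATA signature)
 */
#if 0
static void example_classify(void)
{
        struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };

        WARN_ON(ata_dev_classify(&tf) != ATA_DEV_ATAPI);
}
#endif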
/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers. Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags */
        if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks. Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}
/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null. @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}
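/*
 * Added usage sketch, not in the original file: extracting the model
 * string from an IDENTIFY page. The word offset 27 and 40-byte length
 * come from the ATA spec (words 27-46 hold the model number); the buffer
 * is the even maximum length plus one, as the function above requires.
 */
#if 0
static void example_read_model(const u16 *id)
{
        unsigned char model[41];        /* 40 chars + NUL; len is odd */

        ata_id_c_string(id, model, 27, sizeof(model));
        printk(KERN_DEBUG "model: %s\n", model);
}
#endif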
static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}
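/*
 * Added worked example, not in the original file: for a pre-LBA drive the
 * capacity falls back to the default CHS product id[1] * id[3] * id[6]
 * (cylinders * heads * sectors). For the classic 16383/16/63 geometry
 * that is 16383 * 16 * 63 = 16,514,064 sectors, i.e. the familiar
 * ~8.4 GB ATA CHS addressing ceiling.
 */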
/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel. Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        if (ap->flags & ATA_FLAG_MMIO) {
                writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
        } else {
                outb(tmp, ap->ioaddr.device_addr);
        }
        ata_pause(ap);  /* needed; also flushes, for mmio */
}
/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        VPRINTK("ENTER, ata%u: device %u, wait %u\n",
                ap->id, device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}
/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */
static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
                "63==0x%04x  "
                "64==0x%04x  "
                "75==0x%04x\n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x  "
                "81==0x%04x  "
                "82==0x%04x  "
                "83==0x%04x  "
                "84==0x%04x\n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x  "
                "93==0x%04x\n",
                id[88],
                id[93]);
}
/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case. Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
                pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

                /* But wait.. there's more. Design your standards by
                 * committee and you too can get a free iordy field to
                 * process. However it's the speeds not the modes that
                 * are supported... Note drivers using the timing API
                 * will get this right anyway.
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data value to pass to workqueue function
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task. There is one port_task per port and it's the
 * user's (low level driver's) responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH. ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_WORK(&ap->port_task, fn, data);

        if (!delay)
                rc = queue_work(ata_wq, &ap->port_task);
        else
                rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}
/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                DPRINTK("flush #2\n");
                flush_workqueue(ata_wq);
        }

        spin_lock_irqsave(&ap->host_set->lock, flags);
        ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        DPRINTK("EXIT\n");
}
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        qc->ap->ops->tf_read(qc->ap, &qc->tf);
        complete(waiting);
}
/**
 * ata_exec_internal - execute libata internal command
 * @ap: Port to which the command is sent
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Executes libata internal command with timeout. @tf contains
 * command on entry and result on return. Timeout and error
 * conditions are reported via return value. No recovery action
 * is taken after a command times out. It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 */
static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
                  struct ata_taskfile *tf,
                  int dma_dir, void *buf, unsigned int buflen)
{
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        DECLARE_COMPLETION(wait);
        unsigned long flags;
        unsigned int err_mask;

        spin_lock_irqsave(&ap->host_set->lock, flags);

        qc = ata_qc_new_init(ap, dev);
        BUG_ON(qc == NULL);

        qc->tf = *tf;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                ata_sg_init_one(qc, buf, buflen);
                qc->nsect = buflen / ATA_SECT_SIZE;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(&ap->host_set->lock, flags);

        if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
                ata_port_flush_task(ap);

                spin_lock_irqsave(&ap->host_set->lock, flags);

                /* We're racing with irq here. If we lose, the
                 * following test prevents us from completing the qc
                 * again. If completion irq occurs after here but
                 * before the caller cleans up, it will result in a
                 * spurious interrupt. We can live with that.
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask = AC_ERR_TIMEOUT;
                        ata_qc_complete(qc);
                        printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
                               ap->id, command);
                }

                spin_unlock_irqrestore(&ap->host_set->lock, flags);
        }

        *tf = qc->tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_PORT_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        return err_mask;
}
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        int pio;
        int speed = adev->pio_mode - XFER_PIO_0;

        if (speed < 2)
                return 0;
        if (speed > 2)
                return 1;

        /* If we have no drive specific rule, then PIO 2 is non IORDY */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240ns per cycle */
                                return 1;
                        return 0;
                }
        }
        return 0;
}
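/*
 * Added sketch, not in the original file: word ATA_ID_EIDE_PIO holds the
 * drive's minimum PIO cycle time without IORDY flow control, in
 * nanoseconds. A drive reporting 383 ns cannot meet PIO2's 240 ns cycles
 * without IORDY, so the check above returns 1. Compiled out; the direct
 * writes to adev->id are purely for illustration.
 */
#if 0
static void example_iordy(struct ata_device *adev)
{
        adev->pio_mode = XFER_PIO_2;
        adev->id[ATA_ID_FIELD_VALID] |= 2;      /* EIDE words valid */
        adev->id[ATA_ID_EIDE_PIO] = 383;        /* 383 ns > 240 ns */
        WARN_ON(ata_pio_need_iordy(adev) != 1);
}
#endif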
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @ap: port on which target device resides
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @post_reset: is this read ID post-reset?
 * @p_id: read IDENTIFY page (newly allocated)
 *
 * Read ID data from the specified device. ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
                           unsigned int *p_class, int post_reset, u16 **p_id)
{
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        u16 *id;
        const char *reason;
        int rc;

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

        id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
        if (id == NULL) {
                rc = -ENOMEM;
                reason = "out of memory";
                goto err_out;
        }

 retry:
        ata_tf_init(ap, &tf, dev->devno);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);
        if (err_mask) {
                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
                rc = -EINVAL;
                reason = "device reports illegal type";
                goto err_out;
        }

        if (post_reset && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed. reread the identify device info.
                         */
                        post_reset = 0;
                        goto retry;
                }
        }

        *p_class = class;
        *p_id = id;
        return 0;

 err_out:
        printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
               ap->id, dev->devno, reason);
        kfree(id);
        return rc;
}
static inline u8 ata_dev_knobble(const struct ata_port *ap,
                                 struct ata_device *dev)
{
        return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @ap: Port on which target device resides
 * @dev: Target device to configure
 * @print_info: Enable device info printout
 *
 * Configure @dev according to @dev->id. Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
                             int print_info)
{
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        int i, rc;

        if (!ata_dev_enabled(dev)) {
                DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
                        ap->id, dev->devno);
                return 0;
        }

        DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

        /* print device capabilities */
        if (print_info)
                printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
                       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
                       ap->id, dev->devno, id[49], id[82], id[83],
                       id[84], id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags = 0;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                dev->n_sectors = ata_id_n_sectors(id);

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";
                        }

                        /* print device info to dmesg */
                        if (print_info)
                                printk(KERN_INFO "ata%u: dev %u ATA-%d, "
                                       "max %s, %Lu sectors: %s\n",
                                       ap->id, dev->devno,
                                       ata_id_major_version(id),
                                       ata_mode_string(xfer_mask),
                                       (unsigned long long)dev->n_sectors,
                                       lba_desc);
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders  = id[1];
                        dev->heads      = id[3];
                        dev->sectors    = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads     = id[55];
                                dev->sectors   = id[56];
                        }

                        /* print device info to dmesg */
                        if (print_info)
                                printk(KERN_INFO "ata%u: dev %u ATA-%d, "
                                       "max %s, %Lu sectors: CHS %u/%u/%u\n",
                                       ap->id, dev->devno,
                                       ata_id_major_version(id),
                                       ata_mode_string(xfer_mask),
                                       (unsigned long long)dev->n_sectors,
                                       dev->cylinders, dev->heads, dev->sectors);
                }

                if (dev->id[59] & 0x100) {
                        dev->multi_count = dev->id[59] & 0xff;
                        DPRINTK("ata%u: dev %u multi count %u\n",
                                ap->id, dev->devno, dev->multi_count);
                }

                dev->cdb_len = 16;
        }

        /* ATAPI-specific feature tests */
        else if (dev->class == ATA_DEV_ATAPI) {
                char *cdb_intr_string = "";

                rc = atapi_cdb_len(id);
                if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
                        printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
                        rc = -EINVAL;
                        goto err_out_nosup;
                }
                dev->cdb_len = (unsigned int) rc;

                if (ata_id_cdb_intr(dev->id)) {
                        dev->flags |= ATA_DFLAG_CDB_INTR;
                        cdb_intr_string = ", CDB intr";
                }

                /* print device info to dmesg */
                if (print_info)
                        printk(KERN_INFO "ata%u: dev %u ATAPI, max %s%s\n",
                               ap->id, dev->devno, ata_mode_string(xfer_mask),
                               cdb_intr_string);
        }

        ap->host->max_cmd_len = 0;
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ap->host->max_cmd_len = max_t(unsigned int,
                                              ap->host->max_cmd_len,
                                              ap->device[i].cdb_len);

        /* limit bridge transfers to udma5, 200 sectors */
        if (ata_dev_knobble(ap, dev)) {
                if (print_info)
                        printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
                               ap->id, dev->devno);
                dev->udma_mask &= ATA_UDMA5;
                dev->max_sectors = ATA_MAX_SECTORS;
        }

        if (ap->ops->dev_config)
                ap->ops->dev_config(ap, dev);

        DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
        return 0;

err_out_nosup:
        DPRINTK("EXIT, err\n");
        return rc;
}
/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function. Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */
static int ata_bus_probe(struct ata_port *ap)
{
        unsigned int classes[ATA_MAX_DEVICES];
        int i, rc, found = 0;

        ata_port_probe(ap);

        /* reset and determine device classes */
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                classes[i] = ATA_DEV_UNKNOWN;

        if (ap->ops->probe_reset) {
                rc = ap->ops->probe_reset(ap, classes);
                if (rc) {
                        printk(KERN_ERR "ata%u: reset failed (errno=%d)\n",
                               ap->id, rc);
                        return rc;
                }
        } else {
                ap->ops->phy_reset(ap);

                if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
                        for (i = 0; i < ATA_MAX_DEVICES; i++)
                                classes[i] = ap->device[i].class;

                ata_port_probe(ap);
        }

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (classes[i] == ATA_DEV_UNKNOWN)
                        classes[i] = ATA_DEV_NONE;

        /* read IDENTIFY page and configure devices */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];

                dev->class = classes[i];

                if (!ata_dev_enabled(dev))
                        continue;

                WARN_ON(dev->id != NULL);
                if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
                        dev->class = ATA_DEV_NONE;
                        continue;
                }

                if (ata_dev_configure(ap, dev, 1)) {
                        ata_dev_disable(ap, dev);
                        continue;
                }

                found = 1;
        }

        if (!found)
                goto err_out_disable;

        if (ap->ops->set_mode)
                ap->ops->set_mode(ap);
        else
                ata_set_mode(ap);

        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                goto err_out_disable;

        return 0;

err_out_disable:
        ap->ops->port_disable(ap);
        return -ENODEV;
}
/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is enabled.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
        ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
/**
 * sata_print_link_status - Print SATA link status
 * @ap: SATA port to printk link status about
 *
 * This function prints link speed and status of a SATA link.
 *
 * LOCKING:
 * None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
        u32 sstatus, tmp;

        if (!ap->ops->scr_read)
                return;

        sstatus = scr_read(ap, SCR_STATUS);

        if (sata_dev_present(ap)) {
                tmp = (sstatus >> 4) & 0xf;
                printk(KERN_INFO "ata%u: SATA link up %s (SStatus %X)\n",
                       ap->id, sata_spd_string(tmp), sstatus);
        } else {
                printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
                       ap->id, sstatus);
        }
}
/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
        u32 sstatus;
        unsigned long timeout = jiffies + (HZ * 5);

        if (ap->flags & ATA_FLAG_SATA_RESET) {
                /* issue phy wake/reset */
                scr_write_flush(ap, SCR_CONTROL, 0x301);
                /* Couldn't find anything in SATA I/II specs, but
                 * AHCI-1.1 10.4.2 says at least 1 ms. */
                mdelay(1);
        }
        scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

        /* wait for phy to become ready, if necessary */
        do {
                msleep(200);
                sstatus = scr_read(ap, SCR_STATUS);
                if ((sstatus & 0xf) != 1)
                        break;
        } while (time_before(jiffies, timeout));

        /* print link status */
        sata_print_link_status(ap);

        /* TODO: phy layer with polling, timeouts, etc. */
        if (sata_dev_present(ap))
                ata_port_probe(ap);
        else
                ata_port_disable(ap);

        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;

        if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
                ata_port_disable(ap);
                return;
        }

        ap->cbl = ATA_CBL_SATA;
}
/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
        __sata_phy_reset(ap);
        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;
        ata_bus_reset(ap);
}
/**
 * ata_dev_pair - return other device on cable
 * @ap: port
 * @adev: device
 *
 * Obtain the other device on the same cable, or NULL if none
 * is present.
 */
struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
{
        struct ata_device *pair = &ap->device[1 - adev->devno];

        if (!ata_dev_enabled(pair))
                return NULL;
        return pair;
}
/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
        ap->device[0].class = ATA_DEV_NONE;
        ap->device[1].class = ATA_DEV_NONE;
        ap->flags |= ATA_FLAG_PORT_DISABLED;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

        { XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
        { XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
        { XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
        { XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

        { XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
        { XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
        { XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*      { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

        { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
        { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
        { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

        { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
        { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
        { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*      { XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */

        { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
        { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

        { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
        { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
        { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*      { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

        { 0xFF }
};
#define ENOUGH(v,unit)          (((v)-1)/(unit)+1)
#define EZ(v,unit)              ((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
        q->setup   = EZ(t->setup   * 1000,  T);
        q->act8b   = EZ(t->act8b   * 1000,  T);
        q->rec8b   = EZ(t->rec8b   * 1000,  T);
        q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
        q->active  = EZ(t->active  * 1000,  T);
        q->recover = EZ(t->recover * 1000,  T);
        q->cycle   = EZ(t->cycle   * 1000,  T);
        q->udma    = EZ(t->udma    * 1000, UT);
}
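/*
 * Added worked example, not in the original file: ENOUGH() is ceiling
 * division, and EZ() preserves zero ("not specified"). The * 1000 scales
 * the table's nanoseconds into the finer units of T/UT, which callers
 * ported from drivers/ide pass as the bus clock period in picoseconds
 * (an assumption here, inferred from those callers). For a 600 ns PIO0
 * cycle on a ~33 MHz bus (T = 30000 ps):
 *
 *      EZ(600 * 1000, 30000) = ENOUGH(600000, 30000)
 *                            = (600000 - 1) / 30000 + 1 = 20 clocks
 */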
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
                      struct ata_timing *m, unsigned int what)
{
        if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
        if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
        if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
        if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
        if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
        if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
        if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
        if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
        const struct ata_timing *t;

        for (t = ata_timing; t->mode != speed; t++)
                if (t->mode == 0xFF)
                        return NULL;
        return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                       struct ata_timing *t, int T, int UT)
{
        const struct ata_timing *s;
        struct ata_timing p;

        /*
         * Find the mode.
         */
        if (!(s = ata_timing_find_mode(speed)))
                return -EINVAL;

        memcpy(t, s, sizeof(*s));

        /*
         * If the drive is an EIDE drive, it can tell us it needs extended
         * PIO/MW_DMA cycle timing.
         */
        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
                memset(&p, 0, sizeof(p));
                if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
                        if (speed <= XFER_PIO_2)
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
                        else
                                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
                } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
                        p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
                }
                ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
        }

        /*
         * Convert the timing to bus clock counts.
         */
        ata_timing_quantize(t, t, T, UT);

        /*
         * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
         * S.M.A.R.T. and some other commands. We have to ensure that the
         * DMA cycle timing is no faster than the fastest PIO timing.
         */
        if (speed > XFER_PIO_4) {
                ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
                ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
        }

        /*
         * Lengthen active & recovery time so that cycle time is correct.
         */
        if (t->act8b + t->rec8b < t->cyc8b) {
                t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
                t->rec8b = t->cyc8b - t->act8b;
        }

        if (t->active + t->recover < t->cycle) {
                t->active += (t->cycle - (t->active + t->recover)) / 2;
                t->recover = t->cycle - t->active;
        }

        return 0;
}
static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
        unsigned int err_mask;
        int rc;

        if (dev->xfer_shift == ATA_SHIFT_PIO)
                dev->flags |= ATA_DFLAG_PIO;

        err_mask = ata_dev_set_xfermode(ap, dev);
        if (err_mask) {
                printk(KERN_ERR
                       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
                       ap->id, err_mask);
                return -EIO;
        }

        rc = ata_dev_revalidate(ap, dev, 0);
        if (rc) {
                printk(KERN_ERR
                       "ata%u: failed to revalidate after set xfermode\n",
                       ap->id);
                return rc;
        }

        DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
                dev->xfer_shift, (int)dev->xfer_mode);

        printk(KERN_INFO "ata%u: dev %u configured for %s\n",
               ap->id, dev->devno,
               ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
        return 0;
}
/**
 * ata_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 *
 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
        struct ata_device *dev;
        int i, rc, used_dma = 0, found = 0;

        /* step 1: calculate xfer_mask */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                unsigned int pio_mask, dma_mask;

                dev = &ap->device[i];

                if (!ata_dev_enabled(dev))
                        continue;

                ata_dev_xfermask(ap, dev);

                pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
                dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
                dev->pio_mode = ata_xfer_mask2mode(pio_mask);
                dev->dma_mode = ata_xfer_mask2mode(dma_mask);

                found = 1;
                if (dev->dma_mode)
                        used_dma = 1;
        }
        if (!found)
                return;

        /* step 2: always set host PIO timings */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];
                if (!ata_dev_enabled(dev))
                        continue;

                if (!dev->pio_mode) {
                        printk(KERN_WARNING "ata%u: dev %u no PIO support\n",
                               ap->id, dev->devno);
                        rc = -EINVAL;
                        goto err_out;
                }

                dev->xfer_mode = dev->pio_mode;
                dev->xfer_shift = ATA_SHIFT_PIO;
                if (ap->ops->set_piomode)
                        ap->ops->set_piomode(ap, dev);
        }

        /* step 3: set host DMA timings */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];

                if (!ata_dev_enabled(dev) || !dev->dma_mode)
                        continue;

                dev->xfer_mode = dev->dma_mode;
                dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
                if (ap->ops->set_dmamode)
                        ap->ops->set_dmamode(ap, dev);
        }

        /* step 4: update devices' xfer mode */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];

                if (!ata_dev_enabled(dev))
                        continue;

                rc = ata_dev_set_mode(ap, dev);
                if (rc)
                        goto err_out;
        }

        /* Record simplex status. If we selected DMA then the other
         * host channels are not permitted to do so.
         */
        if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
                ap->host_set->simplex_claimed = 1;

        /* step 5: chip specific finalisation */
        if (ap->ops->post_set_mode)
                ap->ops->post_set_mode(ap);

        return;

err_out:
        ata_port_disable(ap);
}
/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
                                  const struct ata_taskfile *tf)
{
        ap->ops->tf_load(ap, tf);
        ap->ops->exec_command(ap, tf);
}
/**
 * ata_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout
 * @tmout: overall timeout
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING: None.
 */
unsigned int ata_busy_sleep (struct ata_port *ap,
                             unsigned long tmout_pat, unsigned long tmout)
{
        unsigned long timer_start, timeout;
        u8 status;

        status = ata_busy_wait(ap, ATA_BUSY, 300);
        timer_start = jiffies;
        timeout = timer_start + tmout_pat;
        while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
                msleep(50);
                status = ata_busy_wait(ap, ATA_BUSY, 3);
        }

        if (status & ATA_BUSY)
                printk(KERN_WARNING "ata%u is slow to respond, "
                       "please be patient\n", ap->id);

        timeout = timer_start + tmout;
        while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
                msleep(50);
                status = ata_chk_status(ap);
        }

        if (status & ATA_BUSY) {
                printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
                       ap->id, tmout / HZ);
                return 1;
        }

        return 0;
}
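/*
 * Added usage sketch, not in the original file: callers typically pair a
 * short "impatience" timeout with a longer hard timeout, as the boot-time
 * reset paths above and below do. Compiled out.
 */
#if 0
static void example_busy_sleep(struct ata_port *ap)
{
        /* warn after ATA_TMOUT_BOOT_QUICK, give up after ATA_TMOUT_BOOT */
        if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
                printk(KERN_ERR "ata%u: device failed to become idle\n",
                       ap->id);
}
#endif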
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int dev0 = devmask & (1 << 0);
        unsigned int dev1 = devmask & (1 << 1);
        unsigned long timeout;

        /* if device 0 was found in ata_devchk, wait for its
         * BSY bit to clear
         */
        if (dev0)
                ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

        /* if device 1 was found in ata_devchk, wait for
         * register access, then wait for BSY to clear
         */
        timeout = jiffies + ATA_TMOUT_BOOT;
        while (dev1) {
                u8 nsect, lbal;

                ap->ops->dev_select(ap, 1);
                if (ap->flags & ATA_FLAG_MMIO) {
                        nsect = readb((void __iomem *) ioaddr->nsect_addr);
                        lbal = readb((void __iomem *) ioaddr->lbal_addr);
                } else {
                        nsect = inb(ioaddr->nsect_addr);
                        lbal = inb(ioaddr->lbal_addr);
                }
                if ((nsect == 1) && (lbal == 1))
                        break;
                if (time_after(jiffies, timeout)) {
                        dev1 = 0;
                        break;
                }
                msleep(50);     /* give drive a breather */
        }
        if (dev1)
                ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

        /* is all this really necessary? */
        ap->ops->dev_select(ap, 0);
        if (dev1)
                ap->ops->dev_select(ap, 1);
        if (dev0)
                ap->ops->dev_select(ap, 0);
}
static unsigned int ata_bus_softreset(struct ata_port *ap,
                                      unsigned int devmask)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        DPRINTK("ata%u: bus reset via SRST\n", ap->id);

        /* software reset. causes dev0 to be selected */
        if (ap->flags & ATA_FLAG_MMIO) {
                writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
                udelay(20);     /* FIXME: flush */
                writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
                udelay(20);     /* FIXME: flush */
                writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
        } else {
                outb(ap->ctl, ioaddr->ctl_addr);
                udelay(10);
                outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
                udelay(10);
                outb(ap->ctl, ioaddr->ctl_addr);
        }

        /* spec mandates ">= 2ms" before checking status.
         * We wait 150ms, because that was the magic delay used for
         * ATAPI devices in Hale Landis's ATADRVR, for the period of time
         * between when the ATA command register is written, and then
         * status is checked. Because waiting for "a while" before
         * checking status is fine, post SRST, we perform this magic
         * delay here as well.
         *
         * Old drivers/ide uses the 2ms rule and then waits for ready.
         */
        msleep(150);

        /* Before we perform post reset processing we want to see if
         * the bus shows 0xFF because the odd clown forgets the D7
         * pulldown resistor.
         */
        if (ata_check_status(ap) == 0xFF)
                return AC_ERR_OTHER;

        ata_bus_post_reset(ap, devmask);

        return 0;
}
/**
 * ata_bus_reset - reset host port and associated ATA channel
 * @ap: port to reset
 *
 * This is typically the first time we actually start issuing
 * commands to the ATA channel. We wait for BSY to clear, then
 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 * result. Determine what devices, if any, are on the channel
 * by looking at the device 0/1 error register. Look at the signature
 * stored in each device's taskfile registers, to determine if
 * the device is ATA or ATAPI.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 * Obtains host_set lock.
 *
 * SIDE EFFECTS:
 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
        u8 err;
        unsigned int dev0, dev1 = 0, devmask = 0;

        DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

        /* determine if device 0/1 are present */
        if (ap->flags & ATA_FLAG_SATA_RESET)
                dev0 = 1;
        else {
                dev0 = ata_devchk(ap, 0);
                if (slave_possible)
                        dev1 = ata_devchk(ap, 1);
        }

        if (dev0)
                devmask |= (1 << 0);
        if (dev1)
                devmask |= (1 << 1);

        /* select device 0 again */
        ap->ops->dev_select(ap, 0);

        /* issue bus reset */
        if (ap->flags & ATA_FLAG_SRST)
                if (ata_bus_softreset(ap, devmask))
                        goto err_out;

        /*
         * determine by signature whether we have ATA or ATAPI devices
         */
        ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
        if ((slave_possible) && (err != 0x81))
                ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

        /* re-enable interrupts */
        if (ap->ioaddr.ctl_addr)        /* FIXME: hack. create a hook instead */
                ata_irq_on(ap);

        /* is double-select really necessary? */
        if (ap->device[1].class != ATA_DEV_NONE)
                ap->ops->dev_select(ap, 1);
        if (ap->device[0].class != ATA_DEV_NONE)
                ap->ops->dev_select(ap, 0);

        /* if no devices were detected, disable this port */
        if ((ap->device[0].class == ATA_DEV_NONE) &&
            (ap->device[1].class == ATA_DEV_NONE))
                goto err_out;

        if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
                /* set up device control for ATA_FLAG_SATA_RESET */
                if (ap->flags & ATA_FLAG_MMIO)
                        writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
                else
                        outb(ap->ctl, ioaddr->ctl_addr);
        }

        DPRINTK("EXIT\n");
        return;

err_out:
        printk(KERN_ERR "ata%u: disabling port\n", ap->id);
        ap->ops->port_disable(ap);

        DPRINTK("EXIT\n");
}
static int sata_phy_resume(struct ata_port *ap)
{
        unsigned long timeout = jiffies + (HZ * 5);
        u32 sstatus;

        scr_write_flush(ap, SCR_CONTROL, 0x300);

        /* Wait for phy to become ready, if necessary. */
        do {
                msleep(200);
                sstatus = scr_read(ap, SCR_STATUS);
                if ((sstatus & 0xf) != 1)
                        return 0;
        } while (time_before(jiffies, timeout));

        return -1;
}
/**
 * ata_std_probeinit - initialize probing
 * @ap: port to be probed
 *
 * @ap is about to be probed. Initialize it. This function is
 * to be used as standard callback for ata_drive_probe_reset().
 *
 * NOTE!!! Do not use this function as probeinit if a low level
 * driver implements only hardreset. Just pass NULL as probeinit
 * in that case. Using this function is probably okay but doing
 * so makes reset sequence different from the original
 * ->phy_reset implementation and Jeff nervous. :-P
 */
void ata_std_probeinit(struct ata_port *ap)
{
        if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
                sata_phy_resume(ap);
                if (sata_dev_present(ap))
                        ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
        }
}
/**
 * ata_std_softreset - reset host port via ATA SRST
 * @ap: port to reset
 * @verbose: fail verbosely
 * @classes: resulting classes of attached devices
 *
 * Reset host port using ATA SRST. This function is to be used
 * as standard callback for ata_drive_*_reset() functions.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
{
        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
        unsigned int devmask = 0, err_mask;
        u8 err;

        DPRINTK("ENTER\n");

        if (ap->ops->scr_read && !sata_dev_present(ap)) {
                classes[0] = ATA_DEV_NONE;
                goto out;
        }

        /* determine if device 0/1 are present */
        if (ata_devchk(ap, 0))
                devmask |= (1 << 0);
        if (slave_possible && ata_devchk(ap, 1))
                devmask |= (1 << 1);

        /* select device 0 again */
        ap->ops->dev_select(ap, 0);

        /* issue bus reset */
        DPRINTK("about to softreset, devmask=%x\n", devmask);
        err_mask = ata_bus_softreset(ap, devmask);
        if (err_mask) {
                if (verbose)
                        printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
                               ap->id, err_mask);
                else
                        DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
                                err_mask);
                return -EIO;
        }

        /* determine by signature whether we have ATA or ATAPI devices */
        classes[0] = ata_dev_try_classify(ap, 0, &err);
        if (slave_possible && err != 0x81)
                classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
        DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
        return 0;
}
2235 * sata_std_hardreset - reset host port via SATA phy reset
2236 * @ap: port to reset
2237 * @verbose: fail verbosely
2238 * @class: resulting class of attached device
2240 * SATA phy-reset host port using DET bits of SControl register.
2241 * This function is to be used as standard callback for
2242 * ata_drive_*_reset().
2244 * LOCKING:
2245 * Kernel thread context (may sleep)
2247 * RETURNS:
2248 * 0 on success, -errno otherwise.
2250 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2252 DPRINTK("ENTER\n");
2254 /* Issue phy wake/reset */
2255 scr_write_flush(ap, SCR_CONTROL, 0x301);
2258 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2259 * 10.4.2 says at least 1 ms.
2261 msleep(1);
2263 /* Bring phy back */
2264 sata_phy_resume(ap);
2266 /* TODO: phy layer with polling, timeouts, etc. */
2267 if (!sata_dev_present(ap)) {
2268 *class = ATA_DEV_NONE;
2269 DPRINTK("EXIT, link offline\n");
2270 return 0;
2273 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2274 if (verbose)
2275 printk(KERN_ERR "ata%u: COMRESET failed "
2276 "(device not ready)\n", ap->id);
2277 else
2278 DPRINTK("EXIT, device not ready\n");
2279 return -EIO;
2282 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2284 *class = ata_dev_try_classify(ap, 0, NULL);
2286 DPRINTK("EXIT, class=%u\n", *class);
2287 return 0;
2291 * ata_std_postreset - standard postreset callback
2292 * @ap: the target ata_port
2293 * @classes: classes of attached devices
2295 * This function is invoked after a successful reset. Note that
2296 * the device might have been reset more than once using
2297 * different reset methods before postreset is invoked.
2299 * This function is to be used as standard callback for
2300 * ata_drive_*_reset().
2302 * LOCKING:
2303 * Kernel thread context (may sleep)
2305 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2307 DPRINTK("ENTER\n");
2309 /* set cable type if it isn't already set */
2310 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2311 ap->cbl = ATA_CBL_SATA;
2313 /* print link status */
2314 if (ap->cbl == ATA_CBL_SATA)
2315 sata_print_link_status(ap);
2317 /* re-enable interrupts */
2318 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2319 ata_irq_on(ap);
2321 /* is double-select really necessary? */
2322 if (classes[0] != ATA_DEV_NONE)
2323 ap->ops->dev_select(ap, 1);
2324 if (classes[1] != ATA_DEV_NONE)
2325 ap->ops->dev_select(ap, 0);
2327 /* bail out if no device is present */
2328 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2329 DPRINTK("EXIT, no device\n");
2330 return;
2333 /* set up device control */
2334 if (ap->ioaddr.ctl_addr) {
2335 if (ap->flags & ATA_FLAG_MMIO)
2336 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2337 else
2338 outb(ap->ctl, ap->ioaddr.ctl_addr);
2341 DPRINTK("EXIT\n");
2345 * ata_std_probe_reset - standard probe reset method
2346 * @ap: port to perform probe-reset
2347 * @classes: resulting classes of attached devices
2349 * The stock off-the-shelf ->probe_reset method.
2351 * LOCKING:
2352 * Kernel thread context (may sleep)
2354 * RETURNS:
2355 * 0 on success, -errno otherwise.
2357 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2359 ata_reset_fn_t hardreset;
2361 hardreset = NULL;
2362 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2363 hardreset = sata_std_hardreset;
2365 return ata_drive_probe_reset(ap, ata_std_probeinit,
2366 ata_std_softreset, hardreset,
2367 ata_std_postreset, classes);
2370 static int ata_do_reset(struct ata_port *ap,
2371 ata_reset_fn_t reset, ata_postreset_fn_t postreset,
2372 int verbose, unsigned int *classes)
2374 int i, rc;
2376 for (i = 0; i < ATA_MAX_DEVICES; i++)
2377 classes[i] = ATA_DEV_UNKNOWN;
2379 rc = reset(ap, verbose, classes);
2380 if (rc)
2381 return rc;
2383 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2384 * is complete and convert all ATA_DEV_UNKNOWN to
2385 * ATA_DEV_NONE.
2387 for (i = 0; i < ATA_MAX_DEVICES; i++)
2388 if (classes[i] != ATA_DEV_UNKNOWN)
2389 break;
2391 if (i < ATA_MAX_DEVICES)
2392 for (i = 0; i < ATA_MAX_DEVICES; i++)
2393 if (classes[i] == ATA_DEV_UNKNOWN)
2394 classes[i] = ATA_DEV_NONE;
2396 if (postreset)
2397 postreset(ap, classes);
2399 return 0;
2403 * ata_drive_probe_reset - Perform probe reset with given methods
2404 * @ap: port to reset
2405 * @probeinit: probeinit method (can be NULL)
2406 * @softreset: softreset method (can be NULL)
2407 * @hardreset: hardreset method (can be NULL)
2408 * @postreset: postreset method (can be NULL)
2409 * @classes: resulting classes of attached devices
2411 * Reset the specified port and classify attached devices using
2412 * given methods. This function prefers softreset but tries all
2413 * possible reset sequences to reset and classify devices. This
2414 * function is intended to be used for constructing ->probe_reset
2415 * callback by low level drivers.
2417 * Reset methods should follow the following rules.
2419 * - Return 0 on success, -errno on failure.
2420 * - If classification is supported, fill classes[] with
2421 * recognized class codes.
2422 * - If classification is not supported, leave classes[] alone.
2423 * - If verbose is non-zero, print error message on failure;
2424 * otherwise, shut up.
2426 * LOCKING:
2427 * Kernel thread context (may sleep)
2429 * RETURNS:
2430 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2431 * if classification fails, and any error code from reset
2432 * methods.
2434 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2435 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2436 ata_postreset_fn_t postreset, unsigned int *classes)
2438 int rc = -EINVAL;
2440 if (probeinit)
2441 probeinit(ap);
2443 if (softreset) {
2444 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2445 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2446 goto done;
2449 if (!hardreset)
2450 goto done;
2452 rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
2453 if (rc || classes[0] != ATA_DEV_UNKNOWN)
2454 goto done;
2456 if (softreset)
2457 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2459 done:
2460 if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
2461 rc = -ENODEV;
2462 return rc;
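/* Illustrative sketch, not part of libata-core.c: a low-level driver
 * that implements only hardreset could build its ->probe_reset on top
 * of ata_drive_probe_reset(), passing NULL probeinit as advised in the
 * ata_std_probeinit() comment above. my_hardreset() and
 * my_probe_reset() are hypothetical driver callbacks following the
 * rules listed above.
 */
static int my_hardreset(struct ata_port *ap, int verbose,
			unsigned int *classes)
{
	/* hypothetical: controller-specific setup would go here */
	return sata_std_hardreset(ap, verbose, &classes[0]);
}

static int my_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	return ata_drive_probe_reset(ap, NULL,
				     NULL, my_hardreset,
				     ata_std_postreset, classes);
}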
2466 * ata_dev_same_device - Determine whether new ID matches configured device
2467 * @ap: port on which the device to compare against resides
2468 * @dev: device to compare against
2469 * @new_class: class of the new device
2470 * @new_id: IDENTIFY page of the new device
2472 * Compare @new_class and @new_id against @dev and determine
2473 * whether @dev is the device indicated by @new_class and
2474 * @new_id.
2476 * LOCKING:
2477 * None.
2479 * RETURNS:
2480 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2482 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2483 unsigned int new_class, const u16 *new_id)
2485 const u16 *old_id = dev->id;
2486 unsigned char model[2][41], serial[2][21];
2487 u64 new_n_sectors;
2489 if (dev->class != new_class) {
2490 printk(KERN_INFO
2491 "ata%u: dev %u class mismatch %d != %d\n",
2492 ap->id, dev->devno, dev->class, new_class);
2493 return 0;
2496 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2497 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2498 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2499 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2500 new_n_sectors = ata_id_n_sectors(new_id);
2502 if (strcmp(model[0], model[1])) {
2503 printk(KERN_INFO
2504 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2505 ap->id, dev->devno, model[0], model[1]);
2506 return 0;
2509 if (strcmp(serial[0], serial[1])) {
2510 printk(KERN_INFO
2511 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2512 ap->id, dev->devno, serial[0], serial[1]);
2513 return 0;
2516 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2517 printk(KERN_INFO
2518 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2519 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2520 (unsigned long long)new_n_sectors);
2521 return 0;
2524 return 1;
2528 * ata_dev_revalidate - Revalidate ATA device
2529 * @ap: port on which the device to revalidate resides
2530 * @dev: device to revalidate
2531 * @post_reset: is this revalidation after reset?
2533 * Re-read IDENTIFY page and make sure @dev is still attached to
2534 * the port.
2536 * LOCKING:
2537 * Kernel thread context (may sleep)
2539 * RETURNS:
2540 * 0 on success, negative errno otherwise
2542 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2543 int post_reset)
2545 unsigned int class;
2546 u16 *id;
2547 int rc;
2549 if (!ata_dev_enabled(dev))
2550 return -ENODEV;
2552 class = dev->class;
2553 id = NULL;
2555 /* allocate & read ID data */
2556 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2557 if (rc)
2558 goto fail;
2560 /* is the device still there? */
2561 if (!ata_dev_same_device(ap, dev, class, id)) {
2562 rc = -ENODEV;
2563 goto fail;
2566 kfree(dev->id);
2567 dev->id = id;
2569 /* configure device according to the new ID */
2570 return ata_dev_configure(ap, dev, 0);
2572 fail:
2573 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2574 ap->id, dev->devno, rc);
2575 kfree(id);
2576 return rc;
2579 static const char * const ata_dma_blacklist [] = {
2580 "WDC AC11000H", NULL,
2581 "WDC AC22100H", NULL,
2582 "WDC AC32500H", NULL,
2583 "WDC AC33100H", NULL,
2584 "WDC AC31600H", NULL,
2585 "WDC AC32100H", "24.09P07",
2586 "WDC AC23200L", "21.10N21",
2587 "Compaq CRD-8241B", NULL,
2588 "CRD-8400B", NULL,
2589 "CRD-8480B", NULL,
2590 "CRD-8482B", NULL,
2591 "CRD-84", NULL,
2592 "SanDisk SDP3B", NULL,
2593 "SanDisk SDP3B-64", NULL,
2594 "SANYO CD-ROM CRD", NULL,
2595 "HITACHI CDR-8", NULL,
2596 "HITACHI CDR-8335", NULL,
2597 "HITACHI CDR-8435", NULL,
2598 "Toshiba CD-ROM XM-6202B", NULL,
2599 "TOSHIBA CD-ROM XM-1702BC", NULL,
2600 "CD-532E-A", NULL,
2601 "E-IDE CD-ROM CR-840", NULL,
2602 "CD-ROM Drive/F5A", NULL,
2603 "WPI CDD-820", NULL,
2604 "SAMSUNG CD-ROM SC-148C", NULL,
2605 "SAMSUNG CD-ROM SC", NULL,
2606 "SanDisk SDP3B-64", NULL,
2607 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2608 "_NEC DV5800A", NULL,
2609 "SAMSUNG CD-ROM SN-124", "N001"
2612 static int ata_strim(char *s, size_t len)
2614 len = strnlen(s, len);
2616 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2617 while ((len > 0) && (s[len - 1] == ' ')) {
2618 len--;
2619 s[len] = 0;
2621 return len;
2624 static int ata_dma_blacklisted(const struct ata_device *dev)
2626 unsigned char model_num[40];
2627 unsigned char model_rev[16];
2628 unsigned int nlen, rlen;
2629 int i;
2631 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2632 sizeof(model_num));
2633 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2634 sizeof(model_rev));
2635 nlen = ata_strim(model_num, sizeof(model_num));
2636 rlen = ata_strim(model_rev, sizeof(model_rev));
2638 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2639 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2640 if (ata_dma_blacklist[i+1] == NULL)
2641 return 1;
2642 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2643 return 1;
2646 return 0;
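/* Note on the table above (descriptive comment, not part of
 * libata-core.c): entries come in { model, firmware revision } pairs,
 * which is why ata_dma_blacklisted() steps through the array by two.
 * A NULL revision blacklists every firmware revision of that model;
 * a hypothetical entry restricted to a single revision would read:
 *
 *	"VENDOR MODEL-123",	"1.00",
 */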
2650 * ata_dev_xfermask - Compute supported xfermask of the given device
2651 * @ap: Port on which the device to compute xfermask for resides
2652 * @dev: Device to compute xfermask for
2654 * Compute supported xfermask of @dev and store it in
2655 * dev->*_mask. This function is responsible for applying all
2656 * known limits including host controller limits, device
2657 * blacklist, etc...
2659 * FIXME: The current implementation limits all transfer modes to
2660 * the fastest mode of the slowest device on the port. This is not
2661 * required on most controllers.
2663 * LOCKING:
2664 * None.
2666 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2668 struct ata_host_set *hs = ap->host_set;
2669 unsigned long xfer_mask;
2670 int i;
2672 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2673 ap->udma_mask);
2675 /* FIXME: Use port-wide xfermask for now */
2676 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2677 struct ata_device *d = &ap->device[i];
2678 if (!ata_dev_enabled(d))
2679 continue;
2680 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2681 d->udma_mask);
2682 xfer_mask &= ata_id_xfermask(d->id);
2683 if (ata_dma_blacklisted(d))
2684 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2685 /* Apply cable rule here. Don't apply it early because when
2686 we handle hot plug the cable type can itself change */
2687 if (ap->cbl == ATA_CBL_PATA40)
2688 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2691 if (ata_dma_blacklisted(dev))
2692 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2693 "disabling DMA\n", ap->id, dev->devno);
2695 if (hs->flags & ATA_HOST_SIMPLEX) {
2696 if (hs->simplex_claimed)
2697 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2699 if (ap->ops->mode_filter)
2700 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2702 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2703 &dev->udma_mask);
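/* Illustrative sketch, not part of libata-core.c: the mask plumbing
 * used above. ata_pack_xfermask() merges the PIO/MWDMA/UDMA masks
 * into a single word so that limits can be applied with plain bitwise
 * AND, and ata_unpack_xfermask() splits the result back out. A
 * hypothetical standalone 40-wire cable limit would look like:
 */
static void my_apply_pata40_limit(struct ata_device *dev)
{
	unsigned long xfer_mask;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);
	/* drop UDMA3 and above, leaving at most UDMA/33 */
	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);
}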
2707 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2708 * @ap: Port associated with device @dev
2709 * @dev: Device to which command will be sent
2711 * Issue SET FEATURES - XFER MODE command to device @dev
2712 * on port @ap.
2714 * LOCKING:
2715 * PCI/etc. bus probe sem.
2717 * RETURNS:
2718 * 0 on success, AC_ERR_* mask otherwise.
2721 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2722 struct ata_device *dev)
2724 struct ata_taskfile tf;
2725 unsigned int err_mask;
2727 /* set up set-features taskfile */
2728 DPRINTK("set features - xfer mode\n");
2730 ata_tf_init(ap, &tf, dev->devno);
2731 tf.command = ATA_CMD_SET_FEATURES;
2732 tf.feature = SETFEATURES_XFER;
2733 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2734 tf.protocol = ATA_PROT_NODATA;
2735 tf.nsect = dev->xfer_mode;
2737 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2739 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2740 return err_mask;
2744 * ata_dev_init_params - Issue INIT DEV PARAMS command
2745 * @ap: Port associated with device @dev
2746 * @dev: Device to which command will be sent
2748 * LOCKING:
2749 * Kernel thread context (may sleep)
2751 * RETURNS:
2752 * 0 on success, AC_ERR_* mask otherwise.
2755 static unsigned int ata_dev_init_params(struct ata_port *ap,
2756 struct ata_device *dev,
2757 u16 heads,
2758 u16 sectors)
2760 struct ata_taskfile tf;
2761 unsigned int err_mask;
2763 /* Number of sectors per track 1-255. Number of heads 1-16 */
2764 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2765 return AC_ERR_INVALID;
2767 /* set up init dev params taskfile */
2768 DPRINTK("init dev params \n");
2770 ata_tf_init(ap, &tf, dev->devno);
2771 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2772 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2773 tf.protocol = ATA_PROT_NODATA;
2774 tf.nsect = sectors;
2775 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2777 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2779 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2780 return err_mask;
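/* Worked example for the encoding above (illustrative, not part of
 * libata-core.c): for a classic 16-head, 63-sectors-per-track
 * geometry, ata_dev_init_params(ap, dev, 16, 63) yields
 * tf.nsect = 63 and tf.device |= (16 - 1) & 0x0f = 0x0f, i.e. a
 * "max head" value of 15.
 */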
2784 * ata_sg_clean - Unmap DMA memory associated with command
2785 * @qc: Command containing DMA memory to be released
2787 * Unmap all mapped DMA memory associated with this command.
2789 * LOCKING:
2790 * spin_lock_irqsave(host_set lock)
2793 static void ata_sg_clean(struct ata_queued_cmd *qc)
2795 struct ata_port *ap = qc->ap;
2796 struct scatterlist *sg = qc->__sg;
2797 int dir = qc->dma_dir;
2798 void *pad_buf = NULL;
2800 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2801 WARN_ON(sg == NULL);
2803 if (qc->flags & ATA_QCFLAG_SINGLE)
2804 WARN_ON(qc->n_elem > 1);
2806 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2808 /* if we padded the buffer out to 32-bit bound, and data
2809 * xfer direction is from-device, we must copy from the
2810 * pad buffer back into the supplied buffer
2812 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2813 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2815 if (qc->flags & ATA_QCFLAG_SG) {
2816 if (qc->n_elem)
2817 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2818 /* restore last sg */
2819 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2820 if (pad_buf) {
2821 struct scatterlist *psg = &qc->pad_sgent;
2822 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2823 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2824 kunmap_atomic(addr, KM_IRQ0);
2826 } else {
2827 if (qc->n_elem)
2828 dma_unmap_single(ap->dev,
2829 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2830 dir);
2831 /* restore sg */
2832 sg->length += qc->pad_len;
2833 if (pad_buf)
2834 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2835 pad_buf, qc->pad_len);
2838 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2839 qc->__sg = NULL;
2843 * ata_fill_sg - Fill PCI IDE PRD table
2844 * @qc: Metadata associated with taskfile to be transferred
2846 * Fill PCI IDE PRD (scatter-gather) table with segments
2847 * associated with the current disk command.
2849 * LOCKING:
2850 * spin_lock_irqsave(host_set lock)
2853 static void ata_fill_sg(struct ata_queued_cmd *qc)
2855 struct ata_port *ap = qc->ap;
2856 struct scatterlist *sg;
2857 unsigned int idx;
2859 WARN_ON(qc->__sg == NULL);
2860 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2862 idx = 0;
2863 ata_for_each_sg(sg, qc) {
2864 u32 addr, offset;
2865 u32 sg_len, len;
2867 /* determine if physical DMA addr spans 64K boundary.
2868 * Note h/w doesn't support 64-bit, so we unconditionally
2869 * truncate dma_addr_t to u32.
2871 addr = (u32) sg_dma_address(sg);
2872 sg_len = sg_dma_len(sg);
2874 while (sg_len) {
2875 offset = addr & 0xffff;
2876 len = sg_len;
2877 if ((offset + sg_len) > 0x10000)
2878 len = 0x10000 - offset;
2880 ap->prd[idx].addr = cpu_to_le32(addr);
2881 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2882 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2884 idx++;
2885 sg_len -= len;
2886 addr += len;
2890 if (idx)
2891 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
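/* Worked example for the boundary handling above (illustrative, not
 * part of libata-core.c): a 10 KiB segment at bus address 0x1f000
 * crosses a 64K boundary, so it is emitted as two PRD entries:
 *
 *	PRD[n]   = (0x1f000, 0x1000)	4 KiB up to the 64K boundary
 *	PRD[n+1] = (0x20000, 0x1800)	the remaining 6 KiB
 *
 * since offset = 0x1f000 & 0xffff = 0xf000 and
 * 0x10000 - 0xf000 = 0x1000.
 */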
2894 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2895 * @qc: Metadata associated with taskfile to check
2897 * Allow low-level driver to filter ATA PACKET commands, returning
2898 * a status indicating whether or not it is OK to use DMA for the
2899 * supplied PACKET command.
2901 * LOCKING:
2902 * spin_lock_irqsave(host_set lock)
2904 * RETURNS: 0 when ATAPI DMA can be used
2905 * nonzero otherwise
2907 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2909 struct ata_port *ap = qc->ap;
2910 int rc = 0; /* Assume ATAPI DMA is OK by default */
2912 if (ap->ops->check_atapi_dma)
2913 rc = ap->ops->check_atapi_dma(qc);
2915 /* We don't support polling DMA.
2916 * Use PIO if the LLDD handles only interrupts in
2917 * the HSM_ST_LAST state and the ATAPI device
2918 * generates CDB interrupts.
2920 if ((ap->flags & ATA_FLAG_PIO_POLLING) &&
2921 (qc->dev->flags & ATA_DFLAG_CDB_INTR))
2922 rc = 1;
2924 return rc;
2927 * ata_qc_prep - Prepare taskfile for submission
2928 * @qc: Metadata associated with taskfile to be prepared
2930 * Prepare ATA taskfile for submission.
2932 * LOCKING:
2933 * spin_lock_irqsave(host_set lock)
2935 void ata_qc_prep(struct ata_queued_cmd *qc)
2937 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2938 return;
2940 ata_fill_sg(qc);
2943 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2946 * ata_sg_init_one - Associate command with memory buffer
2947 * @qc: Command to be associated
2948 * @buf: Memory buffer
2949 * @buflen: Length of memory buffer, in bytes.
2951 * Initialize the data-related elements of queued_cmd @qc
2952 * to point to a single memory buffer, @buf of byte length @buflen.
2954 * LOCKING:
2955 * spin_lock_irqsave(host_set lock)
2958 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2960 struct scatterlist *sg;
2962 qc->flags |= ATA_QCFLAG_SINGLE;
2964 memset(&qc->sgent, 0, sizeof(qc->sgent));
2965 qc->__sg = &qc->sgent;
2966 qc->n_elem = 1;
2967 qc->orig_n_elem = 1;
2968 qc->buf_virt = buf;
2970 sg = qc->__sg;
2971 sg_init_one(sg, buf, buflen);
2975 * ata_sg_init - Associate command with scatter-gather table.
2976 * @qc: Command to be associated
2977 * @sg: Scatter-gather table.
2978 * @n_elem: Number of elements in s/g table.
2980 * Initialize the data-related elements of queued_cmd @qc
2981 * to point to a scatter-gather table @sg, containing @n_elem
2982 * elements.
2984 * LOCKING:
2985 * spin_lock_irqsave(host_set lock)
2988 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2989 unsigned int n_elem)
2991 qc->flags |= ATA_QCFLAG_SG;
2992 qc->__sg = sg;
2993 qc->n_elem = n_elem;
2994 qc->orig_n_elem = n_elem;
2998 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2999 * @qc: Command with memory buffer to be mapped.
3001 * DMA-map the memory buffer associated with queued_cmd @qc.
3003 * LOCKING:
3004 * spin_lock_irqsave(host_set lock)
3006 * RETURNS:
3007 * Zero on success, negative on error.
3010 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3012 struct ata_port *ap = qc->ap;
3013 int dir = qc->dma_dir;
3014 struct scatterlist *sg = qc->__sg;
3015 dma_addr_t dma_address;
3016 int trim_sg = 0;
3018 /* we must lengthen transfers to end on a 32-bit boundary */
3019 qc->pad_len = sg->length & 3;
3020 if (qc->pad_len) {
3021 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3022 struct scatterlist *psg = &qc->pad_sgent;
3024 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3026 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3028 if (qc->tf.flags & ATA_TFLAG_WRITE)
3029 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3030 qc->pad_len);
3032 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3033 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3034 /* trim sg */
3035 sg->length -= qc->pad_len;
3036 if (sg->length == 0)
3037 trim_sg = 1;
3039 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3040 sg->length, qc->pad_len);
3043 if (trim_sg) {
3044 qc->n_elem--;
3045 goto skip_map;
3048 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3049 sg->length, dir);
3050 if (dma_mapping_error(dma_address)) {
3051 /* restore sg */
3052 sg->length += qc->pad_len;
3053 return -1;
3056 sg_dma_address(sg) = dma_address;
3057 sg_dma_len(sg) = sg->length;
3059 skip_map:
3060 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3061 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3063 return 0;
3067 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3068 * @qc: Command with scatter-gather table to be mapped.
3070 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3072 * LOCKING:
3073 * spin_lock_irqsave(host_set lock)
3075 * RETURNS:
3076 * Zero on success, negative on error.
3080 static int ata_sg_setup(struct ata_queued_cmd *qc)
3082 struct ata_port *ap = qc->ap;
3083 struct scatterlist *sg = qc->__sg;
3084 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3085 int n_elem, pre_n_elem, dir, trim_sg = 0;
3087 VPRINTK("ENTER, ata%u\n", ap->id);
3088 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3090 /* we must lengthen transfers to end on a 32-bit boundary */
3091 qc->pad_len = lsg->length & 3;
3092 if (qc->pad_len) {
3093 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3094 struct scatterlist *psg = &qc->pad_sgent;
3095 unsigned int offset;
3097 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3099 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3102 * psg->page/offset are used to copy to-be-written
3103 * data in this function or read data in ata_sg_clean.
3105 offset = lsg->offset + lsg->length - qc->pad_len;
3106 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3107 psg->offset = offset_in_page(offset);
3109 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3110 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3111 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3112 kunmap_atomic(addr, KM_IRQ0);
3115 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3116 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3117 /* trim last sg */
3118 lsg->length -= qc->pad_len;
3119 if (lsg->length == 0)
3120 trim_sg = 1;
3122 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3123 qc->n_elem - 1, lsg->length, qc->pad_len);
3126 pre_n_elem = qc->n_elem;
3127 if (trim_sg && pre_n_elem)
3128 pre_n_elem--;
3130 if (!pre_n_elem) {
3131 n_elem = 0;
3132 goto skip_map;
3135 dir = qc->dma_dir;
3136 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3137 if (n_elem < 1) {
3138 /* restore last sg */
3139 lsg->length += qc->pad_len;
3140 return -1;
3143 DPRINTK("%d sg elements mapped\n", n_elem);
3145 skip_map:
3146 qc->n_elem = n_elem;
3148 return 0;
3152 * ata_poll_qc_complete - turn irq back on and finish qc
3153 * @qc: Command to complete
3156 * LOCKING:
3157 * None. (grabs host lock)
3160 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3162 struct ata_port *ap = qc->ap;
3163 unsigned long flags;
3165 spin_lock_irqsave(&ap->host_set->lock, flags);
3166 ata_irq_on(ap);
3167 ata_qc_complete(qc);
3168 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3172 * swap_buf_le16 - swap halves of 16-bit words in place
3173 * @buf: Buffer to swap
3174 * @buf_words: Number of 16-bit words in buffer.
3176 * Swap halves of 16-bit words if needed to convert from
3177 * little-endian byte order to native cpu byte order, or
3178 * vice-versa.
3180 * LOCKING:
3181 * Inherited from caller.
3183 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3185 #ifdef __BIG_ENDIAN
3186 unsigned int i;
3188 for (i = 0; i < buf_words; i++)
3189 buf[i] = le16_to_cpu(buf[i]);
3190 #endif /* __BIG_ENDIAN */
3194 * ata_mmio_data_xfer - Transfer data by MMIO
3195 * @ap: port to read/write
3196 * @buf: data buffer
3197 * @buflen: buffer length
3198 * @write_data: read/write
3200 * Transfer data from/to the device data register by MMIO.
3202 * LOCKING:
3203 * Inherited from caller.
3206 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3207 unsigned int buflen, int write_data)
3209 unsigned int i;
3210 unsigned int words = buflen >> 1;
3211 u16 *buf16 = (u16 *) buf;
3212 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3214 /* Transfer multiple of 2 bytes */
3215 if (write_data) {
3216 for (i = 0; i < words; i++)
3217 writew(le16_to_cpu(buf16[i]), mmio);
3218 } else {
3219 for (i = 0; i < words; i++)
3220 buf16[i] = cpu_to_le16(readw(mmio));
3223 /* Transfer trailing 1 byte, if any. */
3224 if (unlikely(buflen & 0x01)) {
3225 u16 align_buf[1] = { 0 };
3226 unsigned char *trailing_buf = buf + buflen - 1;
3228 if (write_data) {
3229 memcpy(align_buf, trailing_buf, 1);
3230 writew(le16_to_cpu(align_buf[0]), mmio);
3231 } else {
3232 align_buf[0] = cpu_to_le16(readw(mmio));
3233 memcpy(trailing_buf, align_buf, 1);
3239 * ata_pio_data_xfer - Transfer data by PIO
3240 * @ap: port to read/write
3241 * @buf: data buffer
3242 * @buflen: buffer length
3243 * @write_data: read/write
3245 * Transfer data from/to the device data register by PIO.
3247 * LOCKING:
3248 * Inherited from caller.
3251 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3252 unsigned int buflen, int write_data)
3254 unsigned int words = buflen >> 1;
3256 /* Transfer multiple of 2 bytes */
3257 if (write_data)
3258 outsw(ap->ioaddr.data_addr, buf, words);
3259 else
3260 insw(ap->ioaddr.data_addr, buf, words);
3262 /* Transfer trailing 1 byte, if any. */
3263 if (unlikely(buflen & 0x01)) {
3264 u16 align_buf[1] = { 0 };
3265 unsigned char *trailing_buf = buf + buflen - 1;
3267 if (write_data) {
3268 memcpy(align_buf, trailing_buf, 1);
3269 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3270 } else {
3271 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3272 memcpy(trailing_buf, align_buf, 1);
3278 * ata_data_xfer - Transfer data from/to the data register.
3279 * @ap: port to read/write
3280 * @buf: data buffer
3281 * @buflen: buffer length
3282 * @do_write: read/write
3284 * Transfer data from/to the device data register.
3286 * LOCKING:
3287 * Inherited from caller.
3290 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3291 unsigned int buflen, int do_write)
3293 /* Make the crap hardware pay the costs not the good stuff */
3294 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3295 unsigned long flags;
3296 local_irq_save(flags);
3297 if (ap->flags & ATA_FLAG_MMIO)
3298 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3299 else
3300 ata_pio_data_xfer(ap, buf, buflen, do_write);
3301 local_irq_restore(flags);
3302 } else {
3303 if (ap->flags & ATA_FLAG_MMIO)
3304 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3305 else
3306 ata_pio_data_xfer(ap, buf, buflen, do_write);
3311 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3312 * @qc: Command on going
3314 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3316 * LOCKING:
3317 * Inherited from caller.
3320 static void ata_pio_sector(struct ata_queued_cmd *qc)
3322 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3323 struct scatterlist *sg = qc->__sg;
3324 struct ata_port *ap = qc->ap;
3325 struct page *page;
3326 unsigned int offset;
3327 unsigned char *buf;
3329 if (qc->cursect == (qc->nsect - 1))
3330 ap->hsm_task_state = HSM_ST_LAST;
3332 page = sg[qc->cursg].page;
3333 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3335 /* get the current page and offset */
3336 page = nth_page(page, (offset >> PAGE_SHIFT));
3337 offset %= PAGE_SIZE;
3339 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3341 if (PageHighMem(page)) {
3342 unsigned long flags;
3344 local_irq_save(flags);
3345 buf = kmap_atomic(page, KM_IRQ0);
3347 /* do the actual data transfer */
3348 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3350 kunmap_atomic(buf, KM_IRQ0);
3351 local_irq_restore(flags);
3352 } else {
3353 buf = page_address(page);
3354 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3357 qc->cursect++;
3358 qc->cursg_ofs++;
3360 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3361 qc->cursg++;
3362 qc->cursg_ofs = 0;
3367 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3368 * @qc: Command on going
3370 * Transfer one or many ATA_SECT_SIZE of data from/to the
3371 * ATA device for the DRQ request.
3373 * LOCKING:
3374 * Inherited from caller.
3377 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3379 if (is_multi_taskfile(&qc->tf)) {
3380 /* READ/WRITE MULTIPLE */
3381 unsigned int nsect;
3383 WARN_ON(qc->dev->multi_count == 0);
3385 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3386 while (nsect--)
3387 ata_pio_sector(qc);
3388 } else
3389 ata_pio_sector(qc);
3393 * atapi_send_cdb - Write CDB bytes to hardware
3394 * @ap: Port to which ATAPI device is attached.
3395 * @qc: Taskfile currently active
3397 * When device has indicated its readiness to accept
3398 * a CDB, this function is called. Send the CDB.
3400 * LOCKING:
3401 * caller.
3404 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3406 /* send SCSI cdb */
3407 DPRINTK("send cdb\n");
3408 WARN_ON(qc->dev->cdb_len < 12);
3410 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3411 ata_altstatus(ap); /* flush */
3413 switch (qc->tf.protocol) {
3414 case ATA_PROT_ATAPI:
3415 ap->hsm_task_state = HSM_ST;
3416 break;
3417 case ATA_PROT_ATAPI_NODATA:
3418 ap->hsm_task_state = HSM_ST_LAST;
3419 break;
3420 case ATA_PROT_ATAPI_DMA:
3421 ap->hsm_task_state = HSM_ST_LAST;
3422 /* initiate bmdma */
3423 ap->ops->bmdma_start(qc);
3424 break;
3429 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3430 * @qc: Command on going
3431 * @bytes: number of bytes
3433 * Transfer data from/to the ATAPI device.
3435 * LOCKING:
3436 * Inherited from caller.
3440 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3442 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3443 struct scatterlist *sg = qc->__sg;
3444 struct ata_port *ap = qc->ap;
3445 struct page *page;
3446 unsigned char *buf;
3447 unsigned int offset, count;
3449 if (qc->curbytes + bytes >= qc->nbytes)
3450 ap->hsm_task_state = HSM_ST_LAST;
3452 next_sg:
3453 if (unlikely(qc->cursg >= qc->n_elem)) {
3455 * The end of qc->sg is reached and the device expects
3456 * more data to transfer. To avoid overrunning qc->sg while
3457 * still honoring the length in the byte count register:
3458 * - for reads, discard the trailing data from the device
3459 * - for writes, pad the transfer with zeros to the device
3461 u16 pad_buf[1] = { 0 };
3462 unsigned int words = bytes >> 1;
3463 unsigned int i;
3465 if (words) /* warning if bytes > 1 */
3466 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3467 ap->id, bytes);
3469 for (i = 0; i < words; i++)
3470 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3472 ap->hsm_task_state = HSM_ST_LAST;
3473 return;
3476 sg = &qc->__sg[qc->cursg];
3478 page = sg->page;
3479 offset = sg->offset + qc->cursg_ofs;
3481 /* get the current page and offset */
3482 page = nth_page(page, (offset >> PAGE_SHIFT));
3483 offset %= PAGE_SIZE;
3485 /* don't overrun current sg */
3486 count = min(sg->length - qc->cursg_ofs, bytes);
3488 /* don't cross page boundaries */
3489 count = min(count, (unsigned int)PAGE_SIZE - offset);
3491 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3493 if (PageHighMem(page)) {
3494 unsigned long flags;
3496 local_irq_save(flags);
3497 buf = kmap_atomic(page, KM_IRQ0);
3499 /* do the actual data transfer */
3500 ata_data_xfer(ap, buf + offset, count, do_write);
3502 kunmap_atomic(buf, KM_IRQ0);
3503 local_irq_restore(flags);
3504 } else {
3505 buf = page_address(page);
3506 ata_data_xfer(ap, buf + offset, count, do_write);
3509 bytes -= count;
3510 qc->curbytes += count;
3511 qc->cursg_ofs += count;
3513 if (qc->cursg_ofs == sg->length) {
3514 qc->cursg++;
3515 qc->cursg_ofs = 0;
3518 if (bytes)
3519 goto next_sg;
3523 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3524 * @qc: Command on going
3526 * Transfer data from/to the ATAPI device.
3528 * LOCKING:
3529 * Inherited from caller.
3532 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3534 struct ata_port *ap = qc->ap;
3535 struct ata_device *dev = qc->dev;
3536 unsigned int ireason, bc_lo, bc_hi, bytes;
3537 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3539 ap->ops->tf_read(ap, &qc->tf);
3540 ireason = qc->tf.nsect;
3541 bc_lo = qc->tf.lbam;
3542 bc_hi = qc->tf.lbah;
3543 bytes = (bc_hi << 8) | bc_lo;
3545 /* shall be cleared to zero, indicating xfer of data */
3546 if (ireason & (1 << 0))
3547 goto err_out;
3549 /* make sure transfer direction matches expected */
3550 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3551 if (do_write != i_write)
3552 goto err_out;
3554 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3556 __atapi_pio_bytes(qc, bytes);
3558 return;
3560 err_out:
3561 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3562 ap->id, dev->devno);
3563 qc->err_mask |= AC_ERR_HSM;
3564 ap->hsm_task_state = HSM_ST_ERR;
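/* Illustrative sketch, not part of libata-core.c: the checks above
 * decode the ATAPI interrupt reason register, returned in the sector
 * count field. Bit 0 is CoD (command vs. data) and bit 1 is I/O
 * (device-to-host vs. host-to-device). A hypothetical helper with the
 * same logic:
 */
static int my_ireason_ok(u8 ireason, int do_write)
{
	if (ireason & (1 << 0))	/* CoD set: command, not data */
		return 0;
	/* I/O cleared means the host writes to the device */
	return (((ireason & (1 << 1)) == 0) ? 1 : 0) == do_write;
}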
3568 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3569 * @ap: the target ata_port
3570 * @qc: qc on going
3572 * RETURNS:
3573 * 1 if ok in workqueue, 0 otherwise.
3576 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
3578 if (qc->tf.flags & ATA_TFLAG_POLLING)
3579 return 1;
3581 if (ap->hsm_task_state == HSM_ST_FIRST) {
3582 if (qc->tf.protocol == ATA_PROT_PIO &&
3583 (qc->tf.flags & ATA_TFLAG_WRITE))
3584 return 1;
3586 if (is_atapi_taskfile(&qc->tf) &&
3587 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
3588 return 1;
3591 return 0;
3595 * ata_hsm_move - move the HSM to the next state.
3596 * @ap: the target ata_port
3597 * @qc: qc on going
3598 * @status: current device status
3599 * @in_wq: 1 if called from workqueue, 0 otherwise
3601 * RETURNS:
3602 * 1 when poll next status needed, 0 otherwise.
3605 static int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3606 u8 status, int in_wq)
3608 unsigned long flags = 0;
3609 int poll_next;
3611 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3613 /* Make sure ata_qc_issue_prot() does not throw things
3614 * like DMA polling into the workqueue. Notice that
3615 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
3617 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
3619 fsm_start:
3620 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
3621 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
3623 switch (ap->hsm_task_state) {
3624 case HSM_ST_FIRST:
3625 /* Send first data block or PACKET CDB */
3627 /* If polling, we will stay in the work queue after
3628 * sending the data. Otherwise, interrupt handler
3629 * takes over after sending the data.
3631 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3633 /* check device status */
3634 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
3635 /* Wrong status. Let EH handle this */
3636 qc->err_mask |= AC_ERR_HSM;
3637 ap->hsm_task_state = HSM_ST_ERR;
3638 goto fsm_start;
3641 /* Device should not ask for data transfer (DRQ=1)
3642 * when it finds something wrong.
3643 * We ignore DRQ here and stop the HSM by
3644 * changing hsm_task_state to HSM_ST_ERR and
3645 * let the EH abort the command or reset the device.
3647 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3648 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
3649 ap->id, status);
3650 qc->err_mask |= AC_ERR_DEV;
3651 ap->hsm_task_state = HSM_ST_ERR;
3652 goto fsm_start;
3655 /* Send the CDB (atapi) or the first data block (ata pio out).
3656 * During the state transition, interrupt handler shouldn't
3657 * be invoked before the data transfer is complete and
3658 * hsm_task_state is changed. Hence, the following locking.
3660 if (in_wq)
3661 spin_lock_irqsave(&ap->host_set->lock, flags);
3663 if (qc->tf.protocol == ATA_PROT_PIO) {
3664 /* PIO data out protocol.
3665 * send first data block.
3668 /* ata_pio_sectors() might change the state
3669 * to HSM_ST_LAST. so, the state is changed here
3670 * before ata_pio_sectors().
3672 ap->hsm_task_state = HSM_ST;
3673 ata_pio_sectors(qc);
3674 ata_altstatus(ap); /* flush */
3675 } else
3676 /* send CDB */
3677 atapi_send_cdb(ap, qc);
3679 if (in_wq)
3680 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3682 /* if polling, ata_pio_task() handles the rest.
3683 * otherwise, interrupt handler takes over from here.
3685 break;
3687 case HSM_ST:
3688 /* complete command or read/write the data register */
3689 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3690 /* ATAPI PIO protocol */
3691 if ((status & ATA_DRQ) == 0) {
3692 /* no more data to transfer */
3693 ap->hsm_task_state = HSM_ST_LAST;
3694 goto fsm_start;
3697 /* Device should not ask for data transfer (DRQ=1)
3698 * when it finds something wrong.
3699 * We ignore DRQ here and stop the HSM by
3700 * changing hsm_task_state to HSM_ST_ERR and
3701 * let the EH abort the command or reset the device.
3703 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3704 printk(KERN_WARNING "ata%u: DRQ=1 with device error, dev_stat 0x%X\n",
3705 ap->id, status);
3706 qc->err_mask |= AC_ERR_DEV;
3707 ap->hsm_task_state = HSM_ST_ERR;
3708 goto fsm_start;
3711 atapi_pio_bytes(qc);
3713 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3714 /* bad ireason reported by device */
3715 goto fsm_start;
3717 } else {
3718 /* ATA PIO protocol */
3719 if (unlikely((status & ATA_DRQ) == 0)) {
3720 /* handle BSY=0, DRQ=0 as error */
3721 qc->err_mask |= AC_ERR_HSM;
3722 ap->hsm_task_state = HSM_ST_ERR;
3723 goto fsm_start;
3726 /* For PIO reads, some devices may ask for
3727 * data transfer (DRQ=1) along with ERR=1.
3728 * We respect DRQ here and transfer one
3729 * block of junk data before changing the
3730 * hsm_task_state to HSM_ST_ERR.
3732 * For PIO writes, ERR=1 DRQ=1 doesn't make
3733 * sense since the data block has been
3734 * transferred to the device.
3736 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3737 /* data might be corrupted */
3738 qc->err_mask |= AC_ERR_DEV;
3740 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
3741 ata_pio_sectors(qc);
3742 ata_altstatus(ap);
3743 status = ata_wait_idle(ap);
3746 /* ata_pio_sectors() might change the
3747 * state to HSM_ST_LAST. so, the state
3748 * is changed after ata_pio_sectors().
3750 ap->hsm_task_state = HSM_ST_ERR;
3751 goto fsm_start;
3754 ata_pio_sectors(qc);
3756 if (ap->hsm_task_state == HSM_ST_LAST &&
3757 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
3758 /* all data read */
3759 ata_altstatus(ap);
3760 status = ata_wait_idle(ap);
3761 goto fsm_start;
3765 ata_altstatus(ap); /* flush */
3766 poll_next = 1;
3767 break;
3769 case HSM_ST_LAST:
3770 if (unlikely(!ata_ok(status))) {
3771 qc->err_mask |= __ac_err_mask(status);
3772 ap->hsm_task_state = HSM_ST_ERR;
3773 goto fsm_start;
3776 /* no more data to transfer */
3777 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
3778 ap->id, status);
3780 WARN_ON(qc->err_mask);
3782 ap->hsm_task_state = HSM_ST_IDLE;
3784 /* complete taskfile transaction */
3785 if (in_wq)
3786 ata_poll_qc_complete(qc);
3787 else
3788 ata_qc_complete(qc);
3790 poll_next = 0;
3791 break;
3793 case HSM_ST_ERR:
3794 if (qc->tf.command != ATA_CMD_PACKET)
3795 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x\n",
3796 ap->id, status);
3798 /* make sure qc->err_mask is available to
3799 * know what's wrong and recover
3801 WARN_ON(qc->err_mask == 0);
3803 ap->hsm_task_state = HSM_ST_IDLE;
3805 /* complete taskfile transaction */
3806 if (in_wq)
3807 ata_poll_qc_complete(qc);
3808 else
3809 ata_qc_complete(qc);
3811 poll_next = 0;
3812 break;
3813 default:
3814 poll_next = 0;
3815 BUG();
3818 return poll_next;
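/* Summary of the state flow implemented by ata_hsm_move() above
 * (descriptive comment, not part of libata-core.c):
 *
 *	HSM_ST_FIRST: device is ready for the CDB (ATAPI) or the first
 *		      data block (PIO data-out); send it and move to
 *		      HSM_ST.
 *	HSM_ST:	      transfer data blocks while DRQ is asserted; once
 *		      the transfer is exhausted, move to HSM_ST_LAST.
 *	HSM_ST_LAST:  check the final status and complete the qc,
 *		      ending in HSM_ST_IDLE.
 *	HSM_ST_ERR:   err_mask has been set; complete the qc so EH can
 *		      recover, also ending in HSM_ST_IDLE.
 */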
3821 static void ata_pio_task(void *_data)
3823 struct ata_port *ap = _data;
3824 struct ata_queued_cmd *qc;
3825 u8 status;
3826 int poll_next;
3828 fsm_start:
3829 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
3831 qc = ata_qc_from_tag(ap, ap->active_tag);
3832 WARN_ON(qc == NULL);
3835 * This is purely heuristic. This is a fast path.
3836 * Sometimes when we enter, BSY will be cleared in
3837 * a chk-status or two. If not, the drive is probably seeking
3838 * or something. Snooze for a couple msecs, then
3839 * chk-status again. If still busy, queue delayed work.
3841 status = ata_busy_wait(ap, ATA_BUSY, 5);
3842 if (status & ATA_BUSY) {
3843 msleep(2);
3844 status = ata_busy_wait(ap, ATA_BUSY, 10);
3845 if (status & ATA_BUSY) {
3846 ata_port_queue_task(ap, ata_pio_task, ap, ATA_SHORT_PAUSE);
3847 return;
3851 /* move the HSM */
3852 poll_next = ata_hsm_move(ap, qc, status, 1);
3854 /* another command or interrupt handler
3855 * may be running at this point.
3857 if (poll_next)
3858 goto fsm_start;
3862 * ata_qc_timeout - Handle timeout of queued command
3863 * @qc: Command that timed out
3865 * Some part of the kernel (currently, only the SCSI layer)
3866 * has noticed that the active command on port @ap has not
3867 * completed after a specified length of time. Handle this
3868 * condition by disabling DMA (if necessary) and completing
3869 * transactions, with error if necessary.
3871 * This also handles the case of the "lost interrupt", where
3872 * for some reason (possibly hardware bug, possibly driver bug)
3873 * an interrupt was not delivered to the driver, even though the
3874 * transaction completed successfully.
3876 * LOCKING:
3877 * Inherited from SCSI layer (none, can sleep)
3880 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3882 struct ata_port *ap = qc->ap;
3883 struct ata_host_set *host_set = ap->host_set;
3884 u8 host_stat = 0, drv_stat;
3885 unsigned long flags;
3887 DPRINTK("ENTER\n");
3889 ap->hsm_task_state = HSM_ST_IDLE;
3891 spin_lock_irqsave(&host_set->lock, flags);
3893 switch (qc->tf.protocol) {
3895 case ATA_PROT_DMA:
3896 case ATA_PROT_ATAPI_DMA:
3897 host_stat = ap->ops->bmdma_status(ap);
3899 /* before we do anything else, clear DMA-Start bit */
3900 ap->ops->bmdma_stop(qc);
3902 /* fall through */
3904 default:
3905 ata_altstatus(ap);
3906 drv_stat = ata_chk_status(ap);
3908 /* ack bmdma irq events */
3909 ap->ops->irq_clear(ap);
3911 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3912 ap->id, qc->tf.command, drv_stat, host_stat);
3914 ap->hsm_task_state = HSM_ST_IDLE;
3916 /* complete taskfile transaction */
3917 qc->err_mask |= AC_ERR_TIMEOUT;
3918 break;
3921 spin_unlock_irqrestore(&host_set->lock, flags);
3923 ata_eh_qc_complete(qc);
3925 DPRINTK("EXIT\n");
3929 * ata_eng_timeout - Handle timeout of queued command
3930 * @ap: Port on which timed-out command is active
3932 * Some part of the kernel (currently, only the SCSI layer)
3933 * has noticed that the active command on port @ap has not
3934 * completed after a specified length of time. Handle this
3935 * condition by disabling DMA (if necessary) and completing
3936 * transactions, with error if necessary.
3938 * This also handles the case of the "lost interrupt", where
3939 * for some reason (possibly hardware bug, possibly driver bug)
3940 * an interrupt was not delivered to the driver, even though the
3941 * transaction completed successfully.
3943 * LOCKING:
3944 * Inherited from SCSI layer (none, can sleep)
3947 void ata_eng_timeout(struct ata_port *ap)
3949 DPRINTK("ENTER\n");
3951 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3953 DPRINTK("EXIT\n");
3957 * ata_qc_new - Request an available ATA command, for queueing
3958 * @ap: Port associated with device @dev
3959 * @dev: Device from whom we request an available command structure
3961 * LOCKING:
3962 * None.
3965 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3967 struct ata_queued_cmd *qc = NULL;
3968 unsigned int i;
3970 for (i = 0; i < ATA_MAX_QUEUE; i++)
3971 if (!test_and_set_bit(i, &ap->qactive)) {
3972 qc = ata_qc_from_tag(ap, i);
3973 break;
3976 if (qc)
3977 qc->tag = i;
3979 return qc;
3983 * ata_qc_new_init - Request an available ATA command, and initialize it
3984 * @ap: Port associated with device @dev
3985 * @dev: Device from whom we request an available command structure
3987 * LOCKING:
3988 * None.
3991 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3992 struct ata_device *dev)
3994 struct ata_queued_cmd *qc;
3996 qc = ata_qc_new(ap);
3997 if (qc) {
3998 qc->scsicmd = NULL;
3999 qc->ap = ap;
4000 qc->dev = dev;
4002 ata_qc_reinit(qc);
4005 return qc;
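/* Illustrative sketch, not part of libata-core.c: a typical caller
 * allocates a qc, initializes the taskfile and issues it while
 * holding the host_set lock. my_issue_idle_immediate() is
 * hypothetical; the ata_* calls are the ones defined in this file.
 */
static void my_issue_idle_immediate(struct ata_port *ap,
				    struct ata_device *dev)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(ap, dev);
	if (!qc)
		return;		/* all ATA_MAX_QUEUE tags are in use */

	ata_tf_init(ap, &qc->tf, dev->devno);
	qc->tf.command = ATA_CMD_IDLEIMMEDIATE;
	qc->tf.flags |= ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;

	ata_qc_issue(qc);	/* caller must hold the host_set lock */
}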
4009 * ata_qc_free - free unused ata_queued_cmd
4010 * @qc: Command to complete
4012 * Designed to free unused ata_queued_cmd object
4013 * in case something prevents using it.
4015 * LOCKING:
4016 * spin_lock_irqsave(host_set lock)
4018 void ata_qc_free(struct ata_queued_cmd *qc)
4020 struct ata_port *ap = qc->ap;
4021 unsigned int tag;
4023 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4025 qc->flags = 0;
4026 tag = qc->tag;
4027 if (likely(ata_tag_valid(tag))) {
4028 if (tag == ap->active_tag)
4029 ap->active_tag = ATA_TAG_POISON;
4030 qc->tag = ATA_TAG_POISON;
4031 clear_bit(tag, &ap->qactive);
4035 void __ata_qc_complete(struct ata_queued_cmd *qc)
4037 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4038 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4040 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4041 ata_sg_clean(qc);
4043 /* atapi: mark qc as inactive to prevent the interrupt handler
4044 * from completing the command twice later, before the error handler
4045 * is called. (when rc != 0 and atapi request sense is needed)
4047 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4049 /* call completion callback */
4050 qc->complete_fn(qc);
4053 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4055 struct ata_port *ap = qc->ap;
4057 switch (qc->tf.protocol) {
4058 case ATA_PROT_DMA:
4059 case ATA_PROT_ATAPI_DMA:
4060 return 1;
4062 case ATA_PROT_ATAPI:
4063 case ATA_PROT_PIO:
4064 if (ap->flags & ATA_FLAG_PIO_DMA)
4065 return 1;
4067 /* fall through */
4069 default:
4070 return 0;
4073 /* never reached */
4077 * ata_qc_issue - issue taskfile to device
4078 * @qc: command to issue to device
4080 * Prepare an ATA command for submission to the device.
4081 * This includes mapping the data into a DMA-able
4082 * area, filling in the S/G table, and finally
4083 * writing the taskfile to hardware, starting the command.
4085 * LOCKING:
4086 * spin_lock_irqsave(host_set lock)
4088 void ata_qc_issue(struct ata_queued_cmd *qc)
4090 struct ata_port *ap = qc->ap;
4092 qc->ap->active_tag = qc->tag;
4093 qc->flags |= ATA_QCFLAG_ACTIVE;
4095 if (ata_should_dma_map(qc)) {
4096 if (qc->flags & ATA_QCFLAG_SG) {
4097 if (ata_sg_setup(qc))
4098 goto sg_err;
4099 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4100 if (ata_sg_setup_one(qc))
4101 goto sg_err;
4103 } else {
4104 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4107 ap->ops->qc_prep(qc);
4109 qc->err_mask |= ap->ops->qc_issue(qc);
4110 if (unlikely(qc->err_mask))
4111 goto err;
4112 return;
4114 sg_err:
4115 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4116 qc->err_mask |= AC_ERR_SYSTEM;
4117 err:
4118 ata_qc_complete(qc);
4122 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4123 * @qc: command to issue to device
4125 * Using various libata functions and hooks, this function
4126 * starts an ATA command. ATA commands are grouped into
4127 * classes called "protocols", and issuing each type of protocol
4128 * is slightly different.
4130 * May be used as the qc_issue() entry in ata_port_operations.
4132 * LOCKING:
4133 * spin_lock_irqsave(host_set lock)
4135 * RETURNS:
4136 * Zero on success, AC_ERR_* mask on failure
4139 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4141 struct ata_port *ap = qc->ap;
4143 /* Use polling pio if the LLD doesn't handle
4144 * interrupt driven pio and atapi CDB interrupt.
4146 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4147 switch (qc->tf.protocol) {
4148 case ATA_PROT_PIO:
4149 case ATA_PROT_ATAPI:
4150 case ATA_PROT_ATAPI_NODATA:
4151 qc->tf.flags |= ATA_TFLAG_POLLING;
4152 break;
4153 case ATA_PROT_ATAPI_DMA:
4154 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4155 /* see ata_check_atapi_dma() */
4156 BUG();
4157 break;
4158 default:
4159 break;
4163 /* select the device */
4164 ata_dev_select(ap, qc->dev->devno, 1, 0);
4166 /* start the command */
4167 switch (qc->tf.protocol) {
4168 case ATA_PROT_NODATA:
4169 if (qc->tf.flags & ATA_TFLAG_POLLING)
4170 ata_qc_set_polling(qc);
4172 ata_tf_to_host(ap, &qc->tf);
4173 ap->hsm_task_state = HSM_ST_LAST;
4175 if (qc->tf.flags & ATA_TFLAG_POLLING)
4176 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4178 break;
4180 case ATA_PROT_DMA:
4181 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4183 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4184 ap->ops->bmdma_setup(qc); /* set up bmdma */
4185 ap->ops->bmdma_start(qc); /* initiate bmdma */
4186 ap->hsm_task_state = HSM_ST_LAST;
4187 break;
4189 case ATA_PROT_PIO:
4190 if (qc->tf.flags & ATA_TFLAG_POLLING)
4191 ata_qc_set_polling(qc);
4193 ata_tf_to_host(ap, &qc->tf);
4195 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4196 /* PIO data out protocol */
4197 ap->hsm_task_state = HSM_ST_FIRST;
4198 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4200 /* always send first data block using
4201 * the ata_pio_task() codepath.
4203 } else {
4204 /* PIO data in protocol */
4205 ap->hsm_task_state = HSM_ST;
4207 if (qc->tf.flags & ATA_TFLAG_POLLING)
4208 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4210 /* if polling, ata_pio_task() handles the rest.
4211 * otherwise, interrupt handler takes over from here.
4215 break;
4217 case ATA_PROT_ATAPI:
4218 case ATA_PROT_ATAPI_NODATA:
4219 if (qc->tf.flags & ATA_TFLAG_POLLING)
4220 ata_qc_set_polling(qc);
4222 ata_tf_to_host(ap, &qc->tf);
4224 ap->hsm_task_state = HSM_ST_FIRST;
4226 /* send cdb by polling if no cdb interrupt */
4227 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4228 (qc->tf.flags & ATA_TFLAG_POLLING))
4229 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4230 break;
4232 case ATA_PROT_ATAPI_DMA:
4233 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4235 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4236 ap->ops->bmdma_setup(qc); /* set up bmdma */
4237 ap->hsm_task_state = HSM_ST_FIRST;
4239 /* send cdb by polling if no cdb interrupt */
4240 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4241 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4242 break;
4244 default:
4245 WARN_ON(1);
4246 return AC_ERR_SYSTEM;
4249 return 0;
4253 * ata_host_intr - Handle host interrupt for given (port, task)
4254 * @ap: Port on which interrupt arrived (possibly...)
4255 * @qc: Taskfile currently active in engine
4257 * Handle host interrupt for given queued command. Currently,
4258 * only DMA interrupts are handled. All other commands are
4259 * handled via polling with interrupts disabled (nIEN bit).
4261 * LOCKING:
4262 * spin_lock_irqsave(host_set lock)
4264 * RETURNS:
4265 * One if interrupt was handled, zero if not (shared irq).
4268 inline unsigned int ata_host_intr (struct ata_port *ap,
4269 struct ata_queued_cmd *qc)
4271 u8 status, host_stat = 0;
4273 VPRINTK("ata%u: protocol %d task_state %d\n",
4274 ap->id, qc->tf.protocol, ap->hsm_task_state);
4276 /* Check whether we are expecting interrupt in this state */
4277 switch (ap->hsm_task_state) {
4278 case HSM_ST_FIRST:
4279 /* Some pre-ATAPI-4 devices assert INTRQ
4280 * at this state when ready to receive CDB.
4283 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
4284 * The flag was turned on only for atapi devices.
4285 * No need to check is_atapi_taskfile(&qc->tf) again.
4287 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4288 goto idle_irq;
4289 break;
4290 case HSM_ST_LAST:
4291 if (qc->tf.protocol == ATA_PROT_DMA ||
4292 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4293 /* check status of DMA engine */
4294 host_stat = ap->ops->bmdma_status(ap);
4295 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4297 /* if it's not our irq... */
4298 if (!(host_stat & ATA_DMA_INTR))
4299 goto idle_irq;
4301 /* before we do anything else, clear DMA-Start bit */
4302 ap->ops->bmdma_stop(qc);
4304 if (unlikely(host_stat & ATA_DMA_ERR)) {
4305 /* error when transferring data to/from memory */
4306 qc->err_mask |= AC_ERR_HOST_BUS;
4307 ap->hsm_task_state = HSM_ST_ERR;
4310 break;
4311 case HSM_ST:
4312 break;
4313 default:
4314 goto idle_irq;
4317 /* check altstatus */
4318 status = ata_altstatus(ap);
4319 if (status & ATA_BUSY)
4320 goto idle_irq;
4322 /* check main status, clearing INTRQ */
4323 status = ata_chk_status(ap);
4324 if (unlikely(status & ATA_BUSY))
4325 goto idle_irq;
4327 /* ack bmdma irq events */
4328 ap->ops->irq_clear(ap);
4330 ata_hsm_move(ap, qc, status, 0);
4331 return 1; /* irq handled */
4333 idle_irq:
4334 ap->stats.idle_irq++;
4336 #ifdef ATA_IRQ_TRAP
4337 if ((ap->stats.idle_irq % 1000) == 0) {
4338 ata_irq_ack(ap, 0); /* debug trap */
4339 printk(KERN_WARNING "ata%u: irq trap\n", ap->id);
4340 return 1;
4342 #endif
4343 return 0; /* irq not handled */
/**
 * ata_interrupt - Default ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host_set information structure
 * @regs: unused
 *
 * Default interrupt handler for PCI IDE devices.  Calls
 * ata_host_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host_set lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
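/* Illustrative sketch (not part of this file): a conventional
 * taskfile/BMDMA driver can point its ata_port_operations straight at
 * ata_interrupt() and the bmdma helpers exported below.  Everything
 * here except the libata symbols is hypothetical, and a real driver
 * fills in all mandatory hooks for its hardware.
 */
#if 0
static const struct ata_port_operations my_bmdma_port_ops = {
	.port_disable	= ata_port_disable,
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.exec_command	= ata_exec_command,
	.dev_select	= ata_std_dev_select,
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.eng_timeout	= ata_eng_timeout,
	.irq_handler	= ata_interrupt,	/* requested by ata_device_add() */
	.irq_clear	= ata_bmdma_irq_clear,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
	.host_stop	= ata_host_stop,
};
#endif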
/*
 * Execute a 'simple' command, one that consists only of the opcode 'cmd'
 * itself, without filling any other registers.
 */
static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
			     u8 cmd)
{
	struct ata_taskfile tf;
	int err;

	ata_tf_init(ap, &tf, dev->devno);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
	if (err)
		printk(KERN_ERR "%s: ata command failed: %d\n",
		       __FUNCTION__, err);

	return err;
}
static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
{
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (ata_id_has_flush_ext(dev->id))
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	return ata_do_simple_cmd(ap, dev, cmd);
}

static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
}

static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
}
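/* Illustrative sketch: additional register-less commands follow the same
 * one-line pattern.  The helper below is hypothetical and not part of
 * this file; 0xE6 is the ATA SLEEP opcode, spelled out because this
 * tree's <linux/ata.h> may not define a constant for it.
 */
#if 0
static int ata_sleep_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, 0xE6 /* SLEEP */);
}
#endif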
/**
 * ata_device_resume - wake up a previously suspended device
 * @ap: port the device is connected to
 * @dev: the device to resume
 *
 * Kick the drive back into action, by sending it an idle immediate
 * command and making sure its transfer mode matches between drive
 * and host.
 */
int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
{
	if (ap->flags & ATA_FLAG_SUSPENDED) {
		ap->flags &= ~ATA_FLAG_SUSPENDED;
		ata_set_mode(ap);
	}
	if (!ata_dev_enabled(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_start_drive(ap, dev);

	return 0;
}
/**
 * ata_device_suspend - prepare a device for suspend
 * @ap: port the device is connected to
 * @dev: the device to suspend
 * @state: target power management state
 *
 * Flush the cache on the drive, if appropriate, then issue a
 * standbynow command.
 */
int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
{
	if (!ata_dev_enabled(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_flush_cache(ap, dev);

	if (state.event != PM_EVENT_FREEZE)
		ata_standby_drive(ap, dev);
	ap->flags |= ATA_FLAG_SUSPENDED;
	return 0;
}
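/* Sketch of the SCSI-layer glue (paraphrased, not verbatim from this
 * tree): the exported ata_scsi_device_{suspend,resume} wrappers resolve
 * the ata_port and ata_device behind a scsi_device and forward to the
 * two helpers above.
 */
#if 0
static int example_scsi_device_suspend(struct scsi_device *sdev,
				       pm_message_t state)
{
	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
	struct ata_device *dev = &ap->device[sdev->id];

	return ata_device_suspend(ap, dev, state);
}
#endif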
/**
 * ata_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long) ap->prd_dma);

	return 0;
}
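/* Illustrative sketch: drivers that need per-port private state commonly
 * wrap ata_port_start() rather than replace it.  All "my_*" names below
 * are hypothetical.
 */
#if 0
static int my_port_start(struct ata_port *ap)
{
	struct my_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* allocates the PRD table */
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		ata_port_stop(ap);
		return -ENOMEM;
	}
	ap->private_data = pp;

	return 0;
}
#endif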
/**
 * ata_port_stop - Undo ata_port_start()
 * @ap: Port to shut down
 *
 * Frees the PRD table.
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}

void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
/**
 * ata_host_remove - Unregister SCSI host structure with upper layers
 * @ap: Port to unregister
 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	if (do_unregister)
		scsi_remove_host(sh);

	ap->ops->port_stop(ap);
}
/**
 * ata_host_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: associated SCSI mid-layer structure
 * @host_set: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure, and its associated
 * scsi_host.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_LIST_HEAD(&ap->eh_done_q);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->devno = i;
		dev->pio_mask = UINT_MAX;
		dev->mwdma_mask = UINT_MAX;
		dev->udma_mask = UINT_MAX;
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collection of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, or NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->probe_reset &&
	    !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	host->transportt = &ata_scsi_transport_template;

	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;
	host_set->flags = ent->host_set_flags;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
		       "bmdma 0x%lX irq %lu\n",
		       ap->id,
		       ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
		       ata_mode_string(xfer_mode_mask),
		       ap->ioaddr.cmd_addr,
		       ap->ioaddr.ctl_addr,
		       ap->ioaddr.bmdma_addr,
		       ent->irq);

		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, which is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: bus probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
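/* Illustrative sketch: a minimal non-PCI glue driver would fill in an
 * ata_probe_ent by hand and pass it to ata_device_add().  Every "my_*"
 * and "MY_*" name is hypothetical; real addresses, masks, and the SCSI
 * host template come from the hardware and the driver.
 */
#if 0
static int my_platform_probe(struct device *dev)
{
	struct ata_probe_ent ent;

	memset(&ent, 0, sizeof(ent));
	INIT_LIST_HEAD(&ent.node);
	ent.dev		= dev;
	ent.sht		= &my_sht;		/* scsi_host_template */
	ent.port_ops	= &my_bmdma_port_ops;	/* see earlier sketch */
	ent.n_ports	= 1;
	ent.pio_mask	= 0x1f;			/* PIO0-4 */
	ent.irq		= MY_IRQ;
	ent.irq_flags	= SA_SHIRQ;
	ent.host_flags	= ATA_FLAG_SRST;	/* satisfies ata_host_add() */
	ent.port[0].cmd_addr = MY_CMD_BASE;
	ent.port[0].ctl_addr = MY_CTL_BASE;
	ata_std_ports(&ent.port[0]);		/* defined later in this file */

	return ata_device_add(&ent) ? 0 : -ENODEV;
}
#endif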
/**
 * ata_host_set_remove - PCI layer callback for device removal
 * @host_set: ATA host set that was removed
 *
 * Unregister all objects associated with this host set.  Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
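/* Illustrative sketch: a non-PCI glue's remove hook mirrors
 * ata_pci_remove_one() below.  The function name is hypothetical.
 */
#if 0
static int my_platform_remove(struct device *dev)
{
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	dev_set_drvdata(dev, NULL);
	return 0;
}
#endif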
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
	int i;

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		kfree(ap->device[i].id);

	DPRINTK("EXIT\n");
	return 1;
}
/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
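/* Worked example: with the legacy primary-channel base of 0x1f0, the
 * standard taskfile offsets (ATA_REG_DATA == 0 ... ATA_REG_STATUS == 7)
 * resolve as annotated below.
 */
#if 0
static void example_std_ports_usage(void)
{
	struct ata_ioports io = { .cmd_addr = 0x1f0, .ctl_addr = 0x3f6 };

	ata_std_ports(&io);
	/* io.data_addr   == 0x1f0
	 * io.error_addr  == 0x1f1  (shares the port with feature_addr)
	 * io.nsect_addr  == 0x1f2
	 * io.lbal_addr   == 0x1f3
	 * io.lbam_addr   == 0x1f4
	 * io.lbah_addr   == 0x1f5
	 * io.device_addr == 0x1f6
	 * io.status_addr == 0x1f7  (shares the port with command_addr)
	 */
}
#endif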
#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * The PCI layer indicates to libata via this hook that a
 * hot-unplug or module unload event has occurred.  Handle
 * this by unregistering all objects associated with this
 * PCI device.  Free those objects.  Then finally release
 * PCI resources and disable the device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
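/* Illustrative sketch (pattern borrowed from PATA drivers): test a
 * channel-enable bit in PCI config space and disable the port if the
 * BIOS turned the channel off.  The register offsets and "my_*" names
 * are placeholders, not taken from a real chip.
 */
#if 0
static const struct pci_bits my_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* port 0: byte reg 0x41, bit 7 */
	{ 0x43, 1, 0x80, 0x80 },	/* port 1: byte reg 0x43, bit 7 */
};

static void my_probe_init(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);

	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->hard_port_no]))
		ata_port_disable(ap);
}
#endif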
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
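/* Illustrative sketch: a PCI low-level driver can plug these helpers
 * straight into its pci_driver.  Only DRV_NAME is real here; the id
 * table and probe routine are hypothetical.
 */
#if 0
static struct pci_driver my_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= my_pci_tbl,
	.probe		= my_init_one,		/* e.g. via ata_pci_init_one() */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};
#endif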
#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
}

module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
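/* Usage example: callers gate noisy diagnostics on ata_ratelimit(),
 * which opens a window at most once per HZ/5 jiffies (roughly five
 * messages per second).  The wrapper function is hypothetical.
 */
#if 0
static void example_ratelimit_usage(struct ata_port *ap)
{
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
}
#endif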
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_probeinit);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);