[PATCH] Update libata DMA blacklist to cover versions, and resync with IDE layer
[linux-2.6.git] / drivers / scsi / libata-core.c
blobc6600ac965f08ae3e7941bf6badce5ed8ee92fba
1 /*
2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
62 #include "libata.h"
/* Forward declarations for helpers defined later in this file. */
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_dev_xfermask(struct ata_port *ap,
				     struct ata_device *dev);

/* Monotonically increasing id handed to each new port; 0 is never used. */
static unsigned int ata_unique_id = 1;

/* Workqueue used to run deferred per-port tasks (see ata_port_queue_task). */
static struct workqueue_struct *ata_wq;

/* Module parameter: enable/disable discovery of ATAPI devices (read-only). */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

/* Module parameter: enable/disable FUA (Force Unit Access) support. */
int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will output (at least 20 bytes)
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	/* LBA and device bytes */
	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* high-order bytes (LBA48 expansion) */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	/* sector count, low then high */
	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* bytes 16-19 are reserved in the Register FIS */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	/* high-order (LBA48) bytes */
	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
159 static const u8 ata_rw_cmds[] = {
160 /* pio multi */
161 ATA_CMD_READ_MULTI,
162 ATA_CMD_WRITE_MULTI,
163 ATA_CMD_READ_MULTI_EXT,
164 ATA_CMD_WRITE_MULTI_EXT,
168 ATA_CMD_WRITE_MULTI_FUA_EXT,
169 /* pio */
170 ATA_CMD_PIO_READ,
171 ATA_CMD_PIO_WRITE,
172 ATA_CMD_PIO_READ_EXT,
173 ATA_CMD_PIO_WRITE_EXT,
178 /* dma */
179 ATA_CMD_READ,
180 ATA_CMD_WRITE,
181 ATA_CMD_READ_EXT,
182 ATA_CMD_WRITE_EXT,
186 ATA_CMD_WRITE_FUA_EXT
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	0 if a valid command was selected, -1 if the flag combination
 *	maps to a zero (invalid) entry in ata_rw_cmds[].
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	/* each flag contributes a fixed offset into ata_rw_cmds[] */
	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		/* base 0 = PIO multi-sector commands, base 8 = plain PIO */
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;		/* base of the DMA command group */
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
232 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
233 * @pio_mask: pio_mask
234 * @mwdma_mask: mwdma_mask
235 * @udma_mask: udma_mask
237 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
238 * unsigned int xfer_mask.
240 * LOCKING:
241 * None.
243 * RETURNS:
244 * Packed xfer_mask.
246 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
247 unsigned int mwdma_mask,
248 unsigned int udma_mask)
250 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
251 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
252 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
/* Maps each xfer_mask bit field (shift/width) to its base XFER_* mode
 * value; terminated by a negative shift.  Used by the mask<->mode
 * translation helpers below.
 */
static const struct ata_xfer_ent {
	unsigned int shift, bits;	/* bit field position and width */
	u8 base;			/* XFER_* value of the field's bit 0 */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },			/* sentinel */
};
266 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
267 * @xfer_mask: xfer_mask of interest
269 * Return matching XFER_* value for @xfer_mask. Only the highest
270 * bit of @xfer_mask is considered.
272 * LOCKING:
273 * None.
275 * RETURNS:
276 * Matching XFER_* value, 0 if no match found.
278 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
280 int highbit = fls(xfer_mask) - 1;
281 const struct ata_xfer_ent *ent;
283 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
284 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
285 return ent->base + highbit - ent->shift;
286 return 0;
290 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
291 * @xfer_mode: XFER_* of interest
293 * Return matching xfer_mask for @xfer_mode.
295 * LOCKING:
296 * None.
298 * RETURNS:
299 * Matching xfer_mask, 0 if no match found.
301 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
303 const struct ata_xfer_ent *ent;
305 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
306 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
307 return 1 << (ent->shift + xfer_mode - ent->base);
308 return 0;
312 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
313 * @xfer_mode: XFER_* of interest
315 * Return matching xfer_shift for @xfer_mode.
317 * LOCKING:
318 * None.
320 * RETURNS:
321 * Matching xfer_shift, -1 if no match found.
323 static int ata_xfer_mode2shift(unsigned int xfer_mode)
325 const struct ata_xfer_ent *ent;
327 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
328 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
329 return ent->shift;
330 return -1;
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* one name per bit position, matching the packed xfer_mask layout */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	/* empty mask or out-of-range bit -> no printable mode */
	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device responded, 0 otherwise.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write the test pattern twice with the values swapped, then
	 * once more; only real register storage echoes the last pair back
	 */
	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device responded, 0 otherwise.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* MMIO variant of the echo test in ata_pio_devchk() */
	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
464 * ata_devchk - PATA device presence detection
465 * @ap: ATA channel to examine
466 * @device: Device to examine (starting at zero)
468 * Dispatch ATA device presence detection, depending
469 * on whether we are using PIO or MMIO to talk to the
470 * ATA shadow registers.
472 * LOCKING:
473 * caller.
476 static unsigned int ata_devchk(struct ata_port *ap,
477 unsigned int device)
479 if (ap->flags & ATA_FLAG_MMIO)
480 return ata_mmio_devchk(ap, device);
481 return ata_pio_devchk(ap, device);
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	/* 0000/0000 = legacy ATA sig, 3c/c3 = SATA ATA device sig */
	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	/* 14/eb = ATAPI sig, 69/96 = SATA ATAPI device sig */
	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* 0x81 on device 0: device 1 failed diags, device 0 is OK */
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* all-zero status on a claimed ATA device means nothing is there */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
580 * ata_id_string - Convert IDENTIFY DEVICE page into string
581 * @id: IDENTIFY DEVICE results we will examine
582 * @s: string into which data is output
583 * @ofs: offset into identify device page
584 * @len: length of string to return. must be an even number.
586 * The strings in the IDENTIFY DEVICE page are broken up into
587 * 16-bit chunks. Run through the string, and output each
588 * 8-bit chunk linearly, regardless of platform.
590 * LOCKING:
591 * caller.
594 void ata_id_string(const u16 *id, unsigned char *s,
595 unsigned int ofs, unsigned int len)
597 unsigned int c;
599 while (len > 0) {
600 c = id[ofs] >> 8;
601 *s = c;
602 s++;
604 c = id[ofs] & 0xff;
605 *s = c;
606 s++;
608 ofs++;
609 len -= 2;
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	/* an even @len would leave no room for the terminator */
	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	/* trim trailing spaces, then NUL-terminate */
	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
642 static u64 ata_id_n_sectors(const u16 *id)
644 if (ata_id_has_lba(id)) {
645 if (ata_id_has_lba48(id))
646 return ata_id_u64(id, 100);
647 else
648 return ata_id_u32(id, 60);
649 } else {
650 if (ata_id_current_chs_valid(id))
651 return ata_id_u32(id, 57);
652 else
653 return id[1] * id[3] * id[6];
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
675 * ata_std_dev_select - Select device 0/1 on ATA bus
676 * @ap: ATA channel to manipulate
677 * @device: ATA device (numbered from zero) to select
679 * Use the method defined in the ATA specification to
680 * make either device 0, or device 1, active on the
681 * ATA channel. Works with both PIO and MMIO.
683 * May be used as the dev_select() entry in ata_port_operations.
685 * LOCKING:
686 * caller.
689 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
691 u8 tmp;
693 if (device == 0)
694 tmp = ATA_DEVICE_OBS;
695 else
696 tmp = ATA_DEVICE_OBS | ATA_DEV1;
698 if (ap->flags & ATA_FLAG_MMIO) {
699 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
700 } else {
701 outb(tmp, ap->ioaddr.device_addr);
703 ata_pause(ap); /* needed; also flushes, for mmio */
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* ATAPI devices may need extra settle time after select */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.  Debug-build no-op otherwise (DPRINTK).
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	/* capability / timing words */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* version / command-set support words */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* UDMA modes and hardware reset result */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;		/* word 64 reports modes 3/4 only */
		pio_mask |= 0x7;	/* modes 0-2 always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	/* word 88 (UDMA modes) is only valid if word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to @fn
 *	@delay: delay in jiffies before running @fn (0 = run ASAP)
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	/* a flush is in progress; don't queue new work (see
	 * ata_port_flush_task())
	 */
	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* set the flag under the lock so any running task sees it */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		/* work was already running or queued; flush once more */
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}
/* Completion callback for ata_exec_internal(): snapshot the result
 * taskfile into the qc and wake the waiting issuer.
 */
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data tranfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	AC_ERR_* mask on failure, 0 on success.
 */
static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	/* ata_qc_complete_internal() will complete(&wait) */
	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		/* stop any polling task before touching the qc */
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	/* hand the result taskfile and error mask back to the caller */
	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	return err_mask;
}
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 *
 *	RETURNS:
 *	1 if IORDY is required for the device's PIO mode, 0 otherwise.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	/* PIO0/1 never need IORDY, PIO3+ always do */
	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also takes care of EDD signature
 *	misreporting (to be removed once EDD support is gone) and
 *	issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
 *
 *	On success, *p_id points to a kmalloc'ed buffer owned by the
 *	caller; on failure the buffer is freed here.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	unsigned int using_edd;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* hosts with a reset method (SRST/COMRESET) do not use EDD */
	if (ap->ops->probe_reset ||
	    ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	/* command depends on whether we think this is ATA or ATAPI */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);

	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";

		/* only a pure device error can be the ATAPI-abort case */
		if (err_mask & ~AC_ERR_DEV)
			goto err_out;

		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (class == ATA_DEV_ATA)) {
			u8 err = tf.feature;
			if (err & ATA_ABORTED) {
				class = ATA_DEV_ATAPI;
				goto retry;
			}
		}
		goto err_out;
	}

	/* identify data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}
1168 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1169 struct ata_device *dev)
1171 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@ap: Port on which target device resides
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bits 8 of word 49) */
	if (!ata_id_has_dma(id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		rc = -EINVAL;
		goto err_out_nosup;
	}

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	/* SCSI host's max_cmd_len must cover the longest CDB on the port */
	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		ap->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* give the low-level driver a chance to apply its own fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, dev->devno);
	DPRINTK("EXIT, err\n");
	return rc;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	unsigned int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		/* new-style reset: driver classifies via classes[] */
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
			return rc;
		}
	} else {
		/* legacy path: ->phy_reset() stores classes in ap->device[] */
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		/* re-enable port; presumably ->phy_reset may have
		 * disabled it — TODO confirm
		 */
		ata_port_probe(ap);
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_present(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			dev->class++;	/* disable device */
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -1;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing the flag is all it takes; the actual probing is
	 * done elsewhere (ata_bus_probe)
	 */
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;
	const char *speed;

	/* no SCR access means no SStatus register to report */
	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
		/* SPD field of SStatus (bits 7:4) encodes the link speed.
		 *
		 * NOTE(review): SPD is spec-defined as a value (1 = Gen1,
		 * 2 = Gen2), not a bitmask; the bit tests below happen to
		 * work for those two values — confirm against the SATA spec.
		 */
		tmp = (sstatus >> 4) & 0xf;
		if (tmp & (1 << 0))
			speed = "1.5";
		else if (tmp & (1 << 1))
			speed = "3.0";
		else
			speed = "<unknown>";
		printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
		       ap->id, speed, sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset (SControl DET = 1) */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms.
		 */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary; DET value 1 in
	 * SStatus means the phy is not yet established
	 */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	/* wait for the attached device to drop BSY; disable the port
	 * if it never does
	 */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1521 * sata_phy_reset - Reset SATA bus.
1522 * @ap: SATA port associated with target SATA PHY.
1524 * This function resets the SATA bus, and then probes
1525 * the bus for devices.
1527 * LOCKING:
1528 * PCI/etc. bus probe sem.
1531 void sata_phy_reset(struct ata_port *ap)
1533 __sata_phy_reset(ap);
1534 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1535 return;
1536 ata_bus_reset(ap);
1540 * ata_port_disable - Disable port.
1541 * @ap: Port to be disabled.
1543 * Modify @ap data structure such that the system
1544 * thinks that the entire port is disabled, and should
1545 * never attempt to probe or communicate with devices
1546 * on this port.
1548 * LOCKING: host_set lock, or some other form of
1549 * serialization.
1552 void ata_port_disable(struct ata_port *ap)
1554 ap->device[0].class = ATA_DEV_NONE;
1555 ap->device[1].class = ATA_DEV_NONE;
1556 ap->flags |= ATA_FLAG_PORT_DISABLED;
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 *
 * Positional initializers: mode, setup, act8b, rec8b, cyc8b,
 * active, recover, cycle, udma — presumably matching the field
 * order of struct ata_timing (see ata_timing_quantize); confirm
 * against <linux/libata.h>.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }	/* sentinel: terminates ata_timing_find_mode() scans */
};
/* ENOUGH: how many whole units of @unit are needed to cover @v (ceil div) */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
/* EZ: like ENOUGH but maps 0 to 0 instead of wrapping */
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Quantize nanosecond timings in @t into clock counts in @q, rounding
 * each nonzero value up.  @T is the PIO/DMA clock period and @UT the
 * UDMA clock period — presumably in picoseconds, given the * 1000
 * scaling of the ns values; confirm with callers.  @t and @q may alias.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
1619 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1620 struct ata_timing *m, unsigned int what)
1622 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1623 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1624 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1625 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1626 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1627 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1628 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1629 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1632 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1634 const struct ata_timing *t;
1636 for (t = ata_timing; t->mode != speed; t++)
1637 if (t->mode == 0xFF)
1638 return NULL;
1639 return t;
/* Compute bus-clock timings for @adev at transfer mode @speed into @t.
 * @T/@UT are the PIO-and-DMA / UDMA clock periods passed through to
 * ata_timing_quantize().  Returns 0 on success, -EINVAL for an
 * unknown mode.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* slower PIO modes use the non-IORDY cycle time */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table vs. drive-reported cycle times */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		/* recurse once for the device's PIO mode, then merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
/* Program the transfer mode previously chosen in dev->xfer_mode into
 * the device (via SET FEATURES - XFER) and revalidate it.  No-op for
 * absent devices or disabled ports.
 */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	/* re-read IDENTIFY to make sure the device survived the mode
	 * change; failure disables the whole port
	 */
	if (ata_dev_revalidate(ap, dev, 0)) {
		printk(KERN_ERR "ata%u: failed to revalidate after set "
		       "xfermode, disabled\n", ap->id);
		ata_port_disable(ap);
	}

	/* NOTE(review): on revalidation failure we still fall through
	 * and print "configured for ..." below — confirm this is intended.
	 */
	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
}
1731 static int ata_host_set_pio(struct ata_port *ap)
1733 int i;
1735 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1736 struct ata_device *dev = &ap->device[i];
1738 if (!ata_dev_present(dev))
1739 continue;
1741 if (!dev->pio_mode) {
1742 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1743 return -1;
1746 dev->xfer_mode = dev->pio_mode;
1747 dev->xfer_shift = ATA_SHIFT_PIO;
1748 if (ap->ops->set_piomode)
1749 ap->ops->set_piomode(ap, dev);
1752 return 0;
1755 static void ata_host_set_dma(struct ata_port *ap)
1757 int i;
1759 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1760 struct ata_device *dev = &ap->device[i];
1762 if (!ata_dev_present(dev) || !dev->dma_mode)
1763 continue;
1765 dev->xfer_mode = dev->dma_mode;
1766 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1767 if (ap->ops->set_dmamode)
1768 ap->ops->set_dmamode(ap, dev);
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int xfer_mask;

		if (!ata_dev_present(dev))
			continue;

		xfer_mask = ata_dev_xfermask(ap, dev);

		/* split the combined mask into the best PIO mode and the
		 * best DMA (MWDMA or UDMA) mode for this device
		 */
		dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
		dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
								ATA_MASK_UDMA));
	}

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_set_mode(ap, &ap->device[i]);

	/* step 4 may have disabled the port on revalidation failure */
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers first, then write the command
	 * register — order matters
	 */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies); warn if still BSY after this
 *	@tmout: overall timeout (jiffies); give up if still BSY after this
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 *
 *	RETURNS:
 *	Zero if the device became ready, 1 on timeout.
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* phase 1: poll until @tmout_pat expires */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	/* phase 2: keep polling until the overall @tmout (measured from
	 * the same start time) expires
	 */
	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
/* Wait for the devices in @devmask to come out of a bus reset:
 * device 0 just needs BSY to clear; device 1 must first respond to
 * register writes (nsect/lbal read back as 1) before its BSY is
 * waited out.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		/* nsect/lbal == 1/1 is the post-reset signature: the
		 * device is now answering register reads
		 */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	RETURNS:
 *	Result of ata_busy_sleep(): zero if the device became ready,
 *	nonzero on timeout.
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
/* Pulse SRST in the device control register to soft-reset the bus,
 * then wait for the devices in @devmask via ata_bus_post_reset().
 * Returns 0 on success, 1 if the status register floats 0xFF
 * (nothing usable on the bus).
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7 pulldown
	 * resistor
	 */
	if (ata_check_status(ap) == 0xFF)
		return 1;	/* Positive is failure for some reason */

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 exists */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset: prefer SRST; otherwise fall back to EDD
	 * unless a SATA phy reset already did the job
	 */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 * (err 0x81 presumably means device 1 failed diagnostics, so
	 * skip classifying it — TODO confirm against ATA spec)
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2123 static int sata_phy_resume(struct ata_port *ap)
2125 unsigned long timeout = jiffies + (HZ * 5);
2126 u32 sstatus;
2128 scr_write_flush(ap, SCR_CONTROL, 0x300);
2130 /* Wait for phy to become ready, if necessary. */
2131 do {
2132 msleep(200);
2133 sstatus = scr_read(ap, SCR_STATUS);
2134 if ((sstatus & 0xf) != 1)
2135 return 0;
2136 } while (time_before(jiffies, timeout));
2138 return -1;
2142 * ata_std_probeinit - initialize probing
2143 * @ap: port to be probed
2145 * @ap is about to be probed. Initialize it. This function is
2146 * to be used as standard callback for ata_drive_probe_reset().
2148 * NOTE!!! Do not use this function as probeinit if a low level
2149 * driver implements only hardreset. Just pass NULL as probeinit
2150 * in that case. Using this function is probably okay but doing
2151 * so makes reset sequence different from the original
2152 * ->phy_reset implementation and Jeff nervous. :-P
2154 extern void ata_std_probeinit(struct ata_port *ap)
2156 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2157 sata_phy_resume(ap);
2158 if (sata_dev_present(ap))
2159 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@verbose: fail verbosely
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.  This function is to be used
 *	as standard callback for ata_drive_*_reset() functions.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	/* on a port with SCR access, skip SRST entirely when the SATA
	 * link reports no device
	 */
	if (ap->ops->scr_read && !sata_dev_present(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		if (verbose)
			printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
			       ap->id, err_mask);
		else
			DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices
	 * (err 0x81 presumably means device 1 failed diagnostics, so
	 * skip classifying it — same convention as ata_bus_reset)
	 */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@verbose: fail verbosely
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *	This function is to be used as standard callback for
 *	ata_drive_*_reset().
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
{
	DPRINTK("ENTER\n");

	/* Issue phy wake/reset (SControl DET = 1) */
	scr_write_flush(ap, SCR_CONTROL, 0x301);

	/*
	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* Bring phy back */
	sata_phy_resume(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!sata_dev_present(ap)) {
		/* link offline is a successful reset with no device */
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		if (verbose)
			printk(KERN_ERR "ata%u: COMRESET failed "
			       "(device not ready)\n", ap->id);
		else
			DPRINTK("EXIT, device not ready\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	This function is to be used as standard callback for
 *	ata_drive_*_reset().
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	DPRINTK("ENTER\n");

	/* set cable type if it isn't already set */
	if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	/* print link status */
	if (ap->cbl == ATA_CBL_SATA)
		sata_print_link_status(ap);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary?
	 *
	 * NOTE(review): the indices look swapped relative to
	 * ata_bus_reset(), which selects device 1 when *device 1* is
	 * present; here device 1 is selected when classes[0] is set and
	 * vice versa.  Confirm whether this is intentional.
	 */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr) {
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		else
			outb(ap->ctl, ap->ioaddr.ctl_addr);
	}

	DPRINTK("EXIT\n");
}
2334 * ata_std_probe_reset - standard probe reset method
2335 * @ap: prot to perform probe-reset
2336 * @classes: resulting classes of attached devices
2338 * The stock off-the-shelf ->probe_reset method.
2340 * LOCKING:
2341 * Kernel thread context (may sleep)
2343 * RETURNS:
2344 * 0 on success, -errno otherwise.
2346 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2348 ata_reset_fn_t hardreset;
2350 hardreset = NULL;
2351 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2352 hardreset = sata_std_hardreset;
2354 return ata_drive_probe_reset(ap, ata_std_probeinit,
2355 ata_std_softreset, hardreset,
2356 ata_std_postreset, classes);
2359 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2360 ata_postreset_fn_t postreset,
2361 unsigned int *classes)
2363 int i, rc;
2365 for (i = 0; i < ATA_MAX_DEVICES; i++)
2366 classes[i] = ATA_DEV_UNKNOWN;
2368 rc = reset(ap, 0, classes);
2369 if (rc)
2370 return rc;
2372 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2373 * is complete and convert all ATA_DEV_UNKNOWN to
2374 * ATA_DEV_NONE.
2376 for (i = 0; i < ATA_MAX_DEVICES; i++)
2377 if (classes[i] != ATA_DEV_UNKNOWN)
2378 break;
2380 if (i < ATA_MAX_DEVICES)
2381 for (i = 0; i < ATA_MAX_DEVICES; i++)
2382 if (classes[i] == ATA_DEV_UNKNOWN)
2383 classes[i] = ATA_DEV_NONE;
2385 if (postreset)
2386 postreset(ap, classes);
2388 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2392 * ata_drive_probe_reset - Perform probe reset with given methods
2393 * @ap: port to reset
2394 * @probeinit: probeinit method (can be NULL)
2395 * @softreset: softreset method (can be NULL)
2396 * @hardreset: hardreset method (can be NULL)
2397 * @postreset: postreset method (can be NULL)
2398 * @classes: resulting classes of attached devices
2400 * Reset the specified port and classify attached devices using
2401 * given methods. This function prefers softreset but tries all
2402 * possible reset sequences to reset and classify devices. This
2403 * function is intended to be used for constructing ->probe_reset
2404 * callback by low level drivers.
2406 * Reset methods should follow the following rules.
2408 * - Return 0 on sucess, -errno on failure.
2409 * - If classification is supported, fill classes[] with
2410 * recognized class codes.
2411 * - If classification is not supported, leave classes[] alone.
2412 * - If verbose is non-zero, print error message on failure;
2413 * otherwise, shut up.
2415 * LOCKING:
2416 * Kernel thread context (may sleep)
2418 * RETURNS:
2419 * 0 on success, -EINVAL if no reset method is avaliable, -ENODEV
2420 * if classification fails, and any error code from reset
2421 * methods.
2423 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2424 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2425 ata_postreset_fn_t postreset, unsigned int *classes)
2427 int rc = -EINVAL;
2429 if (probeinit)
2430 probeinit(ap);
2432 if (softreset) {
2433 rc = do_probe_reset(ap, softreset, postreset, classes);
2434 if (rc == 0)
2435 return 0;
2438 if (!hardreset)
2439 return rc;
2441 rc = do_probe_reset(ap, hardreset, postreset, classes);
2442 if (rc == 0 || rc != -ENODEV)
2443 return rc;
2445 if (softreset)
2446 rc = do_probe_reset(ap, softreset, postreset, classes);
2448 return rc;
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@ap: port on which the device to compare against resides
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
			       unsigned int new_class, const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][41], serial[2][21];
	u64 new_n_sectors;

	if (dev->class != new_class) {
		printk(KERN_INFO
		       "ata%u: dev %u class mismatch %d != %d\n",
		       ap->id, dev->devno, dev->class, new_class);
		return 0;
	}

	/* extract model/serial strings from both IDENTIFY pages
	 * ([0] = old, [1] = new)
	 */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		printk(KERN_INFO
		       "ata%u: dev %u model number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		printk(KERN_INFO
		       "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
		       ap->id, dev->devno, serial[0], serial[1]);
		return 0;
	}

	/* capacity comparison only applies to ATA disks */
	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		printk(KERN_INFO
		       "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
		       ap->id, dev->devno, (unsigned long long)dev->n_sectors,
		       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}
2514 * ata_dev_revalidate - Revalidate ATA device
2515 * @ap: port on which the device to revalidate resides
2516 * @dev: device to revalidate
2517 * @post_reset: is this revalidation after reset?
2519 * Re-read IDENTIFY page and make sure @dev is still attached to
2520 * the port.
2522 * LOCKING:
2523 * Kernel thread context (may sleep)
2525 * RETURNS:
2526 * 0 on success, negative errno otherwise
2528 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2529 int post_reset)
2531 unsigned int class;
2532 u16 *id;
2533 int rc;
2535 if (!ata_dev_present(dev))
2536 return -ENODEV;
2538 class = dev->class;
2539 id = NULL;
2541 /* allocate & read ID data */
2542 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2543 if (rc)
2544 goto fail;
2546 /* is the device still there? */
2547 if (!ata_dev_same_device(ap, dev, class, id)) {
2548 rc = -ENODEV;
2549 goto fail;
2552 kfree(dev->id);
2553 dev->id = id;
2555 /* configure device according to the new ID */
2556 return ata_dev_configure(ap, dev, 0);
2558 fail:
2559 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2560 ap->id, dev->devno, rc);
2561 kfree(id);
2562 return rc;
/*
 * DMA blacklist, consumed by ata_dma_blacklisted() below.  Entries come
 * in pairs: { model string, firmware revision }.  A NULL revision
 * blacklists every revision of that model; a non-NULL revision
 * blacklists only that specific firmware revision.
 */
static const char * const ata_dma_blacklist [] = {
	/* Drive			Firmware revision (NULL == all) */
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B",  NULL,
	"CRD-8400B", NULL,
	"CRD-8480B", NULL,
	"CRD-8482B", NULL,
	"CRD-84", NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"CD-532E-A", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	/* NOTE(review): duplicate of the earlier "SanDisk SDP3B-64" entry */
	"SanDisk SDP3B-64", NULL,
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
};
/*
 * Trim trailing blanks from an ATA ID string in place and return the
 * resulting length.  ATAPI specifies that unused space is blank-filled.
 */
static int ata_strim(char *s, size_t len)
{
	size_t n = strnlen(s, len);

	while (n && s[n - 1] == ' ')
		s[--n] = '\0';

	return n;
}
2610 static int ata_dma_blacklisted(const struct ata_device *dev)
2612 unsigned char model_num[40];
2613 unsigned char model_rev[16];
2614 unsigned int nlen, rlen;
2615 int i;
2617 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2618 sizeof(model_num));
2619 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2620 sizeof(model_rev));
2621 nlen = ata_strim(model_num, sizeof(model_num));
2622 rlen = ata_strim(model_rev, sizeof(model_rev));
2624 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2625 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2626 if (ata_dma_blacklist[i+1] == NULL)
2627 return 1;
2628 if (!strncmp(ata_dma_blacklist[i], model_rev, rlen))
2629 return 1;
2632 return 0;
2636 * ata_dev_xfermask - Compute supported xfermask of the given device
2637 * @ap: Port on which the device to compute xfermask for resides
2638 * @dev: Device to compute xfermask for
2640 * Compute supported xfermask of @dev. This function is
2641 * responsible for applying all known limits including host
2642 * controller limits, device blacklist, etc...
2644 * LOCKING:
2645 * None.
2647 * RETURNS:
2648 * Computed xfermask.
2650 static unsigned int ata_dev_xfermask(struct ata_port *ap,
2651 struct ata_device *dev)
2653 unsigned long xfer_mask;
2654 int i;
2656 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2657 ap->udma_mask);
2659 /* use port-wide xfermask for now */
2660 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2661 struct ata_device *d = &ap->device[i];
2662 if (!ata_dev_present(d))
2663 continue;
2664 xfer_mask &= ata_id_xfermask(d->id);
2665 if (ata_dma_blacklisted(d))
2666 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2669 if (ata_dma_blacklisted(dev))
2670 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2671 "disabling DMA\n", ap->id, dev->devno);
2673 return xfer_mask;
2677 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2678 * @ap: Port associated with device @dev
2679 * @dev: Device to which command will be sent
2681 * Issue SET FEATURES - XFER MODE command to device @dev
2682 * on port @ap.
2684 * LOCKING:
2685 * PCI/etc. bus probe sem.
2688 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2690 struct ata_taskfile tf;
2692 /* set up set-features taskfile */
2693 DPRINTK("set features - xfer mode\n");
2695 ata_tf_init(ap, &tf, dev->devno);
2696 tf.command = ATA_CMD_SET_FEATURES;
2697 tf.feature = SETFEATURES_XFER;
2698 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2699 tf.protocol = ATA_PROT_NODATA;
2700 tf.nsect = dev->xfer_mode;
2702 if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
2703 printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
2704 ap->id);
2705 ata_port_disable(ap);
2708 DPRINTK("EXIT\n");
2712 * ata_dev_init_params - Issue INIT DEV PARAMS command
2713 * @ap: Port associated with device @dev
2714 * @dev: Device to which command will be sent
2716 * LOCKING:
2717 * Kernel thread context (may sleep)
2719 * RETURNS:
2720 * 0 on success, AC_ERR_* mask otherwise.
2723 static unsigned int ata_dev_init_params(struct ata_port *ap,
2724 struct ata_device *dev)
2726 struct ata_taskfile tf;
2727 unsigned int err_mask;
2728 u16 sectors = dev->id[6];
2729 u16 heads = dev->id[3];
2731 /* Number of sectors per track 1-255. Number of heads 1-16 */
2732 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2733 return 0;
2735 /* set up init dev params taskfile */
2736 DPRINTK("init dev params \n");
2738 ata_tf_init(ap, &tf, dev->devno);
2739 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2740 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2741 tf.protocol = ATA_PROT_NODATA;
2742 tf.nsect = sectors;
2743 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2745 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2747 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2748 return err_mask;
2752 * ata_sg_clean - Unmap DMA memory associated with command
2753 * @qc: Command containing DMA memory to be released
2755 * Unmap all mapped DMA memory associated with this command.
2757 * LOCKING:
2758 * spin_lock_irqsave(host_set lock)
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	/* single-buffer commands use at most one sg element */
	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* scatter-gather case: unmap, then undo the pad trim */
		if (qc->n_elem)
			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			/* copy received pad bytes back into the caller's
			 * last sg page (location set up in ata_sg_setup)
			 */
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		/* single-buffer case */
		if (qc->n_elem)
			dma_unmap_single(ap->host_set->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			/* copy received pad bytes to the buffer tail */
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
2811 * ata_fill_sg - Fill PCI IDE PRD table
2812 * @qc: Metadata associated with taskfile to be transferred
2814 * Fill PCI IDE PRD (scatter-gather) table with segments
2815 * associated with the current disk command.
2817 * LOCKING:
2818 * spin_lock_irqsave(host_set lock)
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	/* Emit one PRD entry per 64KB-bounded chunk of each sg segment. */
	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			/* split any chunk that would cross a 64K boundary */
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2862 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2863 * @qc: Metadata associated with taskfile to check
2865 * Allow low-level driver to filter ATA PACKET commands, returning
2866 * a status indicating whether or not it is OK to use DMA for the
2867 * supplied PACKET command.
2869 * LOCKING:
2870 * spin_lock_irqsave(host_set lock)
2872 * RETURNS: 0 when ATAPI DMA can be used
2873 * nonzero otherwise
2875 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2877 struct ata_port *ap = qc->ap;
2878 int rc = 0; /* Assume ATAPI DMA is OK by default */
2880 if (ap->ops->check_atapi_dma)
2881 rc = ap->ops->check_atapi_dma(qc);
2883 return rc;
2886 * ata_qc_prep - Prepare taskfile for submission
2887 * @qc: Metadata associated with taskfile to be prepared
2889 * Prepare ATA taskfile for submission.
2891 * LOCKING:
2892 * spin_lock_irqsave(host_set lock)
2894 void ata_qc_prep(struct ata_queued_cmd *qc)
2896 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2897 return;
2899 ata_fill_sg(qc);
/* No-op ->qc_prep for controllers that need no PRD/sg preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2905 * ata_sg_init_one - Associate command with memory buffer
2906 * @qc: Command to be associated
2907 * @buf: Memory buffer
2908 * @buflen: Length of memory buffer, in bytes.
2910 * Initialize the data-related elements of queued_cmd @qc
2911 * to point to a single memory buffer, @buf of byte length @buflen.
2913 * LOCKING:
2914 * spin_lock_irqsave(host_set lock)
2917 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2919 struct scatterlist *sg;
2921 qc->flags |= ATA_QCFLAG_SINGLE;
2923 memset(&qc->sgent, 0, sizeof(qc->sgent));
2924 qc->__sg = &qc->sgent;
2925 qc->n_elem = 1;
2926 qc->orig_n_elem = 1;
2927 qc->buf_virt = buf;
2929 sg = qc->__sg;
2930 sg_init_one(sg, buf, buflen);
2934 * ata_sg_init - Associate command with scatter-gather table.
2935 * @qc: Command to be associated
2936 * @sg: Scatter-gather table.
2937 * @n_elem: Number of elements in s/g table.
2939 * Initialize the data-related elements of queued_cmd @qc
2940 * to point to a scatter-gather table @sg, containing @n_elem
2941 * elements.
2943 * LOCKING:
2944 * spin_lock_irqsave(host_set lock)
2947 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2948 unsigned int n_elem)
2950 qc->flags |= ATA_QCFLAG_SG;
2951 qc->__sg = sg;
2952 qc->n_elem = n_elem;
2953 qc->orig_n_elem = n_elem;
2957 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2958 * @qc: Command with memory buffer to be mapped.
2960 * DMA-map the memory buffer associated with queued_cmd @qc.
2962 * LOCKING:
2963 * spin_lock_irqsave(host_set lock)
2965 * RETURNS:
2966 * Zero on success, negative on error.
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI transfers should ever need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for a write, pre-load the pad buffer with the tail bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* if padding consumed the whole buffer, skip the real mapping */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
3026 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3027 * @qc: Command with scatter-gather table to be mapped.
3029 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3031 * LOCKING:
3032 * spin_lock_irqsave(host_set lock)
3034 * RETURNS:
3035 * Zero on success, negative on error.
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];	/* last sg entry */
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI transfers should ever need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for a write, pre-load the pad buffer with the tail bytes */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last element if the pad buffer consumed it entirely */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
3111 * ata_poll_qc_complete - turn irq back on and finish qc
3112 * @qc: Command to complete
3113 * @err_mask: ATA status register content
3115 * LOCKING:
3116 * None. (grabs host lock)
void ata_poll_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	/* polling is over: re-enable interrupts before completing */
	ap->flags &= ~ATA_FLAG_NOINTR;
	ata_irq_on(ap);
	ata_qc_complete(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
3132 * ata_pio_poll - poll using PIO, depending on current state
3133 * @ap: the target ata_port
3135 * LOCKING:
3136 * None. (executing in kernel thread context)
3138 * RETURNS:
3139 * timeout value to use
static unsigned long ata_pio_poll(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;
	unsigned int poll_state = HSM_ST_UNKNOWN;
	unsigned int reg_state = HSM_ST_UNKNOWN;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* map the current state to its polling / regular counterparts */
	switch (ap->hsm_task_state) {
	case HSM_ST:
	case HSM_ST_POLL:
		poll_state = HSM_ST_POLL;
		reg_state = HSM_ST;
		break;
	case HSM_ST_LAST:
	case HSM_ST_LAST_POLL:
		poll_state = HSM_ST_LAST_POLL;
		reg_state = HSM_ST_LAST;
		break;
	default:
		BUG();
		break;
	}

	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		/* still busy: fail on timeout, else keep polling */
		if (time_after(jiffies, ap->pio_task_timeout)) {
			qc->err_mask |= AC_ERR_TIMEOUT;
			ap->hsm_task_state = HSM_ST_TMOUT;
			return 0;
		}
		ap->hsm_task_state = poll_state;
		return ATA_SHORT_PAUSE;	/* re-poll after a short delay */
	}

	/* BSY cleared: resume normal processing immediately */
	ap->hsm_task_state = reg_state;
	return 0;
}
3184 * ata_pio_complete - check if drive is busy or idle
3185 * @ap: the target ata_port
3187 * LOCKING:
3188 * None. (executing in kernel thread context)
3190 * RETURNS:
3191 * Non-zero if qc completed, zero otherwise.
static int ata_pio_complete (struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 drv_stat;

	/*
	 * This is purely heuristic.  This is a fast path.  Sometimes when
	 * we enter, BSY will be cleared in a chk-status or two.  If not,
	 * the drive is probably seeking or something.  Snooze for a couple
	 * msecs, then chk-status again.  If still busy, fall back to
	 * HSM_ST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
	if (drv_stat & ATA_BUSY) {
		msleep(2);
		drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
		if (drv_stat & ATA_BUSY) {
			ap->hsm_task_state = HSM_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return 0;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* the device must now be idle and error-free */
	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		qc->err_mask |= __ac_err_mask(drv_stat);
		ap->hsm_task_state = HSM_ST_ERR;
		return 0;
	}

	ap->hsm_task_state = HSM_ST_IDLE;

	WARN_ON(qc->err_mask);
	ata_poll_qc_complete(qc);

	/* another command may start at this point */

	return 1;
}
3239 * swap_buf_le16 - swap halves of 16-bit words in place
3240 * @buf: Buffer to swap
3241 * @buf_words: Number of 16-bit words in buffer.
3243 * Swap halves of 16-bit words if needed to convert from
3244 * little-endian byte order to native cpu byte order, or
3245 * vice-versa.
3247 * LOCKING:
3248 * Inherited from caller.
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	/* no-op on little-endian; byte-swap each word on big-endian */
	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
3261 * ata_mmio_data_xfer - Transfer data by MMIO
3262 * @ap: port to read/write
3263 * @buf: data buffer
3264 * @buflen: buffer length
3265 * @write_data: read/write
3267 * Transfer data from/to the device data register by MMIO.
3269 * LOCKING:
3270 * Inherited from caller.
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
{
	unsigned int i;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	/* Transfer multiple of 2 bytes */
	if (write_data) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), mmio);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(mmio));
	}

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		/* widen the odd final byte through a 16-bit bounce word,
		 * since the data register is only word-addressable
		 */
		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			writew(le16_to_cpu(align_buf[0]), mmio);
		} else {
			align_buf[0] = cpu_to_le16(readw(mmio));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3306 * ata_pio_data_xfer - Transfer data by PIO
3307 * @ap: port to read/write
3308 * @buf: data buffer
3309 * @buflen: buffer length
3310 * @write_data: read/write
3312 * Transfer data from/to the device data register by PIO.
3314 * LOCKING:
3315 * Inherited from caller.
static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
			      unsigned int buflen, int write_data)
{
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		outsw(ap->ioaddr.data_addr, buf, words);
	else
		insw(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		/* widen the odd final byte through a 16-bit bounce word,
		 * since the data port is only word-addressable
		 */
		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
3345 * ata_data_xfer - Transfer data from/to the data register.
3346 * @ap: port to read/write
3347 * @buf: data buffer
3348 * @buflen: buffer length
3349 * @do_write: read/write
3351 * Transfer data from/to the device data register.
3353 * LOCKING:
3354 * Inherited from caller.
3357 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3358 unsigned int buflen, int do_write)
3360 /* Make the crap hardware pay the costs not the good stuff */
3361 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3362 unsigned long flags;
3363 local_irq_save(flags);
3364 if (ap->flags & ATA_FLAG_MMIO)
3365 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3366 else
3367 ata_pio_data_xfer(ap, buf, buflen, do_write);
3368 local_irq_restore(flags);
3369 } else {
3370 if (ap->flags & ATA_FLAG_MMIO)
3371 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3372 else
3373 ata_pio_data_xfer(ap, buf, buflen, do_write);
3378 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3379 * @qc: Command on going
3381 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3383 * LOCKING:
3384 * Inherited from caller.
3387 static void ata_pio_sector(struct ata_queued_cmd *qc)
3389 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3390 struct scatterlist *sg = qc->__sg;
3391 struct ata_port *ap = qc->ap;
3392 struct page *page;
3393 unsigned int offset;
3394 unsigned char *buf;
3396 if (qc->cursect == (qc->nsect - 1))
3397 ap->hsm_task_state = HSM_ST_LAST;
3399 page = sg[qc->cursg].page;
3400 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3402 /* get the current page and offset */
3403 page = nth_page(page, (offset >> PAGE_SHIFT));
3404 offset %= PAGE_SIZE;
3406 buf = kmap(page) + offset;
3408 qc->cursect++;
3409 qc->cursg_ofs++;
3411 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3412 qc->cursg++;
3413 qc->cursg_ofs = 0;
3416 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3418 /* do the actual data transfer */
3419 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3420 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3422 kunmap(page);
3426 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3427 * @qc: Command on going
3428 * @bytes: number of bytes
 *	Transfer data from/to the ATAPI device.
3432 * LOCKING:
3433 * Inherited from caller.
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* does this chunk finish the command? */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
			       ap->id, bytes);

		/* sink/source the excess through a zeroed bounce word */
		for (i = 0; i < words; i++)
			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	buf = kmap(page) + offset;

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* current sg exhausted: advance to the next entry */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	ata_data_xfer(ap, buf, count, do_write);

	kunmap(page);

	/* more bytes remaining in this chunk? keep walking the sg list */
	if (bytes)
		goto next_sg;
}
3511 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3512 * @qc: Command on going
 *	Transfer data from/to the ATAPI device.
3516 * LOCKING:
3517 * Inherited from caller.
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* read the interrupt reason and byte count from the shadow regs */
	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;
	bc_lo = qc->tf.lbam;
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	       ap->id, dev->devno);
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
3554 * ata_pio_block - start PIO on a block
3555 * @ap: the target ata_port
3557 * LOCKING:
3558 * None. (executing in kernel thread context)
static void ata_pio_block(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * HSM_ST_POLL state.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ap->hsm_task_state = HSM_ST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);

	/* check error */
	if (status & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		ap->hsm_task_state = HSM_ST_ERR;
		return;
	}

	/* transfer data if any; ATAPI is byte-count driven, ATA moves
	 * one sector per DRQ block
	 */
	if (is_atapi_taskfile(&qc->tf)) {
		/* DRQ=0 means no more data to transfer */
		if ((status & ATA_DRQ) == 0) {
			ap->hsm_task_state = HSM_ST_LAST;
			return;
		}

		atapi_pio_bytes(qc);
	} else {
		/* handle BSY=0, DRQ=0 as error */
		if ((status & ATA_DRQ) == 0) {
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			return;
		}

		ata_pio_sector(qc);
	}
}
3616 static void ata_pio_error(struct ata_port *ap)
3618 struct ata_queued_cmd *qc;
3620 qc = ata_qc_from_tag(ap, ap->active_tag);
3621 WARN_ON(qc == NULL);
3623 if (qc->tf.command != ATA_CMD_PACKET)
3624 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3626 /* make sure qc->err_mask is available to
3627 * know what's wrong and recover
3629 WARN_ON(qc->err_mask == 0);
3631 ap->hsm_task_state = HSM_ST_IDLE;
3633 ata_poll_qc_complete(qc);
static void ata_pio_task(void *_data)
{
	struct ata_port *ap = _data;
	unsigned long timeout;
	int qc_completed;

fsm_start:
	timeout = 0;
	qc_completed = 0;

	/* dispatch on the current host state machine state */
	switch (ap->hsm_task_state) {
	case HSM_ST_IDLE:
		return;

	case HSM_ST:
		ata_pio_block(ap);
		break;

	case HSM_ST_LAST:
		qc_completed = ata_pio_complete(ap);
		break;

	case HSM_ST_POLL:
	case HSM_ST_LAST_POLL:
		timeout = ata_pio_poll(ap);
		break;

	case HSM_ST_TMOUT:
	case HSM_ST_ERR:
		ata_pio_error(ap);
		return;
	}

	/* re-queue ourselves with a delay, or loop immediately until
	 * the command completes
	 */
	if (timeout)
		ata_port_queue_task(ap, ata_pio_task, ap, timeout);
	else if (!qc_completed)
		goto fsm_start;
}
3676 * atapi_packet_task - Write CDB bytes to hardware
3677 * @_data: Port to which ATAPI device is attached.
3679 * When device has indicated its readiness to accept
3680 * a CDB, this function is called. Send the CDB.
3681 * If DMA is to be performed, exit immediately.
3682 * Otherwise, we are in polling mode, so poll
 *	status until the operation succeeds or fails.
3685 * LOCKING:
3686 * Kernel thread context (may sleep)
static void atapi_packet_task(void *_data)
{
	struct ata_port *ap = _data;
	struct ata_queued_cmd *qc;
	u8 status;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	WARN_ON(qc == NULL);
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	/* sleep-wait for BSY to clear */
	DPRINTK("busy wait\n");
	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		goto err_out;
	}

	/* make sure DRQ is set: the device must be asking for the CDB */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
		qc->err_mask |= AC_ERR_HSM;
		goto err_out;
	}

	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
		unsigned long flags;

		/* Once we're done issuing command and kicking bmdma,
		 * irq handler takes over.  To not lose irq, we need
		 * to clear NOINTR flag before sending cdb, but
		 * interrupt handler shouldn't be invoked before we're
		 * finished.  Hence, the following locking.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		ap->flags &= ~ATA_FLAG_NOINTR;
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
			ap->ops->bmdma_start(qc);	/* initiate bmdma */
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	} else {
		ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);

		/* PIO commands are handled by polling */
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
	}

	return;

err_out:
	/* turn interrupts back on and complete with the recorded error */
	ata_poll_qc_complete(qc);
}
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */

static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host_set *host_set = ap->host_set;
	u8 host_stat = 0, drv_stat;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* stop the PIO state machine before touching hardware */
	ap->hsm_task_state = HSM_ST_IDLE;

	spin_lock_irqsave(&host_set->lock, flags);

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);	/* also clears INTRQ */

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(drv_stat);
		break;
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	ata_eh_qc_complete(qc);

	DPRINTK("EXIT\n");
}
3813 * ata_eng_timeout - Handle timeout of queued command
3814 * @ap: Port on which timed-out command is active
3816 * Some part of the kernel (currently, only the SCSI layer)
3817 * has noticed that the active command on port @ap has not
3818 * completed after a specified length of time. Handle this
3819 * condition by disabling DMA (if necessary) and completing
3820 * transactions, with error if necessary.
3822 * This also handles the case of the "lost interrupt", where
3823 * for some reason (possibly hardware bug, possibly driver bug)
3824 * an interrupt was not delivered to the driver, even though the
3825 * transaction completed successfully.
3827 * LOCKING:
3828 * Inherited from SCSI layer (none, can sleep)
3831 void ata_eng_timeout(struct ata_port *ap)
3833 DPRINTK("ENTER\n");
3835 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3837 DPRINTK("EXIT\n");
3841 * ata_qc_new - Request an available ATA command, for queueing
3842 * @ap: Port associated with device @dev
3843 * @dev: Device from whom we request an available command structure
3845 * LOCKING:
3846 * None.
3849 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3851 struct ata_queued_cmd *qc = NULL;
3852 unsigned int i;
3854 for (i = 0; i < ATA_MAX_QUEUE; i++)
3855 if (!test_and_set_bit(i, &ap->qactive)) {
3856 qc = ata_qc_from_tag(ap, i);
3857 break;
3860 if (qc)
3861 qc->tag = i;
3863 return qc;
3867 * ata_qc_new_init - Request an available ATA command, and initialize it
3868 * @ap: Port associated with device @dev
3869 * @dev: Device from whom we request an available command structure
3871 * LOCKING:
3872 * None.
3875 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3876 struct ata_device *dev)
3878 struct ata_queued_cmd *qc;
3880 qc = ata_qc_new(ap);
3881 if (qc) {
3882 qc->scsicmd = NULL;
3883 qc->ap = ap;
3884 qc->dev = dev;
3886 ata_qc_reinit(qc);
3889 return qc;
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* drop the port's notion of the active command if it
		 * was this one, poison the qc's tag so stale reuse is
		 * caught, then release the tag for re-allocation.
		 */
		if (tag == ap->active_tag)
			ap->active_tag = ATA_TAG_POISON;
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qactive);
	}
}
/* Core command-completion path: unmap DMA, clear the ACTIVE flag,
 * then invoke the command's completion callback.  Caller holds the
 * host_set lock (see callers).
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;

	/* call completion callback */
	qc->complete_fn(qc);
}
3937 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3939 struct ata_port *ap = qc->ap;
3941 switch (qc->tf.protocol) {
3942 case ATA_PROT_DMA:
3943 case ATA_PROT_ATAPI_DMA:
3944 return 1;
3946 case ATA_PROT_ATAPI:
3947 case ATA_PROT_PIO:
3948 if (ap->flags & ATA_FLAG_PIO_DMA)
3949 return 1;
3951 /* fall through */
3953 default:
3954 return 0;
3957 /* never reached */
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			/* scatter-gather list */
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			/* single flat buffer */
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		/* no mapping performed; make sure teardown skips unmap */
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	/* mark command active before handing it to the LLD */
	qc->ap->active_tag = qc->tag;
	qc->flags |= ATA_QCFLAG_ACTIVE;

	return ap->ops->qc_issue(qc);

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	return AC_ERR_SYSTEM;
}
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		ata_tf_to_host(ap, &qc->tf);
		break;

	case ATA_PROT_DMA:
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		break;

	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST;
		ata_port_queue_task(ap, ata_pio_task, ap, 0);
		break;

	case ATA_PROT_ATAPI:
		/* polled ATAPI: CDB is sent from the packet task */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_NODATA:
		/* NOINTR keeps the irq handler away until the CDB is out */
		ap->flags |= ATA_FLAG_NOINTR;
		ata_tf_to_host(ap, &qc->tf);
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		ap->flags |= ATA_FLAG_NOINTR;
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ata_port_queue_task(ap, atapi_packet_task, ap, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
/**
 *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Loads the PRD table address, programs the transfer direction,
 *	then issues the r/w command via ops->exec_command().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear.
	 * Note: ATA_DMA_WR means "controller writes memory", i.e. a
	 * device-to-host (read) command, hence the !rw test.
	 */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
/**
 *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Sets the DMA-Start bit in the BMDMA command register.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expected, so I think it is best to not add a readb()
	 * without first all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}
/**
 *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Port-I/O twin of ata_bmdma_setup_mmio(): loads the PRD table
 *	address, programs the direction bit, and issues the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear.
	 * ATA_DMA_WR = controller writes memory = read command (!rw).
	 */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
4164 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
4165 * @qc: Info associated with this ATA transaction.
4167 * LOCKING:
4168 * spin_lock_irqsave(host_set lock)
4171 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
4173 struct ata_port *ap = qc->ap;
4174 u8 dmactl;
4176 /* start host DMA transaction */
4177 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4178 outb(dmactl | ATA_DMA_START,
4179 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
4184 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
4185 * @qc: Info associated with this ATA transaction.
4187 * Writes the ATA_DMA_START flag to the DMA command register.
4189 * May be used as the bmdma_start() entry in ata_port_operations.
4191 * LOCKING:
4192 * spin_lock_irqsave(host_set lock)
4194 void ata_bmdma_start(struct ata_queued_cmd *qc)
4196 if (qc->ap->flags & ATA_FLAG_MMIO)
4197 ata_bmdma_start_mmio(qc);
4198 else
4199 ata_bmdma_start_pio(qc);
4204 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
4205 * @qc: Info associated with this ATA transaction.
4207 * Writes address of PRD table to device's PRD Table Address
4208 * register, sets the DMA control register, and calls
4209 * ops->exec_command() to start the transfer.
4211 * May be used as the bmdma_setup() entry in ata_port_operations.
4213 * LOCKING:
4214 * spin_lock_irqsave(host_set lock)
4216 void ata_bmdma_setup(struct ata_queued_cmd *qc)
4218 if (qc->ap->flags & ATA_FLAG_MMIO)
4219 ata_bmdma_setup_mmio(qc);
4220 else
4221 ata_bmdma_setup_pio(qc);
4226 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
4227 * @ap: Port associated with this ATA transaction.
4229 * Clear interrupt and error flags in DMA status register.
4231 * May be used as the irq_clear() entry in ata_port_operations.
4233 * LOCKING:
4234 * spin_lock_irqsave(host_set lock)
4237 void ata_bmdma_irq_clear(struct ata_port *ap)
4239 if (!ap->ioaddr.bmdma_addr)
4240 return;
4242 if (ap->flags & ATA_FLAG_MMIO) {
4243 void __iomem *mmio =
4244 ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
4245 writeb(readb(mmio), mmio);
4246 } else {
4247 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
4248 outb(inb(addr), addr);
4254 * ata_bmdma_status - Read PCI IDE BMDMA status
4255 * @ap: Port associated with this ATA transaction.
4257 * Read and return BMDMA status register.
4259 * May be used as the bmdma_status() entry in ata_port_operations.
4261 * LOCKING:
4262 * spin_lock_irqsave(host_set lock)
4265 u8 ata_bmdma_status(struct ata_port *ap)
4267 u8 host_stat;
4268 if (ap->flags & ATA_FLAG_MMIO) {
4269 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
4270 host_stat = readb(mmio + ATA_DMA_STATUS);
4271 } else
4272 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
4273 return host_stat;
/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register.
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		       mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	u8 status, host_stat;

	switch (qc->tf.protocol) {

	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
		/* check status of DMA engine */
		host_stat = ap->ops->bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
			goto idle_irq;

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(qc);

		/* fall through: DMA protocols share the completion
		 * path with the NODATA cases below
		 */

	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_NODATA:
		/* check altstatus first: reading it does NOT clear INTRQ */
		status = ata_altstatus(ap);
		if (status & ATA_BUSY)
			goto idle_irq;

		/* check main status, clearing INTRQ */
		status = ata_chk_status(ap);
		if (unlikely(status & ATA_BUSY))
			goto idle_irq;
		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
			ap->id, qc->tf.protocol, status);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		/* complete taskfile transaction */
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		break;

	default:
		goto idle_irq;
	}

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host_set information structure
 *	@regs: unused
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host_set lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		/* skip disabled ports and ports in NOINTR (CDB-issue) phase */
		if (ap &&
		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
			struct ata_queued_cmd *qc;

			/* only service commands issued with interrupts on */
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
4436 * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
4437 * without filling any other registers
4439 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4440 u8 cmd)
4442 struct ata_taskfile tf;
4443 int err;
4445 ata_tf_init(ap, &tf, dev->devno);
4447 tf.command = cmd;
4448 tf.flags |= ATA_TFLAG_DEVICE;
4449 tf.protocol = ATA_PROT_NODATA;
4451 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4452 if (err)
4453 printk(KERN_ERR "%s: ata command failed: %d\n",
4454 __FUNCTION__, err);
4456 return err;
4459 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4461 u8 cmd;
4463 if (!ata_try_flush_cache(dev))
4464 return 0;
4466 if (ata_id_has_flush_ext(dev->id))
4467 cmd = ATA_CMD_FLUSH_EXT;
4468 else
4469 cmd = ATA_CMD_FLUSH;
4471 return ata_do_simple_cmd(ap, dev, cmd);
/* Put @dev into standby (spun down) state; used on suspend. */
static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
}
/* Kick @dev out of standby via IDLE IMMEDIATE; used on resume. */
static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
{
	return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
}
/**
 *	ata_device_resume - wakeup a previously suspended devices
 *	@ap: port the device is connected to
 *	@dev: the device to resume
 *
 *	Kick the drive back into action, by sending it an idle immediate
 *	command and making sure its transfer mode matches between drive
 *	and host.
 *
 *	RETURNS:
 *	Always 0 (errors from the wakeup command are not propagated).
 */
int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
{
	/* re-negotiate transfer mode once per suspended port */
	if (ap->flags & ATA_FLAG_SUSPENDED) {
		ap->flags &= ~ATA_FLAG_SUSPENDED;
		ata_set_mode(ap);
	}
	if (!ata_dev_present(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_start_drive(ap, dev);	/* ATAPI needs no spin-up */

	return 0;
}
/**
 *	ata_device_suspend - prepare a device for suspend
 *	@ap: port the device is connected to
 *	@dev: the device to suspend
 *
 *	Flush the cache on the drive, if appropriate, then issue a
 *	standbynow command.  Marks the port suspended so resume knows
 *	to re-set transfer modes.
 *
 *	RETURNS:
 *	Always 0 (flush/standby failures are not propagated).
 */
int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
{
	if (!ata_dev_present(dev))
		return 0;
	if (dev->class == ATA_DEV_ATA)
		ata_flush_cache(ap, dev);	/* ATAPI has no FLUSH CACHE */

	ata_standby_drive(ap, dev);
	ap->flags |= ATA_FLAG_SUSPENDED;
	return 0;
}
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table and the DMA
 *	pad buffer.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM (or ata_pad_alloc()'s error) on failure.
 */

int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		/* undo the PRD allocation on partial failure */
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}
/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table and DMA pad buffer allocated by
 *	ata_port_start().
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
/* Default host_stop hook: unmap the MMIO region, if one was mapped. */
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
/**
 *	ata_host_remove - Unregister SCSI host structure with upper layers
 *	@ap: Port to unregister
 *	@do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	if (do_unregister)
		scsi_remove_host(sh);

	/* release per-port resources (PRD table etc.) */
	ap->ops->port_stop(ap);
}
/**
 *	ata_host_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: associated SCSI mid-layer structure
 *	@host_set: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure, and its associated
 *	scsi_host.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	/* SCSI midlayer limits for this host */
	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	/* port starts disabled until probe succeeds */
	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;	/* no command in flight */
	ap->last_ctl = 0xFF;

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_LIST_HEAD(&ap->eh_done_q);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].devno = i;

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 *	ata_host_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host_set: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Allocates a Scsi_Host with an embedded ata_port, initializes
 *	it, and runs the driver's port_start() hook.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");
	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	host->transportt = &ata_scsi_transport_template;

	/* ata_port lives in the Scsi_Host's hostdata area */
	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		/* clear any pending status/irq before enabling the irq */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: bus probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	/* unwind the ports registered so far */
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
/**
 *	ata_host_set_remove - PCI layer callback for device removal
 *	@host_set: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set. Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	/* detach from the SCSI midlayer first, so no new commands arrive */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		/* legacy-mode ports reserved the ISA-era I/O regions */
		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@host: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
	int i;

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);	/* stop port; host already unregistered */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		kfree(ap->device[i].id);	/* free cached IDENTIFY data */

	DPRINTK("EXIT\n");
	return 1;
}
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
4951 #ifdef CONFIG_PCI
/* PCI variant of ata_host_stop(): unmap MMIO via the PCI helpers. */
void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);	/* clear stale pointer */
}
4985 /* move to PCI subsystem */
4986 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
4988 unsigned long tmp = 0;
4990 switch (bits->width) {
4991 case 1: {
4992 u8 tmp8 = 0;
4993 pci_read_config_byte(pdev, bits->reg, &tmp8);
4994 tmp = tmp8;
4995 break;
4997 case 2: {
4998 u16 tmp16 = 0;
4999 pci_read_config_word(pdev, bits->reg, &tmp16);
5000 tmp = tmp16;
5001 break;
5003 case 4: {
5004 u32 tmp32 = 0;
5005 pci_read_config_dword(pdev, bits->reg, &tmp32);
5006 tmp = tmp32;
5007 break;
5010 default:
5011 return -EINVAL;
5014 tmp &= bits->mask;
5016 return (tmp == bits->val) ? 1 : 0;
5019 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5021 pci_save_state(pdev);
5022 pci_disable_device(pdev);
5023 pci_set_power_state(pdev, PCI_D3hot);
5024 return 0;
5027 int ata_pci_device_resume(struct pci_dev *pdev)
5029 pci_set_power_state(pdev, PCI_D0);
5030 pci_restore_state(pdev);
5031 pci_enable_device(pdev);
5032 pci_set_master(pdev);
5033 return 0;
5035 #endif /* CONFIG_PCI */
5038 static int __init ata_init(void)
5040 ata_wq = create_workqueue("ata");
5041 if (!ata_wq)
5042 return -ENOMEM;
5044 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5045 return 0;
5048 static void __exit ata_exit(void)
5050 destroy_workqueue(ata_wq);
5053 module_init(ata_init);
5054 module_exit(ata_exit);
5056 static unsigned long ratelimit_time;
5057 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5059 int ata_ratelimit(void)
5061 int rc;
5062 unsigned long flags;
5064 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5066 if (time_after(jiffies, ratelimit_time)) {
5067 rc = 1;
5068 ratelimit_time = jiffies + (HZ/5);
5069 } else
5070 rc = 0;
5072 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5074 return rc;
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_probeinit);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);