[PATCH] libata: reorganize ata_set_mode()
drivers/scsi/libata-core.c
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev,
					u16 heads,
					u16 sectors);
static void ata_set_mode(struct ata_port *ap);
static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
					 struct ata_device *dev);
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}

	return -1;
}
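
/*
 * Worked example of the ata_rw_cmds[] indexing above (illustrative only):
 * an LBA48 FUA DMA write gives index 16 + fua 4 + lba48 2 + write 1 = 23,
 * i.e. ATA_CMD_WRITE_FUA_EXT, while a plain PIO-multi read gives
 * 0 + 0 + 0 + 0 = 0, i.e. ATA_CMD_READ_MULTI. The zero entries mark
 * invalid combinations (e.g. FUA without LBA48), for which -1 is returned.
 */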

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
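
/*
 * Illustrative layout (the exact values come from ATA_SHIFT_* and
 * ATA_BITS_* in <linux/libata.h>): with PIO in bits 0-4, MWDMA in bits
 * 5-7 and UDMA in bits 8-15, ata_pack_xfermask(0x1f, 0x07, 0x3f) yields
 * 0x3fff, i.e. PIO0-4, MWDMA0-2 and UDMA0-5 all supported.
 */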

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}
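
/*
 * Example (illustrative): an xfer_mask of 0x3fff has fls() == 14, so
 * highbit 13 falls in the UDMA entry of ata_xfer_tbl and the result is
 * XFER_UDMA_0 + (13 - ATA_SHIFT_UDMA) == XFER_UDMA_5; only the fastest
 * set bit matters.
 */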

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
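
/*
 * E.g. (illustrative) an xfer_mask of 0x3fff has its highest bit at
 * position 13, which indexes "UDMA/100" above -- consistent with
 * XFER_UDMA_5 from ata_xfer_mask2mode(). The string table must therefore
 * stay in the same order as the packed mask layout.
 */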

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		printk(KERN_WARNING "ata%u: dev %u disabled\n",
		       ap->id, dev->devno);
		dev->class++;
	}
}

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
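
/*
 * Illustrative example: IDENTIFY strings are stored big-endian within
 * each 16-bit word, so a model string beginning "ST31..." arrives as
 * id[ofs] == ('S' << 8) | 'T' and id[ofs + 1] == ('3' << 8) | '1'; the
 * loop above linearizes it regardless of host endianness (the id buffer
 * having already been fixed up by the caller via swap_buf_le16()).
 */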

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
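
/*
 * E.g. (illustrative): a typical 120 GB LBA28 drive reports 234441648
 * sectors in words 60-61; an LBA48 drive reports the full 64-bit count
 * in words 100-103; pre-LBA drives fall back to default CHS geometry
 * (words 1, 3 and 6).
 */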

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
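
/*
 * Decode example (illustrative): a modern drive sets bit 1 of word 53
 * and reports word 64 == 0x03 (PIO3 and PIO4 supported), giving
 * pio_mask == (0x03 << 3) | 0x7 == 0x1f, i.e. PIO0-4 -- modes below
 * PIO3 are always assumed. With word 88 == 0x3f, udma_mask covers
 * UDMA0-5.
 */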

/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user (low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 */
static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	return err_mask;
}

/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
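
/*
 * Example (illustrative): a drive running PIO2 that reports 180 ns in
 * word 67 (ATA_ID_EIDE_PIO, minimum cycle time without IORDY) fits the
 * 240 ns PIO2 cycle, so no IORDY is needed; one reporting 383 ns exceeds
 * it and returns 1. PIO3 and above always require IORDY.
 */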

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}

static inline u8 ata_dev_knobble(const struct ata_port *ap,
				 struct ata_device *dev)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@ap: Port on which target device resides
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_enabled(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	DPRINTK("EXIT, err\n");
	return rc;
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk("ata%u: reset failed (errno=%d)\n", ap->id, rc);
			return rc;
		}
	} else {
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		ata_port_probe(ap);
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			ata_dev_disable(ap, dev);
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	if (ap->ops->set_mode)
		ap->ops->set_mode(ap);
	else
		ata_set_mode(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -ENODEV;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;

	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		printk(KERN_INFO "ata%u: SATA link up %s (SStatus %X)\n",
		       ap->id, sata_spd_string(tmp), sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_dev_pair - return other device on cable
 *	@ap: port
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or NULL if
 *	none is present.
 */
struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
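
/*
 * ENOUGH() is simply a ceiling division: e.g. ENOUGH(25, 10) == 3, since
 * 25 ns needs three full 10 ns clock ticks. EZ() additionally maps an
 * unspecified value of 0 to 0 ticks instead of 1.
 */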

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands. We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
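
/*
 * Typical use by a controller driver (sketch only; the clock periods and
 * the register programming are hypothetical):
 *
 *	struct ata_timing t;
 *
 *	// T/UT are the command and UDMA clock periods in the units the
 *	// table is quantized to (ns * 1000), e.g. 30000 for a 33 MHz clock
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, 30000, 20000)) {
 *		// program t.active and t.recover clock counts into the
 *		// chip's PIO timing registers here
 *	}
 */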

static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(ap, dev);
	if (err_mask) {
		printk(KERN_ERR
		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
		       ap->id, err_mask);
		return -EIO;
	}

	rc = ata_dev_revalidate(ap, dev, 0);
	if (rc) {
		printk(KERN_ERR
		       "ata%u: failed to revalidate after set xfermode\n",
		       ap->id);
		return rc;
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	struct ata_device *dev;
	int i, rc, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(ap, dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		return;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: dev %u no PIO support\n",
			       ap->id, dev->devno);
			rc = -EINVAL;
			goto err_out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(ap, dev);
		if (rc)
			goto err_out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
		ap->host_set->simplex_claimed = 1;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 */
unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * The old drivers/ide code used the 2 ms rule and then waited
	 * for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return AC_ERR_OTHER;

	ata_bus_post_reset(ap, devmask);

	return 0;
}

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}

static int sata_phy_resume(struct ata_port *ap)
{
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* Wait for phy to become ready, if necessary. */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			return 0;
	} while (time_before(jiffies, timeout));

	return -1;
}

/**
 *	ata_std_probeinit - initialize probing
 *	@ap: port to be probed
 *
 *	@ap is about to be probed.  Initialize it.  This function is
 *	to be used as standard callback for ata_drive_probe_reset().
 *
 *	NOTE!!! Do not use this function as probeinit if a low level
 *	driver implements only hardreset.  Just pass NULL as probeinit
 *	in that case.  Using this function is probably okay but doing
 *	so makes reset sequence different from the original
 *	->phy_reset implementation and Jeff nervous.  :-P
 */
void ata_std_probeinit(struct ata_port *ap)
{
	if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
		sata_phy_resume(ap);
		if (sata_dev_present(ap))
			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
	}
}

/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@verbose: fail verbosely
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.  This function is to be used
 *	as standard callback for ata_drive_*_reset() functions.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	if (ap->ops->scr_read && !sata_dev_present(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		if (verbose)
			printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
			       ap->id, err_mask);
		else
			DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
2221 * sata_std_hardreset - reset host port via SATA phy reset
2222 * @ap: port to reset
2223 * @verbose: fail verbosely
2224 * @class: resulting class of attached device
2226 * SATA phy-reset host port using DET bits of SControl register.
2227 * This function is to be used as standard callback for
2228 * ata_drive_*_reset().
2230 * LOCKING:
2231 * Kernel thread context (may sleep)
2233 * RETURNS:
2234 * 0 on success, -errno otherwise.
2236 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2238 DPRINTK("ENTER\n");
2240 /* Issue phy wake/reset */
2241 scr_write_flush(ap, SCR_CONTROL, 0x301);
2244 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2245 * 10.4.2 says at least 1 ms.
2247 msleep(1);
2249 /* Bring phy back */
2250 sata_phy_resume(ap);
2252 /* TODO: phy layer with polling, timeouts, etc. */
2253 if (!sata_dev_present(ap)) {
2254 *class = ATA_DEV_NONE;
2255 DPRINTK("EXIT, link offline\n");
2256 return 0;
2259 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2260 if (verbose)
2261 printk(KERN_ERR "ata%u: COMRESET failed "
2262 "(device not ready)\n", ap->id);
2263 else
2264 DPRINTK("EXIT, device not ready\n");
2265 return -EIO;
2268 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2270 *class = ata_dev_try_classify(ap, 0, NULL);
2272 DPRINTK("EXIT, class=%u\n", *class);
2273 return 0;
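/*
 * For reference: the SControl value 0x301 written above sets DET to 1,
 * which holds the host phy in a COMRESET sequence; sata_phy_resume()
 * then writes DET back to 0 so the link can renegotiate.  This mirrors
 * the DET handshake described in the SATA specification.
 */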
2277 * ata_std_postreset - standard postreset callback
2278 * @ap: the target ata_port
2279 * @classes: classes of attached devices
2281 * This function is invoked after a successful reset. Note that
2282 * the device might have been reset more than once using
2283 * different reset methods before postreset is invoked.
2285 * This function is to be used as standard callback for
2286 * ata_drive_*_reset().
2288 * LOCKING:
2289 * Kernel thread context (may sleep)
2291 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2293 DPRINTK("ENTER\n");
2295 /* set cable type if it isn't already set */
2296 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2297 ap->cbl = ATA_CBL_SATA;
2299 /* print link status */
2300 if (ap->cbl == ATA_CBL_SATA)
2301 sata_print_link_status(ap);
2303 /* re-enable interrupts */
2304 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2305 ata_irq_on(ap);
2307 /* is double-select really necessary? */
2308 if (classes[0] != ATA_DEV_NONE)
2309 ap->ops->dev_select(ap, 1);
2310 if (classes[1] != ATA_DEV_NONE)
2311 ap->ops->dev_select(ap, 0);
2313 /* bail out if no device is present */
2314 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2315 DPRINTK("EXIT, no device\n");
2316 return;
2319 /* set up device control */
2320 if (ap->ioaddr.ctl_addr) {
2321 if (ap->flags & ATA_FLAG_MMIO)
2322 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2323 else
2324 outb(ap->ctl, ap->ioaddr.ctl_addr);
2327 DPRINTK("EXIT\n");
2331 * ata_std_probe_reset - standard probe reset method
2332 * @ap: port on which to perform probe-reset
2333 * @classes: resulting classes of attached devices
2335 * The stock off-the-shelf ->probe_reset method.
2337 * LOCKING:
2338 * Kernel thread context (may sleep)
2340 * RETURNS:
2341 * 0 on success, -errno otherwise.
2343 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2345 ata_reset_fn_t hardreset;
2347 hardreset = NULL;
2348 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2349 hardreset = sata_std_hardreset;
2351 return ata_drive_probe_reset(ap, ata_std_probeinit,
2352 ata_std_softreset, hardreset,
2353 ata_std_postreset, classes);
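/*
 * Illustrative sketch (not used by this file): an LLD that needs its
 * own hardreset but is otherwise stock would call
 * ata_drive_probe_reset() directly from its ->probe_reset, e.g.
 *
 *	static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
 *	{
 *		return ata_drive_probe_reset(ap, ata_std_probeinit,
 *					     ata_std_softreset, foo_hardreset,
 *					     ata_std_postreset, classes);
 *	}
 *
 * foo_probe_reset() and foo_hardreset() are hypothetical names.
 */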
2356 static int ata_do_reset(struct ata_port *ap,
2357 ata_reset_fn_t reset, ata_postreset_fn_t postreset,
2358 int verbose, unsigned int *classes)
2360 int i, rc;
2362 for (i = 0; i < ATA_MAX_DEVICES; i++)
2363 classes[i] = ATA_DEV_UNKNOWN;
2365 rc = reset(ap, verbose, classes);
2366 if (rc)
2367 return rc;
2369 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2370 * is complete and convert all ATA_DEV_UNKNOWN to
2371 * ATA_DEV_NONE.
2373 for (i = 0; i < ATA_MAX_DEVICES; i++)
2374 if (classes[i] != ATA_DEV_UNKNOWN)
2375 break;
2377 if (i < ATA_MAX_DEVICES)
2378 for (i = 0; i < ATA_MAX_DEVICES; i++)
2379 if (classes[i] == ATA_DEV_UNKNOWN)
2380 classes[i] = ATA_DEV_NONE;
2382 if (postreset)
2383 postreset(ap, classes);
2385 return 0;
2389 * ata_drive_probe_reset - Perform probe reset with given methods
2390 * @ap: port to reset
2391 * @probeinit: probeinit method (can be NULL)
2392 * @softreset: softreset method (can be NULL)
2393 * @hardreset: hardreset method (can be NULL)
2394 * @postreset: postreset method (can be NULL)
2395 * @classes: resulting classes of attached devices
2397 * Reset the specified port and classify attached devices using
2398 * given methods. This function prefers softreset but tries all
2399 * possible reset sequences to reset and classify devices. This
2400 * function is intended to be used by low-level drivers for
2401 * constructing their ->probe_reset callback.
2403 * Reset methods should follow the following rules.
2405 * - Return 0 on success, -errno on failure.
2406 * - If classification is supported, fill classes[] with
2407 * recognized class codes.
2408 * - If classification is not supported, leave classes[] alone.
2409 * - If verbose is non-zero, print error message on failure;
2410 * otherwise, shut up.
2412 * LOCKING:
2413 * Kernel thread context (may sleep)
2415 * RETURNS:
2416 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2417 * if classification fails, and any error code from reset
2418 * methods.
2420 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2421 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2422 ata_postreset_fn_t postreset, unsigned int *classes)
2424 int rc = -EINVAL;
2426 if (probeinit)
2427 probeinit(ap);
2429 if (softreset) {
2430 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2431 if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
2432 goto done;
2435 if (!hardreset)
2436 goto done;
2438 rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
2439 if (rc || classes[0] != ATA_DEV_UNKNOWN)
2440 goto done;
2442 if (softreset)
2443 rc = ata_do_reset(ap, softreset, postreset, 0, classes);
2445 done:
2446 if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
2447 rc = -ENODEV;
2448 return rc;
2452 * ata_dev_same_device - Determine whether new ID matches configured device
2453 * @ap: port on which the device to compare against resides
2454 * @dev: device to compare against
2455 * @new_class: class of the new device
2456 * @new_id: IDENTIFY page of the new device
2458 * Compare @new_class and @new_id against @dev and determine
2459 * whether @dev is the device indicated by @new_class and
2460 * @new_id.
2462 * LOCKING:
2463 * None.
2465 * RETURNS:
2466 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2468 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2469 unsigned int new_class, const u16 *new_id)
2471 const u16 *old_id = dev->id;
2472 unsigned char model[2][41], serial[2][21];
2473 u64 new_n_sectors;
2475 if (dev->class != new_class) {
2476 printk(KERN_INFO
2477 "ata%u: dev %u class mismatch %d != %d\n",
2478 ap->id, dev->devno, dev->class, new_class);
2479 return 0;
2482 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2483 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2484 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2485 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2486 new_n_sectors = ata_id_n_sectors(new_id);
2488 if (strcmp(model[0], model[1])) {
2489 printk(KERN_INFO
2490 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2491 ap->id, dev->devno, model[0], model[1]);
2492 return 0;
2495 if (strcmp(serial[0], serial[1])) {
2496 printk(KERN_INFO
2497 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2498 ap->id, dev->devno, serial[0], serial[1]);
2499 return 0;
2502 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2503 printk(KERN_INFO
2504 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2505 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2506 (unsigned long long)new_n_sectors);
2507 return 0;
2510 return 1;
2514 * ata_dev_revalidate - Revalidate ATA device
2515 * @ap: port on which the device to revalidate resides
2516 * @dev: device to revalidate
2517 * @post_reset: is this revalidation after reset?
2519 * Re-read IDENTIFY page and make sure @dev is still attached to
2520 * the port.
2522 * LOCKING:
2523 * Kernel thread context (may sleep)
2525 * RETURNS:
2526 * 0 on success, negative errno otherwise
2528 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2529 int post_reset)
2531 unsigned int class;
2532 u16 *id;
2533 int rc;
2535 if (!ata_dev_enabled(dev))
2536 return -ENODEV;
2538 class = dev->class;
2539 id = NULL;
2541 /* allocate & read ID data */
2542 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2543 if (rc)
2544 goto fail;
2546 /* is the device still there? */
2547 if (!ata_dev_same_device(ap, dev, class, id)) {
2548 rc = -ENODEV;
2549 goto fail;
2552 kfree(dev->id);
2553 dev->id = id;
2555 /* configure device according to the new ID */
2556 return ata_dev_configure(ap, dev, 0);
2558 fail:
2559 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2560 ap->id, dev->devno, rc);
2561 kfree(id);
2562 return rc;
2565 static const char * const ata_dma_blacklist [] = {
2566 "WDC AC11000H", NULL,
2567 "WDC AC22100H", NULL,
2568 "WDC AC32500H", NULL,
2569 "WDC AC33100H", NULL,
2570 "WDC AC31600H", NULL,
2571 "WDC AC32100H", "24.09P07",
2572 "WDC AC23200L", "21.10N21",
2573 "Compaq CRD-8241B", NULL,
2574 "CRD-8400B", NULL,
2575 "CRD-8480B", NULL,
2576 "CRD-8482B", NULL,
2577 "CRD-84", NULL,
2578 "SanDisk SDP3B", NULL,
2579 "SanDisk SDP3B-64", NULL,
2580 "SANYO CD-ROM CRD", NULL,
2581 "HITACHI CDR-8", NULL,
2582 "HITACHI CDR-8335", NULL,
2583 "HITACHI CDR-8435", NULL,
2584 "Toshiba CD-ROM XM-6202B", NULL,
2585 "TOSHIBA CD-ROM XM-1702BC", NULL,
2586 "CD-532E-A", NULL,
2587 "E-IDE CD-ROM CR-840", NULL,
2588 "CD-ROM Drive/F5A", NULL,
2589 "WPI CDD-820", NULL,
2590 "SAMSUNG CD-ROM SC-148C", NULL,
2591 "SAMSUNG CD-ROM SC", NULL,
2592 "SanDisk SDP3B-64", NULL,
2593 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2594 "_NEC DV5800A", NULL,
2595 "SAMSUNG CD-ROM SN-124", "N001"
2598 static int ata_strim(char *s, size_t len)
2600 len = strnlen(s, len);
2602 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2603 while ((len > 0) && (s[len - 1] == ' ')) {
2604 len--;
2605 s[len] = 0;
2607 return len;
2610 static int ata_dma_blacklisted(const struct ata_device *dev)
2612 unsigned char model_num[40];
2613 unsigned char model_rev[16];
2614 unsigned int nlen, rlen;
2615 int i;
2617 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2618 sizeof(model_num));
2619 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2620 sizeof(model_rev));
2621 nlen = ata_strim(model_num, sizeof(model_num));
2622 rlen = ata_strim(model_rev, sizeof(model_rev));
2624 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2625 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2626 if (ata_dma_blacklist[i+1] == NULL)
2627 return 1;
2628 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2629 return 1;
2632 return 0;
2636 * ata_dev_xfermask - Compute supported xfermask of the given device
2637 * @ap: Port on which the device to compute xfermask for resides
2638 * @dev: Device to compute xfermask for
2640 * Compute supported xfermask of @dev and store it in
2641 * dev->*_mask. This function is responsible for applying all
2642 * known limits including host controller limits, device
2643 * blacklist, etc...
2645 * FIXME: The current implementation limits all transfer modes to
2646 * the fastest mode of the slowest device on the port. This is not
2647 * required on most controllers.
2649 * LOCKING:
2650 * None.
2652 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2654 struct ata_host_set *hs = ap->host_set;
2655 unsigned long xfer_mask;
2656 int i;
2658 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2659 ap->udma_mask);
2661 /* FIXME: Use port-wide xfermask for now */
2662 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2663 struct ata_device *d = &ap->device[i];
2664 if (!ata_dev_enabled(d))
2665 continue;
2666 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2667 d->udma_mask);
2668 xfer_mask &= ata_id_xfermask(d->id);
2669 if (ata_dma_blacklisted(d))
2670 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2671 /* Apply cable rule here. Don't apply it early because when
2672 we handle hot plug the cable type can itself change */
2673 if (ap->cbl == ATA_CBL_PATA40)
2674 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
2677 if (ata_dma_blacklisted(dev))
2678 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2679 "disabling DMA\n", ap->id, dev->devno);
2681 if (hs->flags & ATA_HOST_SIMPLEX) {
2682 if (hs->simplex_claimed)
2683 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2685 if (ap->ops->mode_filter)
2686 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
2688 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2689 &dev->udma_mask);
2693 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2694 * @ap: Port associated with device @dev
2695 * @dev: Device to which command will be sent
2697 * Issue SET FEATURES - XFER MODE command to device @dev
2698 * on port @ap.
2700 * LOCKING:
2701 * PCI/etc. bus probe sem.
2703 * RETURNS:
2704 * 0 on success, AC_ERR_* mask otherwise.
2707 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2708 struct ata_device *dev)
2710 struct ata_taskfile tf;
2711 unsigned int err_mask;
2713 /* set up set-features taskfile */
2714 DPRINTK("set features - xfer mode\n");
2716 ata_tf_init(ap, &tf, dev->devno);
2717 tf.command = ATA_CMD_SET_FEATURES;
2718 tf.feature = SETFEATURES_XFER;
2719 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2720 tf.protocol = ATA_PROT_NODATA;
2721 tf.nsect = dev->xfer_mode;
2723 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2725 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2726 return err_mask;
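/*
 * For reference, the mode value written to the sector count register
 * above is one of the XFER_* constants from <linux/ata.h>, encoded as
 * the ATA SET FEATURES - XFER MODE subcommand expects: e.g. XFER_PIO_4
 * is 0x0c, XFER_MW_DMA_2 is 0x22 and XFER_UDMA_5 is 0x45.
 */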
2730 * ata_dev_init_params - Issue INIT DEV PARAMS command
2731 * @ap: Port associated with device @dev
2732 * @dev: Device to which command will be sent
2734 * LOCKING:
2735 * Kernel thread context (may sleep)
2737 * RETURNS:
2738 * 0 on success, AC_ERR_* mask otherwise.
2741 static unsigned int ata_dev_init_params(struct ata_port *ap,
2742 struct ata_device *dev,
2743 u16 heads,
2744 u16 sectors)
2746 struct ata_taskfile tf;
2747 unsigned int err_mask;
2749 /* Number of sectors per track 1-255. Number of heads 1-16 */
2750 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2751 return AC_ERR_INVALID;
2753 /* set up init dev params taskfile */
2754 DPRINTK("init dev params\n");
2756 ata_tf_init(ap, &tf, dev->devno);
2757 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2758 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2759 tf.protocol = ATA_PROT_NODATA;
2760 tf.nsect = sectors;
2761 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2763 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2765 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2766 return err_mask;
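/*
 * Worked example: for the classic 16-head/63-sector translation the
 * caller passes heads=16, sectors=63, so the taskfile carries nsect=63
 * and 0x0f (the max head number, i.e. heads - 1) in the low nibble of
 * the device register.
 */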
2770 * ata_sg_clean - Unmap DMA memory associated with command
2771 * @qc: Command containing DMA memory to be released
2773 * Unmap all mapped DMA memory associated with this command.
2775 * LOCKING:
2776 * spin_lock_irqsave(host_set lock)
2779 static void ata_sg_clean(struct ata_queued_cmd *qc)
2781 struct ata_port *ap = qc->ap;
2782 struct scatterlist *sg = qc->__sg;
2783 int dir = qc->dma_dir;
2784 void *pad_buf = NULL;
2786 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2787 WARN_ON(sg == NULL);
2789 if (qc->flags & ATA_QCFLAG_SINGLE)
2790 WARN_ON(qc->n_elem > 1);
2792 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2794 /* if we padded the buffer out to 32-bit bound, and data
2795 * xfer direction is from-device, we must copy from the
2796 * pad buffer back into the supplied buffer
2798 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2799 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2801 if (qc->flags & ATA_QCFLAG_SG) {
2802 if (qc->n_elem)
2803 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2804 /* restore last sg */
2805 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2806 if (pad_buf) {
2807 struct scatterlist *psg = &qc->pad_sgent;
2808 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2809 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2810 kunmap_atomic(addr, KM_IRQ0);
2812 } else {
2813 if (qc->n_elem)
2814 dma_unmap_single(ap->dev,
2815 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2816 dir);
2817 /* restore sg */
2818 sg->length += qc->pad_len;
2819 if (pad_buf)
2820 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2821 pad_buf, qc->pad_len);
2824 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2825 qc->__sg = NULL;
2829 * ata_fill_sg - Fill PCI IDE PRD table
2830 * @qc: Metadata associated with taskfile to be transferred
2832 * Fill PCI IDE PRD (scatter-gather) table with segments
2833 * associated with the current disk command.
2835 * LOCKING:
2836 * spin_lock_irqsave(host_set lock)
2839 static void ata_fill_sg(struct ata_queued_cmd *qc)
2841 struct ata_port *ap = qc->ap;
2842 struct scatterlist *sg;
2843 unsigned int idx;
2845 WARN_ON(qc->__sg == NULL);
2846 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2848 idx = 0;
2849 ata_for_each_sg(sg, qc) {
2850 u32 addr, offset;
2851 u32 sg_len, len;
2853 /* determine if physical DMA addr spans 64K boundary.
2854 * Note h/w doesn't support 64-bit, so we unconditionally
2855 * truncate dma_addr_t to u32.
2857 addr = (u32) sg_dma_address(sg);
2858 sg_len = sg_dma_len(sg);
2860 while (sg_len) {
2861 offset = addr & 0xffff;
2862 len = sg_len;
2863 if ((offset + sg_len) > 0x10000)
2864 len = 0x10000 - offset;
2866 ap->prd[idx].addr = cpu_to_le32(addr);
2867 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2868 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2870 idx++;
2871 sg_len -= len;
2872 addr += len;
2876 if (idx)
2877 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
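/*
 * Worked example of the 64K splitting above: an 8KiB segment at bus
 * address 0x0000f000 straddles a 64K boundary, so it is emitted as two
 * PRD entries, (0x0000f000, 0x1000) and (0x00010000, 0x1000), and the
 * entry written last gets the ATA_PRD_EOT flag.
 */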
2880 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2881 * @qc: Metadata associated with taskfile to check
2883 * Allow low-level driver to filter ATA PACKET commands, returning
2884 * a status indicating whether or not it is OK to use DMA for the
2885 * supplied PACKET command.
2887 * LOCKING:
2888 * spin_lock_irqsave(host_set lock)
2890 * RETURNS: 0 when ATAPI DMA can be used
2891 * nonzero otherwise
2893 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2895 struct ata_port *ap = qc->ap;
2896 int rc = 0; /* Assume ATAPI DMA is OK by default */
2898 if (ap->ops->check_atapi_dma)
2899 rc = ap->ops->check_atapi_dma(qc);
2901 return rc;
2904 * ata_qc_prep - Prepare taskfile for submission
2905 * @qc: Metadata associated with taskfile to be prepared
2907 * Prepare ATA taskfile for submission.
2909 * LOCKING:
2910 * spin_lock_irqsave(host_set lock)
2912 void ata_qc_prep(struct ata_queued_cmd *qc)
2914 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2915 return;
2917 ata_fill_sg(qc);
2920 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2923 * ata_sg_init_one - Associate command with memory buffer
2924 * @qc: Command to be associated
2925 * @buf: Memory buffer
2926 * @buflen: Length of memory buffer, in bytes.
2928 * Initialize the data-related elements of queued_cmd @qc
2929 * to point to a single memory buffer, @buf of byte length @buflen.
2931 * LOCKING:
2932 * spin_lock_irqsave(host_set lock)
2935 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2937 struct scatterlist *sg;
2939 qc->flags |= ATA_QCFLAG_SINGLE;
2941 memset(&qc->sgent, 0, sizeof(qc->sgent));
2942 qc->__sg = &qc->sgent;
2943 qc->n_elem = 1;
2944 qc->orig_n_elem = 1;
2945 qc->buf_virt = buf;
2947 sg = qc->__sg;
2948 sg_init_one(sg, buf, buflen);
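/*
 * Typical use (a sketch; see ata_exec_internal() for the real thing):
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->nsect = buflen / ATA_SECT_SIZE;
 *
 * after which the qc can be handed to ata_qc_issue().
 */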
2952 * ata_sg_init - Associate command with scatter-gather table.
2953 * @qc: Command to be associated
2954 * @sg: Scatter-gather table.
2955 * @n_elem: Number of elements in s/g table.
2957 * Initialize the data-related elements of queued_cmd @qc
2958 * to point to a scatter-gather table @sg, containing @n_elem
2959 * elements.
2961 * LOCKING:
2962 * spin_lock_irqsave(host_set lock)
2965 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2966 unsigned int n_elem)
2968 qc->flags |= ATA_QCFLAG_SG;
2969 qc->__sg = sg;
2970 qc->n_elem = n_elem;
2971 qc->orig_n_elem = n_elem;
2975 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2976 * @qc: Command with memory buffer to be mapped.
2978 * DMA-map the memory buffer associated with queued_cmd @qc.
2980 * LOCKING:
2981 * spin_lock_irqsave(host_set lock)
2983 * RETURNS:
2984 * Zero on success, negative on error.
2987 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2989 struct ata_port *ap = qc->ap;
2990 int dir = qc->dma_dir;
2991 struct scatterlist *sg = qc->__sg;
2992 dma_addr_t dma_address;
2993 int trim_sg = 0;
2995 /* we must lengthen transfers to end on a 32-bit boundary */
2996 qc->pad_len = sg->length & 3;
2997 if (qc->pad_len) {
2998 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2999 struct scatterlist *psg = &qc->pad_sgent;
3001 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3003 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3005 if (qc->tf.flags & ATA_TFLAG_WRITE)
3006 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3007 qc->pad_len);
3009 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3010 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3011 /* trim sg */
3012 sg->length -= qc->pad_len;
3013 if (sg->length == 0)
3014 trim_sg = 1;
3016 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3017 sg->length, qc->pad_len);
3020 if (trim_sg) {
3021 qc->n_elem--;
3022 goto skip_map;
3025 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3026 sg->length, dir);
3027 if (dma_mapping_error(dma_address)) {
3028 /* restore sg */
3029 sg->length += qc->pad_len;
3030 return -1;
3033 sg_dma_address(sg) = dma_address;
3034 sg_dma_len(sg) = sg->length;
3036 skip_map:
3037 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3038 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3040 return 0;
3044 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3045 * @qc: Command with scatter-gather table to be mapped.
3047 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3049 * LOCKING:
3050 * spin_lock_irqsave(host_set lock)
3052 * RETURNS:
3053 * Zero on success, negative on error.
3057 static int ata_sg_setup(struct ata_queued_cmd *qc)
3059 struct ata_port *ap = qc->ap;
3060 struct scatterlist *sg = qc->__sg;
3061 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3062 int n_elem, pre_n_elem, dir, trim_sg = 0;
3064 VPRINTK("ENTER, ata%u\n", ap->id);
3065 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3067 /* we must lengthen transfers to end on a 32-bit boundary */
3068 qc->pad_len = lsg->length & 3;
3069 if (qc->pad_len) {
3070 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3071 struct scatterlist *psg = &qc->pad_sgent;
3072 unsigned int offset;
3074 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3076 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3079 * psg->page/offset are used to copy to-be-written
3080 * data in this function or read data in ata_sg_clean.
3082 offset = lsg->offset + lsg->length - qc->pad_len;
3083 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3084 psg->offset = offset_in_page(offset);
3086 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3087 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3088 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3089 kunmap_atomic(addr, KM_IRQ0);
3092 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3093 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3094 /* trim last sg */
3095 lsg->length -= qc->pad_len;
3096 if (lsg->length == 0)
3097 trim_sg = 1;
3099 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3100 qc->n_elem - 1, lsg->length, qc->pad_len);
3103 pre_n_elem = qc->n_elem;
3104 if (trim_sg && pre_n_elem)
3105 pre_n_elem--;
3107 if (!pre_n_elem) {
3108 n_elem = 0;
3109 goto skip_map;
3112 dir = qc->dma_dir;
3113 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3114 if (n_elem < 1) {
3115 /* restore last sg */
3116 lsg->length += qc->pad_len;
3117 return -1;
3120 DPRINTK("%d sg elements mapped\n", n_elem);
3122 skip_map:
3123 qc->n_elem = n_elem;
3125 return 0;
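/*
 * Worked example of the padding above: an ATAPI transfer whose last sg
 * element is 510 bytes long has pad_len = 2; the element is trimmed to
 * 508 bytes and the 4-byte pad entry (qc->pad_sgent) carries the final
 * two data bytes plus two zero bytes, so the transfer still ends on a
 * 32-bit boundary.
 */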
3129 * ata_poll_qc_complete - turn irq back on and finish qc
3130 * @qc: Command to complete
3133 * LOCKING:
3134 * None. (grabs host lock)
3137 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3139 struct ata_port *ap = qc->ap;
3140 unsigned long flags;
3142 spin_lock_irqsave(&ap->host_set->lock, flags);
3143 ap->flags &= ~ATA_FLAG_NOINTR;
3144 ata_irq_on(ap);
3145 ata_qc_complete(qc);
3146 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3150 * ata_pio_poll - poll using PIO, depending on current state
3151 * @ap: the target ata_port
3153 * LOCKING:
3154 * None. (executing in kernel thread context)
3156 * RETURNS:
3157 * timeout value to use
3160 static unsigned long ata_pio_poll(struct ata_port *ap)
3162 struct ata_queued_cmd *qc;
3163 u8 status;
3164 unsigned int poll_state = HSM_ST_UNKNOWN;
3165 unsigned int reg_state = HSM_ST_UNKNOWN;
3167 qc = ata_qc_from_tag(ap, ap->active_tag);
3168 WARN_ON(qc == NULL);
3170 switch (ap->hsm_task_state) {
3171 case HSM_ST:
3172 case HSM_ST_POLL:
3173 poll_state = HSM_ST_POLL;
3174 reg_state = HSM_ST;
3175 break;
3176 case HSM_ST_LAST:
3177 case HSM_ST_LAST_POLL:
3178 poll_state = HSM_ST_LAST_POLL;
3179 reg_state = HSM_ST_LAST;
3180 break;
3181 default:
3182 BUG();
3183 break;
3186 status = ata_chk_status(ap);
3187 if (status & ATA_BUSY) {
3188 if (time_after(jiffies, ap->pio_task_timeout)) {
3189 qc->err_mask |= AC_ERR_TIMEOUT;
3190 ap->hsm_task_state = HSM_ST_TMOUT;
3191 return 0;
3193 ap->hsm_task_state = poll_state;
3194 return ATA_SHORT_PAUSE;
3197 ap->hsm_task_state = reg_state;
3198 return 0;
3202 * ata_pio_complete - check if drive is busy or idle
3203 * @ap: the target ata_port
3205 * LOCKING:
3206 * None. (executing in kernel thread context)
3208 * RETURNS:
3209 * Non-zero if qc completed, zero otherwise.
3212 static int ata_pio_complete (struct ata_port *ap)
3214 struct ata_queued_cmd *qc;
3215 u8 drv_stat;
3218 * This is purely heuristic. This is a fast path. Sometimes when
3219 * we enter, BSY will be cleared in a chk-status or two. If not,
3220 * the drive is probably seeking or something. Snooze for a couple
3221 * msecs, then chk-status again. If still busy, fall back to
3222 * HSM_ST_POLL state.
3224 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3225 if (drv_stat & ATA_BUSY) {
3226 msleep(2);
3227 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3228 if (drv_stat & ATA_BUSY) {
3229 ap->hsm_task_state = HSM_ST_LAST_POLL;
3230 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3231 return 0;
3235 qc = ata_qc_from_tag(ap, ap->active_tag);
3236 WARN_ON(qc == NULL);
3238 drv_stat = ata_wait_idle(ap);
3239 if (!ata_ok(drv_stat)) {
3240 qc->err_mask |= __ac_err_mask(drv_stat);
3241 ap->hsm_task_state = HSM_ST_ERR;
3242 return 0;
3245 ap->hsm_task_state = HSM_ST_IDLE;
3247 WARN_ON(qc->err_mask);
3248 ata_poll_qc_complete(qc);
3250 /* another command may start at this point */
3252 return 1;
3257 * swap_buf_le16 - swap halves of 16-bit words in place
3258 * @buf: Buffer to swap
3259 * @buf_words: Number of 16-bit words in buffer.
3261 * Swap halves of 16-bit words if needed to convert from
3262 * little-endian byte order to native cpu byte order, or
3263 * vice-versa.
3265 * LOCKING:
3266 * Inherited from caller.
3268 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3270 #ifdef __BIG_ENDIAN
3271 unsigned int i;
3273 for (i = 0; i < buf_words; i++)
3274 buf[i] = le16_to_cpu(buf[i]);
3275 #endif /* __BIG_ENDIAN */
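/*
 * Example: IDENTIFY data read through the PIO data register arrives as
 * little-endian 16-bit words, so ata_dev_read_id() passes the buffer
 * through swap_buf_le16(id, ATA_ID_WORDS) before the rest of the
 * driver indexes it in native byte order.
 */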
3279 * ata_mmio_data_xfer - Transfer data by MMIO
3280 * @ap: port to read/write
3281 * @buf: data buffer
3282 * @buflen: buffer length
3283 * @write_data: read/write
3285 * Transfer data from/to the device data register by MMIO.
3287 * LOCKING:
3288 * Inherited from caller.
3291 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3292 unsigned int buflen, int write_data)
3294 unsigned int i;
3295 unsigned int words = buflen >> 1;
3296 u16 *buf16 = (u16 *) buf;
3297 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3299 /* Transfer multiple of 2 bytes */
3300 if (write_data) {
3301 for (i = 0; i < words; i++)
3302 writew(le16_to_cpu(buf16[i]), mmio);
3303 } else {
3304 for (i = 0; i < words; i++)
3305 buf16[i] = cpu_to_le16(readw(mmio));
3308 /* Transfer trailing 1 byte, if any. */
3309 if (unlikely(buflen & 0x01)) {
3310 u16 align_buf[1] = { 0 };
3311 unsigned char *trailing_buf = buf + buflen - 1;
3313 if (write_data) {
3314 memcpy(align_buf, trailing_buf, 1);
3315 writew(le16_to_cpu(align_buf[0]), mmio);
3316 } else {
3317 align_buf[0] = cpu_to_le16(readw(mmio));
3318 memcpy(trailing_buf, align_buf, 1);
3324 * ata_pio_data_xfer - Transfer data by PIO
3325 * @ap: port to read/write
3326 * @buf: data buffer
3327 * @buflen: buffer length
3328 * @write_data: read/write
3330 * Transfer data from/to the device data register by PIO.
3332 * LOCKING:
3333 * Inherited from caller.
3336 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3337 unsigned int buflen, int write_data)
3339 unsigned int words = buflen >> 1;
3341 /* Transfer multiple of 2 bytes */
3342 if (write_data)
3343 outsw(ap->ioaddr.data_addr, buf, words);
3344 else
3345 insw(ap->ioaddr.data_addr, buf, words);
3347 /* Transfer trailing 1 byte, if any. */
3348 if (unlikely(buflen & 0x01)) {
3349 u16 align_buf[1] = { 0 };
3350 unsigned char *trailing_buf = buf + buflen - 1;
3352 if (write_data) {
3353 memcpy(align_buf, trailing_buf, 1);
3354 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3355 } else {
3356 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3357 memcpy(trailing_buf, align_buf, 1);
3363 * ata_data_xfer - Transfer data from/to the data register.
3364 * @ap: port to read/write
3365 * @buf: data buffer
3366 * @buflen: buffer length
3367 * @do_write: read/write
3369 * Transfer data from/to the device data register.
3371 * LOCKING:
3372 * Inherited from caller.
3375 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3376 unsigned int buflen, int do_write)
3378 /* Make the crap hardware pay the costs not the good stuff */
3379 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3380 unsigned long flags;
3381 local_irq_save(flags);
3382 if (ap->flags & ATA_FLAG_MMIO)
3383 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3384 else
3385 ata_pio_data_xfer(ap, buf, buflen, do_write);
3386 local_irq_restore(flags);
3387 } else {
3388 if (ap->flags & ATA_FLAG_MMIO)
3389 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3390 else
3391 ata_pio_data_xfer(ap, buf, buflen, do_write);
3396 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3397 * @qc: Command on going
3399 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3401 * LOCKING:
3402 * Inherited from caller.
3405 static void ata_pio_sector(struct ata_queued_cmd *qc)
3407 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3408 struct scatterlist *sg = qc->__sg;
3409 struct ata_port *ap = qc->ap;
3410 struct page *page;
3411 unsigned int offset;
3412 unsigned char *buf;
3414 if (qc->cursect == (qc->nsect - 1))
3415 ap->hsm_task_state = HSM_ST_LAST;
3417 page = sg[qc->cursg].page;
3418 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3420 /* get the current page and offset */
3421 page = nth_page(page, (offset >> PAGE_SHIFT));
3422 offset %= PAGE_SIZE;
3424 buf = kmap(page) + offset;
3426 qc->cursect++;
3427 qc->cursg_ofs++;
3429 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3430 qc->cursg++;
3431 qc->cursg_ofs = 0;
3434 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3436 /* do the actual data transfer */
3437 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3438 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3440 kunmap(page);
3444 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3445 * @qc: Command on going
3446 * @bytes: number of bytes
3448 * Transfer data from/to the ATAPI device.
3450 * LOCKING:
3451 * Inherited from caller.
3455 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3457 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3458 struct scatterlist *sg = qc->__sg;
3459 struct ata_port *ap = qc->ap;
3460 struct page *page;
3461 unsigned char *buf;
3462 unsigned int offset, count;
3464 if (qc->curbytes + bytes >= qc->nbytes)
3465 ap->hsm_task_state = HSM_ST_LAST;
3467 next_sg:
3468 if (unlikely(qc->cursg >= qc->n_elem)) {
3470 * The end of qc->sg is reached and the device expects
3471 * more data to transfer. In order not to overrun qc->sg
3472 * and fulfill length specified in the byte count register,
3473 * - for the read case, discard trailing data from the device
3474 * - for the write case, send zero padding to the device
3476 u16 pad_buf[1] = { 0 };
3477 unsigned int words = bytes >> 1;
3478 unsigned int i;
3480 if (words) /* warning if bytes > 1 */
3481 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3482 ap->id, bytes);
3484 for (i = 0; i < words; i++)
3485 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3487 ap->hsm_task_state = HSM_ST_LAST;
3488 return;
3491 sg = &qc->__sg[qc->cursg];
3493 page = sg->page;
3494 offset = sg->offset + qc->cursg_ofs;
3496 /* get the current page and offset */
3497 page = nth_page(page, (offset >> PAGE_SHIFT));
3498 offset %= PAGE_SIZE;
3500 /* don't overrun current sg */
3501 count = min(sg->length - qc->cursg_ofs, bytes);
3503 /* don't cross page boundaries */
3504 count = min(count, (unsigned int)PAGE_SIZE - offset);
3506 buf = kmap(page) + offset;
3508 bytes -= count;
3509 qc->curbytes += count;
3510 qc->cursg_ofs += count;
3512 if (qc->cursg_ofs == sg->length) {
3513 qc->cursg++;
3514 qc->cursg_ofs = 0;
3517 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3519 /* do the actual data transfer */
3520 ata_data_xfer(ap, buf, count, do_write);
3522 kunmap(page);
3524 if (bytes)
3525 goto next_sg;
3529 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3530 * @qc: Command on going
3532 * Transfer data from/to the ATAPI device.
3534 * LOCKING:
3535 * Inherited from caller.
3538 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3540 struct ata_port *ap = qc->ap;
3541 struct ata_device *dev = qc->dev;
3542 unsigned int ireason, bc_lo, bc_hi, bytes;
3543 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3545 ap->ops->tf_read(ap, &qc->tf);
3546 ireason = qc->tf.nsect;
3547 bc_lo = qc->tf.lbam;
3548 bc_hi = qc->tf.lbah;
3549 bytes = (bc_hi << 8) | bc_lo;
3551 /* shall be cleared to zero, indicating xfer of data */
3552 if (ireason & (1 << 0))
3553 goto err_out;
3555 /* make sure transfer direction matches expected */
3556 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3557 if (do_write != i_write)
3558 goto err_out;
3560 __atapi_pio_bytes(qc, bytes);
3562 return;
3564 err_out:
3565 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3566 ap->id, dev->devno);
3567 qc->err_mask |= AC_ERR_HSM;
3568 ap->hsm_task_state = HSM_ST_ERR;
3572 * ata_pio_block - start PIO on a block
3573 * @ap: the target ata_port
3575 * LOCKING:
3576 * None. (executing in kernel thread context)
3579 static void ata_pio_block(struct ata_port *ap)
3581 struct ata_queued_cmd *qc;
3582 u8 status;
3585 * This is purely heuristic. This is a fast path.
3586 * Sometimes when we enter, BSY will be cleared in
3587 * a chk-status or two. If not, the drive is probably seeking
3588 * or something. Snooze for a couple msecs, then
3589 * chk-status again. If still busy, fall back to
3590 * HSM_ST_POLL state.
3592 status = ata_busy_wait(ap, ATA_BUSY, 5);
3593 if (status & ATA_BUSY) {
3594 msleep(2);
3595 status = ata_busy_wait(ap, ATA_BUSY, 10);
3596 if (status & ATA_BUSY) {
3597 ap->hsm_task_state = HSM_ST_POLL;
3598 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3599 return;
3603 qc = ata_qc_from_tag(ap, ap->active_tag);
3604 WARN_ON(qc == NULL);
3606 /* check error */
3607 if (status & (ATA_ERR | ATA_DF)) {
3608 qc->err_mask |= AC_ERR_DEV;
3609 ap->hsm_task_state = HSM_ST_ERR;
3610 return;
3613 /* transfer data if any */
3614 if (is_atapi_taskfile(&qc->tf)) {
3615 /* DRQ=0 means no more data to transfer */
3616 if ((status & ATA_DRQ) == 0) {
3617 ap->hsm_task_state = HSM_ST_LAST;
3618 return;
3621 atapi_pio_bytes(qc);
3622 } else {
3623 /* handle BSY=0, DRQ=0 as error */
3624 if ((status & ATA_DRQ) == 0) {
3625 qc->err_mask |= AC_ERR_HSM;
3626 ap->hsm_task_state = HSM_ST_ERR;
3627 return;
3630 ata_pio_sector(qc);
3634 static void ata_pio_error(struct ata_port *ap)
3636 struct ata_queued_cmd *qc;
3638 qc = ata_qc_from_tag(ap, ap->active_tag);
3639 WARN_ON(qc == NULL);
3641 if (qc->tf.command != ATA_CMD_PACKET)
3642 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3644 /* make sure qc->err_mask is available to
3645 * know what's wrong and recover
3647 WARN_ON(qc->err_mask == 0);
3649 ap->hsm_task_state = HSM_ST_IDLE;
3651 ata_poll_qc_complete(qc);
3654 static void ata_pio_task(void *_data)
3656 struct ata_port *ap = _data;
3657 unsigned long timeout;
3658 int qc_completed;
3660 fsm_start:
3661 timeout = 0;
3662 qc_completed = 0;
3664 switch (ap->hsm_task_state) {
3665 case HSM_ST_IDLE:
3666 return;
3668 case HSM_ST:
3669 ata_pio_block(ap);
3670 break;
3672 case HSM_ST_LAST:
3673 qc_completed = ata_pio_complete(ap);
3674 break;
3676 case HSM_ST_POLL:
3677 case HSM_ST_LAST_POLL:
3678 timeout = ata_pio_poll(ap);
3679 break;
3681 case HSM_ST_TMOUT:
3682 case HSM_ST_ERR:
3683 ata_pio_error(ap);
3684 return;
3687 if (timeout)
3688 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3689 else if (!qc_completed)
3690 goto fsm_start;
3694 * atapi_packet_task - Write CDB bytes to hardware
3695 * @_data: Port to which ATAPI device is attached.
3697 * When the device has indicated its readiness to accept
3698 * a CDB, this function is called. Send the CDB.
3699 * If DMA is to be performed, exit immediately.
3700 * Otherwise, we are in polling mode, so poll
3701 * status until the operation succeeds or fails.
3703 * LOCKING:
3704 * Kernel thread context (may sleep)
3707 static void atapi_packet_task(void *_data)
3709 struct ata_port *ap = _data;
3710 struct ata_queued_cmd *qc;
3711 u8 status;
3713 qc = ata_qc_from_tag(ap, ap->active_tag);
3714 WARN_ON(qc == NULL);
3715 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3717 /* sleep-wait for BSY to clear */
3718 DPRINTK("busy wait\n");
3719 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3720 qc->err_mask |= AC_ERR_TIMEOUT;
3721 goto err_out;
3724 /* make sure DRQ is set */
3725 status = ata_chk_status(ap);
3726 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3727 qc->err_mask |= AC_ERR_HSM;
3728 goto err_out;
3731 /* send SCSI cdb */
3732 DPRINTK("send cdb\n");
3733 WARN_ON(qc->dev->cdb_len < 12);
3735 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3736 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3737 unsigned long flags;
3739 /* Once we're done issuing command and kicking bmdma,
3740 * irq handler takes over. To not lose irq, we need
3741 * to clear NOINTR flag before sending cdb, but
3742 * interrupt handler shouldn't be invoked before we're
3743 * finished. Hence, the following locking.
3745 spin_lock_irqsave(&ap->host_set->lock, flags);
3746 ap->flags &= ~ATA_FLAG_NOINTR;
3747 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3748 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3749 ap->ops->bmdma_start(qc); /* initiate bmdma */
3750 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3751 } else {
3752 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3754 /* PIO commands are handled by polling */
3755 ap->hsm_task_state = HSM_ST;
3756 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3759 return;
3761 err_out:
3762 ata_poll_qc_complete(qc);
3766 * ata_qc_timeout - Handle timeout of queued command
3767 * @qc: Command that timed out
3769 * Some part of the kernel (currently, only the SCSI layer)
3770 * has noticed that the active command on port @ap has not
3771 * completed after a specified length of time. Handle this
3772 * condition by disabling DMA (if necessary) and completing
3773 * transactions, with error if necessary.
3775 * This also handles the case of the "lost interrupt", where
3776 * for some reason (possibly hardware bug, possibly driver bug)
3777 * an interrupt was not delivered to the driver, even though the
3778 * transaction completed successfully.
3780 * LOCKING:
3781 * Inherited from SCSI layer (none, can sleep)
3784 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3786 struct ata_port *ap = qc->ap;
3787 struct ata_host_set *host_set = ap->host_set;
3788 u8 host_stat = 0, drv_stat;
3789 unsigned long flags;
3791 DPRINTK("ENTER\n");
3793 ap->hsm_task_state = HSM_ST_IDLE;
3795 spin_lock_irqsave(&host_set->lock, flags);
3797 switch (qc->tf.protocol) {
3799 case ATA_PROT_DMA:
3800 case ATA_PROT_ATAPI_DMA:
3801 host_stat = ap->ops->bmdma_status(ap);
3803 /* before we do anything else, clear DMA-Start bit */
3804 ap->ops->bmdma_stop(qc);
3806 /* fall through */
3808 default:
3809 ata_altstatus(ap);
3810 drv_stat = ata_chk_status(ap);
3812 /* ack bmdma irq events */
3813 ap->ops->irq_clear(ap);
3815 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3816 ap->id, qc->tf.command, drv_stat, host_stat);
3818 /* complete taskfile transaction */
3819 qc->err_mask |= ac_err_mask(drv_stat);
3820 break;
3823 spin_unlock_irqrestore(&host_set->lock, flags);
3825 ata_eh_qc_complete(qc);
3827 DPRINTK("EXIT\n");
3831 * ata_eng_timeout - Handle timeout of queued command
3832 * @ap: Port on which timed-out command is active
3834 * Some part of the kernel (currently, only the SCSI layer)
3835 * has noticed that the active command on port @ap has not
3836 * completed after a specified length of time. Handle this
3837 * condition by disabling DMA (if necessary) and completing
3838 * transactions, with error if necessary.
3840 * This also handles the case of the "lost interrupt", where
3841 * for some reason (possibly hardware bug, possibly driver bug)
3842 * an interrupt was not delivered to the driver, even though the
3843 * transaction completed successfully.
3845 * LOCKING:
3846 * Inherited from SCSI layer (none, can sleep)
3849 void ata_eng_timeout(struct ata_port *ap)
3851 DPRINTK("ENTER\n");
3853 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3855 DPRINTK("EXIT\n");
3859 * ata_qc_new - Request an available ATA command, for queueing
3860 * @ap: Port associated with device @dev
3861 * @dev: Device from whom we request an available command structure
3863 * LOCKING:
3864 * None.
3867 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3869 struct ata_queued_cmd *qc = NULL;
3870 unsigned int i;
3872 for (i = 0; i < ATA_MAX_QUEUE; i++)
3873 if (!test_and_set_bit(i, &ap->qactive)) {
3874 qc = ata_qc_from_tag(ap, i);
3875 break;
3878 if (qc)
3879 qc->tag = i;
3881 return qc;
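/*
 * Note: ap->qactive is a bitmap of live tags, so the
 * test_and_set_bit() loop above atomically hands out the lowest free
 * tag without needing to take the host_set lock.
 */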
3885 * ata_qc_new_init - Request an available ATA command, and initialize it
3886 * @ap: Port associated with device @dev
3887 * @dev: Device from whom we request an available command structure
3889 * LOCKING:
3890 * None.
3893 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3894 struct ata_device *dev)
3896 struct ata_queued_cmd *qc;
3898 qc = ata_qc_new(ap);
3899 if (qc) {
3900 qc->scsicmd = NULL;
3901 qc->ap = ap;
3902 qc->dev = dev;
3904 ata_qc_reinit(qc);
3907 return qc;
3911 * ata_qc_free - free unused ata_queued_cmd
3912 * @qc: Command to complete
3914 * Designed to free unused ata_queued_cmd object
3915 * in case something prevents using it.
3917 * LOCKING:
3918 * spin_lock_irqsave(host_set lock)
3920 void ata_qc_free(struct ata_queued_cmd *qc)
3922 struct ata_port *ap = qc->ap;
3923 unsigned int tag;
3925 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3927 qc->flags = 0;
3928 tag = qc->tag;
3929 if (likely(ata_tag_valid(tag))) {
3930 if (tag == ap->active_tag)
3931 ap->active_tag = ATA_TAG_POISON;
3932 qc->tag = ATA_TAG_POISON;
3933 clear_bit(tag, &ap->qactive);
3937 void __ata_qc_complete(struct ata_queued_cmd *qc)
3939 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3940 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3942 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3943 ata_sg_clean(qc);
3945 /* atapi: mark qc as inactive to prevent the interrupt handler
3946 * from completing the command twice later, before the error handler
3947 * is called. (when rc != 0 and atapi request sense is needed)
3949 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3951 /* call completion callback */
3952 qc->complete_fn(qc);
3955 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3957 struct ata_port *ap = qc->ap;
3959 switch (qc->tf.protocol) {
3960 case ATA_PROT_DMA:
3961 case ATA_PROT_ATAPI_DMA:
3962 return 1;
3964 case ATA_PROT_ATAPI:
3965 case ATA_PROT_PIO:
3966 if (ap->flags & ATA_FLAG_PIO_DMA)
3967 return 1;
3969 /* fall through */
3971 default:
3972 return 0;
3975 /* never reached */
3979 * ata_qc_issue - issue taskfile to device
3980 * @qc: command to issue to device
3982 * Prepare an ATA command for submission to the device.
3983 * This includes mapping the data into a DMA-able
3984 * area, filling in the S/G table, and finally
3985 * writing the taskfile to hardware, starting the command.
3987 * LOCKING:
3988 * spin_lock_irqsave(host_set lock)
3990 void ata_qc_issue(struct ata_queued_cmd *qc)
3992 struct ata_port *ap = qc->ap;
3994 qc->ap->active_tag = qc->tag;
3995 qc->flags |= ATA_QCFLAG_ACTIVE;
3997 if (ata_should_dma_map(qc)) {
3998 if (qc->flags & ATA_QCFLAG_SG) {
3999 if (ata_sg_setup(qc))
4000 goto sg_err;
4001 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4002 if (ata_sg_setup_one(qc))
4003 goto sg_err;
4005 } else {
4006 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4009 ap->ops->qc_prep(qc);
4011 qc->err_mask |= ap->ops->qc_issue(qc);
4012 if (unlikely(qc->err_mask))
4013 goto err;
4014 return;
4016 sg_err:
4017 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4018 qc->err_mask |= AC_ERR_SYSTEM;
4019 err:
4020 ata_qc_complete(qc);
4024 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4025 * @qc: command to issue to device
4027 * Using various libata functions and hooks, this function
4028 * starts an ATA command. ATA commands are grouped into
4029 * classes called "protocols", and issuing each type of protocol
4030 * is slightly different.
4032 * May be used as the qc_issue() entry in ata_port_operations.
4034 * LOCKING:
4035 * spin_lock_irqsave(host_set lock)
4037 * RETURNS:
4038 * Zero on success, AC_ERR_* mask on failure
4041 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4043 struct ata_port *ap = qc->ap;
4045 ata_dev_select(ap, qc->dev->devno, 1, 0);
4047 switch (qc->tf.protocol) {
4048 case ATA_PROT_NODATA:
4049 ata_tf_to_host(ap, &qc->tf);
4050 break;
4052 case ATA_PROT_DMA:
4053 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4054 ap->ops->bmdma_setup(qc); /* set up bmdma */
4055 ap->ops->bmdma_start(qc); /* initiate bmdma */
4056 break;
4058 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4059 ata_qc_set_polling(qc);
4060 ata_tf_to_host(ap, &qc->tf);
4061 ap->hsm_task_state = HSM_ST;
4062 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4063 break;
4065 case ATA_PROT_ATAPI:
4066 ata_qc_set_polling(qc);
4067 ata_tf_to_host(ap, &qc->tf);
4068 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4069 break;
4071 case ATA_PROT_ATAPI_NODATA:
4072 ap->flags |= ATA_FLAG_NOINTR;
4073 ata_tf_to_host(ap, &qc->tf);
4074 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4075 break;
4077 case ATA_PROT_ATAPI_DMA:
4078 ap->flags |= ATA_FLAG_NOINTR;
4079 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4080 ap->ops->bmdma_setup(qc); /* set up bmdma */
4081 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4082 break;
4084 default:
4085 WARN_ON(1);
4086 return AC_ERR_SYSTEM;
4089 return 0;
4093 * ata_host_intr - Handle host interrupt for given (port, task)
4094 * @ap: Port on which interrupt arrived (possibly...)
4095 * @qc: Taskfile currently active in engine
4097 * Handle host interrupt for given queued command. Currently,
4098 * only DMA interrupts are handled. All other commands are
4099 * handled via polling with interrupts disabled (nIEN bit).
4101 * LOCKING:
4102 * spin_lock_irqsave(host_set lock)
4104 * RETURNS:
4105 * One if interrupt was handled, zero if not (shared irq).
4108 inline unsigned int ata_host_intr (struct ata_port *ap,
4109 struct ata_queued_cmd *qc)
4111 u8 status, host_stat;
4113 switch (qc->tf.protocol) {
4115 case ATA_PROT_DMA:
4116 case ATA_PROT_ATAPI_DMA:
4117 case ATA_PROT_ATAPI:
4118 /* check status of DMA engine */
4119 host_stat = ap->ops->bmdma_status(ap);
4120 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4122 /* if it's not our irq... */
4123 if (!(host_stat & ATA_DMA_INTR))
4124 goto idle_irq;
4126 /* before we do anything else, clear DMA-Start bit */
4127 ap->ops->bmdma_stop(qc);
4129 /* fall through */
4131 case ATA_PROT_ATAPI_NODATA:
4132 case ATA_PROT_NODATA:
4133 /* check altstatus */
4134 status = ata_altstatus(ap);
4135 if (status & ATA_BUSY)
4136 goto idle_irq;
4138 /* check main status, clearing INTRQ */
4139 status = ata_chk_status(ap);
4140 if (unlikely(status & ATA_BUSY))
4141 goto idle_irq;
4142 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4143 ap->id, qc->tf.protocol, status);
4145 /* ack bmdma irq events */
4146 ap->ops->irq_clear(ap);
4148 /* complete taskfile transaction */
4149 qc->err_mask |= ac_err_mask(status);
4150 ata_qc_complete(qc);
4151 break;
4153 default:
4154 goto idle_irq;
4157 return 1; /* irq handled */
4159 idle_irq:
4160 ap->stats.idle_irq++;
4162 #ifdef ATA_IRQ_TRAP
4163 if ((ap->stats.idle_irq % 1000) == 0) {
4164 ata_irq_ack(ap, 0); /* debug trap */
4165 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4166 return 1;
4168 #endif
4169 return 0; /* irq not handled */
4173 * ata_interrupt - Default ATA host interrupt handler
4174 * @irq: irq line (unused)
4175 * @dev_instance: pointer to our ata_host_set information structure
4176 * @regs: unused
4178 * Default interrupt handler for PCI IDE devices. Calls
4179 * ata_host_intr() for each port that is not disabled.
4181 * LOCKING:
4182 * Obtains host_set lock during operation.
4184 * RETURNS:
4185 * IRQ_NONE or IRQ_HANDLED.
4188 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4190 struct ata_host_set *host_set = dev_instance;
4191 unsigned int i;
4192 unsigned int handled = 0;
4193 unsigned long flags;
4195 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4196 spin_lock_irqsave(&host_set->lock, flags);
4198 for (i = 0; i < host_set->n_ports; i++) {
4199 struct ata_port *ap;
4201 ap = host_set->ports[i];
4202 if (ap &&
4203 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4204 struct ata_queued_cmd *qc;
4206 qc = ata_qc_from_tag(ap, ap->active_tag);
4207 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4208 (qc->flags & ATA_QCFLAG_ACTIVE))
4209 handled |= ata_host_intr(ap, qc);
4213 spin_unlock_irqrestore(&host_set->lock, flags);
4215 return IRQ_RETVAL(handled);
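/*
 * Illustrative sketch: a low-level driver normally never calls this
 * directly; ata_device_add() installs the port_ops irq_handler
 * (usually this function) via something like
 *
 *	request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
 *		    DRV_NAME, host_set);
 *
 * so the handler runs once per shared-line interrupt and polls each
 * enabled port.
 */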
4220 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4221 * without filling any other registers.
4223 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4224 u8 cmd)
4226 struct ata_taskfile tf;
4227 int err;
4229 ata_tf_init(ap, &tf, dev->devno);
4231 tf.command = cmd;
4232 tf.flags |= ATA_TFLAG_DEVICE;
4233 tf.protocol = ATA_PROT_NODATA;
4235 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4236 if (err)
4237 printk(KERN_ERR "%s: ata command failed: %d\n",
4238 __FUNCTION__, err);
4240 return err;
4243 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4245 u8 cmd;
4247 if (!ata_try_flush_cache(dev))
4248 return 0;
4250 if (ata_id_has_flush_ext(dev->id))
4251 cmd = ATA_CMD_FLUSH_EXT;
4252 else
4253 cmd = ATA_CMD_FLUSH;
4255 return ata_do_simple_cmd(ap, dev, cmd);
4258 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4260 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4263 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4265 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4269 * ata_device_resume - wake up a previously suspended device
4270 * @ap: port the device is connected to
4271 * @dev: the device to resume
4273 * Kick the drive back into action, by sending it an idle immediate
4274 * command and making sure the drive's transfer mode matches
4275 * the host's.
4278 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4280 if (ap->flags & ATA_FLAG_SUSPENDED) {
4281 ap->flags &= ~ATA_FLAG_SUSPENDED;
4282 ata_set_mode(ap);
4284 if (!ata_dev_enabled(dev))
4285 return 0;
4286 if (dev->class == ATA_DEV_ATA)
4287 ata_start_drive(ap, dev);
4289 return 0;
4293 * ata_device_suspend - prepare a device for suspend
4294 * @ap: port the device is connected to
4295 * @dev: the device to suspend
4297 * Flush the cache on the drive, if appropriate, then issue a
4298 * standbynow command.
4300 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4302 if (!ata_dev_enabled(dev))
4303 return 0;
4304 if (dev->class == ATA_DEV_ATA)
4305 ata_flush_cache(ap, dev);
4307 if (state.event != PM_EVENT_FREEZE)
4308 ata_standby_drive(ap, dev);
4309 ap->flags |= ATA_FLAG_SUSPENDED;
4310 return 0;
4314 * ata_port_start - Set port up for dma.
4315 * @ap: Port to initialize
4317 * Called just after data structures for each port are
4318 * initialized. Allocates space for PRD table.
4320 * May be used as the port_start() entry in ata_port_operations.
4322 * LOCKING:
4323 * Inherited from caller.
4326 int ata_port_start (struct ata_port *ap)
4328 struct device *dev = ap->dev;
4329 int rc;
4331 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4332 if (!ap->prd)
4333 return -ENOMEM;
4335 rc = ata_pad_alloc(ap, dev);
4336 if (rc) {
4337 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4338 return rc;
4341 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4343 return 0;
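/*
 * Illustrative sketch: an LLD that needs extra per-port resources
 * usually chains to this helper from its own port_start, e.g.
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);
 *		if (rc)
 *			return rc;
 *		... allocate foo-specific state here ...
 *		return 0;
 *	}
 *
 * foo_port_start() is a hypothetical name.
 */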
4348 * ata_port_stop - Undo ata_port_start()
4349 * @ap: Port to shut down
4351 * Frees the PRD table.
4353 * May be used as the port_stop() entry in ata_port_operations.
4355 * LOCKING:
4356 * Inherited from caller.
4359 void ata_port_stop (struct ata_port *ap)
4361 struct device *dev = ap->dev;
4363 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4364 ata_pad_free(ap, dev);
4367 void ata_host_stop (struct ata_host_set *host_set)
4369 if (host_set->mmio_base)
4370 iounmap(host_set->mmio_base);
4375 * ata_host_remove - Unregister SCSI host structure with upper layers
4376 * @ap: Port to unregister
4377 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4379 * LOCKING:
4380 * Inherited from caller.
4383 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4385 struct Scsi_Host *sh = ap->host;
4387 DPRINTK("ENTER\n");
4389 if (do_unregister)
4390 scsi_remove_host(sh);
4392 ap->ops->port_stop(ap);
/**
 * ata_host_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: associated SCSI mid-layer structure
 * @host_set: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure, and its associated
 * scsi_host.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_LIST_HEAD(&ap->eh_done_q);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->devno = i;
		dev->pio_mask = UINT_MAX;
		dev->mwdma_mask = UINT_MAX;
		dev->udma_mask = UINT_MAX;
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collection of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, NULL on error.
 */
static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->probe_reset &&
	    !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	host->transportt = &ata_scsi_transport_template;

	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;
	host_set->flags = ent->host_set_flags;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
		       "bmdma 0x%lX irq %lu\n",
		       ap->id,
		       ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
		       ata_mode_string(xfer_mode_mask),
		       ap->ioaddr.cmd_addr,
		       ap->ioaddr.ctl_addr,
		       ap->ioaddr.bmdma_addr,
		       ent->irq);

		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: bus probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: bus probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports;	/* success */

err_out:
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
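
/*
 * Sketch of the expected calling pattern, with illustrative values only
 * (the "foo" names and mode masks are placeholders, not real resources):
 * a low-level driver fills in an ata_probe_ent describing its hardware,
 * hands it to ata_device_add(), and may then free the ent, since
 * everything needed has been copied into the host_set and ports:
 *
 *	probe_ent->dev = pci_dev_to_dev(pdev);
 *	probe_ent->sht = &foo_sht;
 *	probe_ent->port_ops = &foo_port_ops;
 *	probe_ent->n_ports = 2;
 *	probe_ent->irq = pdev->irq;
 *	probe_ent->pio_mask = 0x1f;		-- PIO modes 0-4
 *	probe_ent->udma_mask = 0x3f;		-- UDMA modes 0-5
 *	(fill in probe_ent->port[0..1], e.g. via ata_std_ports() below)
 *
 *	rc = ata_device_add(probe_ent);
 *	kfree(probe_ent);
 *	if (!rc)
 *		return -ENODEV;
 */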
/**
 * ata_host_set_remove - PCI layer callback for device removal
 * @host_set: ATA host set that was removed
 *
 * Unregister all objects associated with this host set.  Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */
int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
	int i;

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		kfree(ap->device[i].id);

	DPRINTK("EXIT\n");
	return 1;
}
/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
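
/*
 * Typical use, sketched for a legacy primary channel; the constants are
 * the standard ISA addresses, and a real driver takes them from its own
 * resources instead:
 *
 *	probe_ent->port[0].cmd_addr = 0x1f0;
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	ata_std_ports(&probe_ent->port[0]);
 */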
#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * a hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);

	ata_host_set_remove(host_set);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
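
/*
 * Usage sketch: a driver describes an "enable" bit in its PCI config
 * space with a pci_bits { reg, width, mask, val } tuple and tests it
 * before touching the channel.  The register offsets and values here
 * are illustrative only:
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41U, 1U, 0x80UL, 0x80UL },	-- port 0
 *		{ 0x43U, 1U, 0x80UL, 0x80UL },	-- port 1
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->hard_port_no]))
 *		return -ENOENT;	-- channel disabled in hardware
 */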
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
	return 0;
}
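
/*
 * Both helpers are meant to be wired directly into a driver's pci_driver
 * ("foo" is a hypothetical driver; probe/remove shown for context):
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */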
#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
}

module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
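
/*
 * Usage sketch: callers wrap nuisance printk()s so that at most one is
 * emitted per HZ/5 window, which matters in interrupt handlers:
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */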
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_probeinit);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);