/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
static unsigned int ata_dev_init_params(struct ata_port *ap,
					struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
					 struct ata_device *dev);
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
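
/*
 * Illustrative sketch (editorial, not part of the driver): a SATA LLDD
 * typically serializes the taskfile of a queued command into an H2D FIS
 * before handing it to the controller, and later recovers the result
 * taskfile from the received D2H FIS.  Everything here other than the
 * two helpers above is an assumption.
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, fis, 0);	// pmp 0: no port multiplier
 *	// ... hand "fis" to the controller, wait for the D2H FIS ...
 *	ata_tf_from_fis(fis, &qc->tf);	// status/error now in qc->tf
 */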
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
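
/*
 * Worked example (editorial): for a DMA write to an LBA48 device with
 * FUA requested, the flags above give fua = 4, lba48 = 2, write = 1 and
 * index = 16, so ata_rw_cmds[16 + 4 + 2 + 1] selects
 * ATA_CMD_WRITE_FUA_EXT.  A zero entry in the table (e.g. FUA without
 * LBA48, index 21) makes the function return -1: no such command exists.
 */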
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
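
/*
 * Worked example (editorial): with the usual bit layout (PIO at shift 0,
 * MWDMA at shift 5, UDMA at shift 8), a drive supporting PIO0-4,
 * MWDMA0-2 and UDMA0-5 packs as
 *
 *	ata_pack_xfermask(0x1f, 0x07, 0x3f) == 0x3fff
 *
 * and ata_unpack_xfermask(0x3fff, &pio, &mwdma, &udma) recovers the
 * three component masks again.
 */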
static const struct ata_xfer_ent {
	int shift, bits;	/* signed: -1 terminates the table below */
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
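
/*
 * Worked example (editorial): fls(0x3fff) - 1 == 13, which falls in the
 * UDMA range of ata_xfer_tbl (shift 8, 8 bits), so
 * ata_xfer_mask2mode(0x3fff) yields XFER_UDMA_5.  Conversely,
 * ata_xfer_mode2mask(XFER_UDMA_5) == 1 << 13 and
 * ata_xfer_mode2shift(XFER_UDMA_5) == ATA_SHIFT_UDMA.
 */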
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
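
/*
 * Example (editorial): continuing the mask above, ata_mode_string(0x3fff)
 * returns "UDMA/100" -- bit 13 is UDMA5, whose string names the transfer
 * rate in MB/s rather than the mode number.
 */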
static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
	if (ata_dev_present(dev)) {
		printk(KERN_WARNING "ata%u: dev %u disabled\n",
		       ap->id, dev->devno);
		dev->class++;
	}
}
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
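
/*
 * Usage sketch (editorial): per the ATA spec, the model name lives at
 * word offset 27 of the IDENTIFY page and is 40 bytes long, so a
 * NUL-terminated copy takes a 41-byte buffer.  The offset is cited from
 * the spec, not from this file.
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(dev->id, model, 27, sizeof(model));
 */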
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
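
/*
 * Note (editorial): the word offsets above follow the IDENTIFY DEVICE
 * layout -- words 100-103 hold the 48-bit LBA capacity, words 60-61 the
 * 28-bit LBA capacity, words 57-58 the current CHS capacity, and words
 * 1/3/6 the default cylinder/head/sector counts, e.g.
 *
 *	16383 cyls * 16 heads * 63 sectors = 16514064 sectors (~8.4 GB)
 *
 * for a drive that predates LBA addressing.
 */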
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no action.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds, not the modes, that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay in jiffies before scheduling @fn
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (the low level driver's) responsibility to make sure
 *	that only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
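
/*
 * Usage sketch (editorial): a low level driver polling an interrupt-less
 * PIO transfer might reschedule itself from its own handler.  The
 * function and the transfer_done() check are hypothetical.
 *
 *	static void my_pio_task(void *data)
 *	{
 *		struct ata_port *ap = data;
 *
 *		if (!transfer_done(ap))
 *			ata_port_queue_task(ap, my_pio_task, ap,
 *					    msecs_to_jiffies(10));
 *	}
 */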
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		DPRINTK("flush #2\n");
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	DPRINTK("EXIT\n");
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	qc->ap->ops->tf_read(qc->ap, &qc->tf);
	complete(waiting);
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@ap: Port to which the command is sent
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 */

static unsigned
ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
		  struct ata_taskfile *tf,
		  int dma_dir, void *buf, unsigned int buflen)
{
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	DECLARE_COMPLETION(wait);
	unsigned long flags;
	unsigned int err_mask;

	spin_lock_irqsave(&ap->host_set->lock, flags);

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf = *tf;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	qc->err_mask = ata_qc_issue(qc);
	if (qc->err_mask)
		ata_qc_complete(qc);

	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
		ata_port_flush_task(ap);

		spin_lock_irqsave(&ap->host_set->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * again.  If completion irq occurs after here but
		 * before the caller cleans up, it will result in a
		 * spurious interrupt.  We can live with that.
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask = AC_ERR_TIMEOUT;
			ata_qc_complete(qc);
			printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
			       ap->id, command);
		}

		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	}

	*tf = qc->tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	return err_mask;
}
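
/*
 * Usage sketch (editorial): ata_dev_read_id() below is the canonical
 * caller -- it fills a taskfile with ATA_CMD_ID_ATA, sets PIO protocol
 * and lets ata_exec_internal() move the 512-byte IDENTIFY page in from
 * the device:
 *
 *	ata_tf_init(ap, &tf, dev->devno);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
 *				     id, ATA_ID_WORDS * sizeof(id[0]));
 */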
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
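
/*
 * Worked example (editorial): PIO0/1 never need IORDY and PIO3/4 always
 * do, so only PIO2 consults the drive.  A drive reporting a minimum
 * non-IORDY cycle time of 383 ns in its EIDE PIO word cannot meet the
 * 240 ns PIO2 cycle without flow control, so 383 > 240 makes this
 * return 1.
 */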
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@ap: port on which target device resides
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@p_id: read IDENTIFY page (newly allocated)
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also takes care of EDD signature
 *	misreporting (to be removed once EDD support is gone) and
 *	issues ATA_CMD_INIT_DEV_PARAMS for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
			   unsigned int *p_class, int post_reset, u16 **p_id)
{
	unsigned int class = *p_class;
	unsigned int using_edd;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	u16 *id;
	const char *reason;
	int rc;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	if (ap->ops->probe_reset ||
	    ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
	if (id == NULL) {
		rc = -ENOMEM;
		reason = "out of memory";
		goto err_out;
	}

 retry:
	ata_tf_init(ap, &tf, dev->devno);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);

	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";

		if (err_mask & ~AC_ERR_DEV)
			goto err_out;

		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (class == ATA_DEV_ATA)) {
			u8 err = tf.feature;
			if (err & ATA_ABORTED) {
				class = ATA_DEV_ATAPI;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
		rc = -EINVAL;
		reason = "device reports illegal type";
		goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(ap, dev);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;
	*p_id = id;
	return 0;

 err_out:
	printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
	       ap->id, dev->devno, reason);
	kfree(id);
	return rc;
}
static inline u8 ata_dev_knobble(const struct ata_port *ap,
				 struct ata_device *dev)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@ap: Port on which target device resides
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
			     int print_info)
{
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int i, rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, dev->devno);
		return 0;
	}

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);

	/* print device capabilities */
	if (print_info)
		printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
		       "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
		       ap->id, dev->devno, id[49], id[82], id[83],
		       id[84], id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags = 0;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: %s\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       lba_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (print_info)
				printk(KERN_INFO "ata%u: dev %u ATA-%d, "
				       "max %s, %Lu sectors: CHS %u/%u/%u\n",
				       ap->id, dev->devno,
				       ata_id_major_version(id),
				       ata_mode_string(xfer_mask),
				       (unsigned long long)dev->n_sectors,
				       dev->cylinders, dev->heads, dev->sectors);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* print device info to dmesg */
		if (print_info)
			printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
			       ap->id, dev->devno, ata_mode_string(xfer_mask));
	}

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap, dev)) {
		if (print_info)
			printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
			       ap->id, dev->devno);
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return 0;

err_out_nosup:
	DPRINTK("EXIT, err\n");
	return rc;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	unsigned int i, rc, found = 0;

	ata_port_probe(ap);

	/* reset and determine device classes */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		classes[i] = ATA_DEV_UNKNOWN;

	if (ap->ops->probe_reset) {
		rc = ap->ops->probe_reset(ap, classes);
		if (rc) {
			printk(KERN_ERR "ata%u: reset failed (errno=%d)\n",
			       ap->id, rc);
			return rc;
		}
	} else {
		ap->ops->phy_reset(ap);

		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
			for (i = 0; i < ATA_MAX_DEVICES; i++)
				classes[i] = ap->device[i].class;

		ata_port_probe(ap);
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (classes[i] == ATA_DEV_UNKNOWN)
			classes[i] = ATA_DEV_NONE;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_present(dev))
			continue;

		WARN_ON(dev->id != NULL);
		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
			dev->class = ATA_DEV_NONE;
			continue;
		}

		if (ata_dev_configure(ap, dev, 1)) {
			ata_dev_disable(ap, dev);
			continue;
		}

		found = 1;
	}

	if (!found)
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
	return -1;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, tmp;
	const char *speed;

	if (!ap->ops->scr_read)
		return;

	sstatus = scr_read(ap, SCR_STATUS);

	if (sata_dev_present(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		if (tmp & (1 << 0))
			speed = "1.5";
		else if (tmp & (1 << 1))
			speed = "3.0";
		else
			speed = "<unknown>";
		printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
		       ap->id, speed, sstatus);
	} else {
		printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
		       ap->id, sstatus);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */

void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */

void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_dev_pair - return other device on cable
 *	@ap: port
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or NULL if none is
 *	present.
 */

struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_present(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
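
/*
 * Worked example (editorial): ENOUGH() is a ceiling division and EZ()
 * preserves zero ("not specified").  Timings are stored in nanoseconds,
 * while T and UT are clock periods in picoseconds (hence the * 1000);
 * on a 33 MHz bus (T = 30000 ps), the 70 ns PIO4 setup time quantizes to
 *
 *	ENOUGH(70 * 1000, 30000) = (70000 - 1) / 30000 + 1 = 3 clocks.
 */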
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO
	 * timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
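
/*
 * Usage sketch (editorial): a controller driver converting a negotiated
 * mode into register values might do the following; the 30000/1000
 * picosecond clock periods are assumed values, not taken from this file.
 *
 *	struct ata_timing t;
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, 30000, 1000))
 *		return;		// unknown mode
 *	// t.active/t.recover now hold 33 MHz clock counts to program
 */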
static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(ap, dev);
	if (err_mask) {
		printk(KERN_ERR
		       "ata%u: failed to set xfermode (err_mask=0x%x)\n",
		       ap->id, err_mask);
		return -EIO;
	}

	rc = ata_dev_revalidate(ap, dev, 0);
	if (rc) {
		printk(KERN_ERR
		       "ata%u: failed to revalidate after set xfermode\n",
		       ap->id);
		return rc;
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno,
	       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
static int ata_host_set_pio(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))
			continue;

		if (!dev->pio_mode) {
			printk(KERN_WARNING "ata%u: no PIO support for device %d.\n",
			       ap->id, i);
			return -1;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
static void ata_set_mode(struct ata_port *ap)
{
	int i, rc;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		unsigned int pio_mask, dma_mask;

		if (!ata_dev_present(dev))
			continue;

		ata_dev_xfermask(ap, dev);

		/* TODO: let LLDD filter dev->*_mask here */

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
	}

	/* step 2: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 3: set host DMA timings */
	ata_host_set_dma(ap);

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))
			continue;

		if (ata_dev_set_mode(ap, dev))
			goto err_out;
	}

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 */

unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms, but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 1;	/* Positive is failure for some reason */

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
2195 static int sata_phy_resume(struct ata_port *ap)
2197 unsigned long timeout = jiffies + (HZ * 5);
2198 u32 sstatus;
2200 scr_write_flush(ap, SCR_CONTROL, 0x300);
2202 /* Wait for phy to become ready, if necessary. */
2203 do {
2204 msleep(200);
2205 sstatus = scr_read(ap, SCR_STATUS);
2206 if ((sstatus & 0xf) != 1)
2207 return 0;
2208 } while (time_before(jiffies, timeout));
2210 return -1;
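/*
 * For reference (a summary of the SATA SStatus layout, not normative):
 * the low nibble tested above is the DET field, where 0x0 means no
 * device detected, 0x1 means device presence detected but phy
 * communication not yet established, 0x3 means device present and phy
 * communication established, and 0x4 means the phy is offline.  The
 * loop therefore keeps waiting only while DET == 1, i.e. while a
 * device is visible but the link has not yet come up.
 */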
2214 * ata_std_probeinit - initialize probing
2215 * @ap: port to be probed
2217 * @ap is about to be probed. Initialize it. This function is
2218 * to be used as standard callback for ata_drive_probe_reset().
2220 * NOTE!!! Do not use this function as probeinit if a low level
2221 * driver implements only hardreset. Just pass NULL as probeinit
2222 * in that case. Using this function is probably okay but doing
2223 * so makes reset sequence different from the original
2224 * ->phy_reset implementation and Jeff nervous. :-P
2226 void ata_std_probeinit(struct ata_port *ap)
2228 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2229 sata_phy_resume(ap);
2230 if (sata_dev_present(ap))
2231 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2236 * ata_std_softreset - reset host port via ATA SRST
2237 * @ap: port to reset
2238 * @verbose: fail verbosely
2239 * @classes: resulting classes of attached devices
2241 * Reset host port using ATA SRST. This function is to be used
2242 * as standard callback for ata_drive_*_reset() functions.
2244 * LOCKING:
2245 * Kernel thread context (may sleep)
2247 * RETURNS:
2248 * 0 on success, -errno otherwise.
2250 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2252 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2253 unsigned int devmask = 0, err_mask;
2254 u8 err;
2256 DPRINTK("ENTER\n");
2258 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2259 classes[0] = ATA_DEV_NONE;
2260 goto out;
2263 /* determine if device 0/1 are present */
2264 if (ata_devchk(ap, 0))
2265 devmask |= (1 << 0);
2266 if (slave_possible && ata_devchk(ap, 1))
2267 devmask |= (1 << 1);
2269 /* select device 0 again */
2270 ap->ops->dev_select(ap, 0);
2272 /* issue bus reset */
2273 DPRINTK("about to softreset, devmask=%x\n", devmask);
2274 err_mask = ata_bus_softreset(ap, devmask);
2275 if (err_mask) {
2276 if (verbose)
2277 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2278 ap->id, err_mask);
2279 else
2280 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2281 err_mask);
2282 return -EIO;
2285 /* determine by signature whether we have ATA or ATAPI devices */
2286 classes[0] = ata_dev_try_classify(ap, 0, &err);
2287 if (slave_possible && err != 0x81)
2288 classes[1] = ata_dev_try_classify(ap, 1, &err);
2290 out:
2291 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2292 return 0;
2296 * sata_std_hardreset - reset host port via SATA phy reset
2297 * @ap: port to reset
2298 * @verbose: fail verbosely
2299 * @class: resulting class of attached device
2301 * SATA phy-reset host port using DET bits of SControl register.
2302 * This function is to be used as standard callback for
2303 * ata_drive_*_reset().
2305 * LOCKING:
2306 * Kernel thread context (may sleep)
2308 * RETURNS:
2309 * 0 on success, -errno otherwise.
2311 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2313 DPRINTK("ENTER\n");
2315 /* Issue phy wake/reset */
2316 scr_write_flush(ap, SCR_CONTROL, 0x301);
2319 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2320 * 10.4.2 says at least 1 ms.
2322 msleep(1);
2324 /* Bring phy back */
2325 sata_phy_resume(ap);
2327 /* TODO: phy layer with polling, timeouts, etc. */
2328 if (!sata_dev_present(ap)) {
2329 *class = ATA_DEV_NONE;
2330 DPRINTK("EXIT, link offline\n");
2331 return 0;
2334 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2335 if (verbose)
2336 printk(KERN_ERR "ata%u: COMRESET failed "
2337 "(device not ready)\n", ap->id);
2338 else
2339 DPRINTK("EXIT, device not ready\n");
2340 return -EIO;
2343 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2345 *class = ata_dev_try_classify(ap, 0, NULL);
2347 DPRINTK("EXIT, class=%u\n", *class);
2348 return 0;
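/*
 * On the SControl values used above (again a summary, not normative):
 * the low nibble is the DET field, so writing 0x301 asserts COMRESET
 * (DET = 1) and the 0x300 written by sata_phy_resume() releases it
 * (DET = 0), while the 0x300 portion sets the IPM field to disallow
 * transitions into the Partial and Slumber power states.
 */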
2352 * ata_std_postreset - standard postreset callback
2353 * @ap: the target ata_port
2354 * @classes: classes of attached devices
2356 * This function is invoked after a successful reset. Note that
2357 * the device might have been reset more than once using
2358 * different reset methods before postreset is invoked.
2360 * This function is to be used as standard callback for
2361 * ata_drive_*_reset().
2363 * LOCKING:
2364 * Kernel thread context (may sleep)
2366 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2368 DPRINTK("ENTER\n");
2370 /* set cable type if it isn't already set */
2371 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2372 ap->cbl = ATA_CBL_SATA;
2374 /* print link status */
2375 if (ap->cbl == ATA_CBL_SATA)
2376 sata_print_link_status(ap);
2378 /* re-enable interrupts */
2379 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2380 ata_irq_on(ap);
2382 /* is double-select really necessary? */
2383 if (classes[0] != ATA_DEV_NONE)
2384 ap->ops->dev_select(ap, 1);
2385 if (classes[1] != ATA_DEV_NONE)
2386 ap->ops->dev_select(ap, 0);
2388 /* bail out if no device is present */
2389 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2390 DPRINTK("EXIT, no device\n");
2391 return;
2394 /* set up device control */
2395 if (ap->ioaddr.ctl_addr) {
2396 if (ap->flags & ATA_FLAG_MMIO)
2397 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2398 else
2399 outb(ap->ctl, ap->ioaddr.ctl_addr);
2402 DPRINTK("EXIT\n");
2406 * ata_std_probe_reset - standard probe reset method
2407 * @ap: port to perform probe-reset on
2408 * @classes: resulting classes of attached devices
2410 * The stock off-the-shelf ->probe_reset method.
2412 * LOCKING:
2413 * Kernel thread context (may sleep)
2415 * RETURNS:
2416 * 0 on success, -errno otherwise.
2418 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2420 ata_reset_fn_t hardreset;
2422 hardreset = NULL;
2423 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2424 hardreset = sata_std_hardreset;
2426 return ata_drive_probe_reset(ap, ata_std_probeinit,
2427 ata_std_softreset, hardreset,
2428 ata_std_postreset, classes);
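/*
 * For illustration, a low level driver that needs its own hardreset
 * but is otherwise standard would compose its ->probe_reset the same
 * way.  A minimal sketch, assuming a hypothetical "foo" driver that
 * provides foo_hardreset():
 */
#if 0	/* example only, not compiled */
static int foo_probe_reset(struct ata_port *ap, unsigned int *classes)
{
	return ata_drive_probe_reset(ap, ata_std_probeinit,
				     ata_std_softreset, foo_hardreset,
				     ata_std_postreset, classes);
}
#endif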
2431 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2432 ata_postreset_fn_t postreset,
2433 unsigned int *classes)
2435 int i, rc;
2437 for (i = 0; i < ATA_MAX_DEVICES; i++)
2438 classes[i] = ATA_DEV_UNKNOWN;
2440 rc = reset(ap, 0, classes);
2441 if (rc)
2442 return rc;
2444 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2445 * is complete and convert all ATA_DEV_UNKNOWN to
2446 * ATA_DEV_NONE.
2448 for (i = 0; i < ATA_MAX_DEVICES; i++)
2449 if (classes[i] != ATA_DEV_UNKNOWN)
2450 break;
2452 if (i < ATA_MAX_DEVICES)
2453 for (i = 0; i < ATA_MAX_DEVICES; i++)
2454 if (classes[i] == ATA_DEV_UNKNOWN)
2455 classes[i] = ATA_DEV_NONE;
2457 if (postreset)
2458 postreset(ap, classes);
2460 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2464 * ata_drive_probe_reset - Perform probe reset with given methods
2465 * @ap: port to reset
2466 * @probeinit: probeinit method (can be NULL)
2467 * @softreset: softreset method (can be NULL)
2468 * @hardreset: hardreset method (can be NULL)
2469 * @postreset: postreset method (can be NULL)
2470 * @classes: resulting classes of attached devices
2472 * Reset the specified port and classify attached devices using
2473 * given methods. This function prefers softreset but tries all
2474 * possible reset sequences to reset and classify devices. This
2475 * function is intended to be used for constructing ->probe_reset
2476 * callback by low level drivers.
2478 * Reset methods should follow the following rules.
2480 * - Return 0 on success, -errno on failure.
2481 * - If classification is supported, fill classes[] with
2482 * recognized class codes.
2483 * - If classification is not supported, leave classes[] alone.
2484 * - If verbose is non-zero, print error message on failure;
2485 * otherwise, shut up.
2487 * LOCKING:
2488 * Kernel thread context (may sleep)
2490 * RETURNS:
2491 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2492 * if classification fails, and any error code from reset
2493 * methods.
2495 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2496 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2497 ata_postreset_fn_t postreset, unsigned int *classes)
2499 int rc = -EINVAL;
2501 if (probeinit)
2502 probeinit(ap);
2504 if (softreset) {
2505 rc = do_probe_reset(ap, softreset, postreset, classes);
2506 if (rc == 0)
2507 return 0;
2510 if (!hardreset)
2511 return rc;
2513 rc = do_probe_reset(ap, hardreset, postreset, classes);
2514 if (rc != -ENODEV)	/* covers rc == 0 as well; only -ENODEV falls through */
2515 return rc;
2517 if (softreset)
2518 rc = do_probe_reset(ap, softreset, postreset, classes);
2520 return rc;
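/*
 * A reset method obeying the rules listed above has roughly this
 * shape (a sketch for documentation only; foo_do_reset() is a
 * hypothetical hardware-specific helper):
 */
#if 0	/* example only, not compiled */
static int foo_softreset(struct ata_port *ap, int verbose,
			 unsigned int *classes)
{
	if (foo_do_reset(ap)) {
		if (verbose)
			printk(KERN_ERR "ata%u: reset failed\n", ap->id);
		return -EIO;		/* -errno on failure */
	}
	/* classification supported: fill in recognized class codes */
	classes[0] = ata_dev_try_classify(ap, 0, NULL);
	return 0;			/* 0 on success */
}
#endif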
2524 * ata_dev_same_device - Determine whether new ID matches configured device
2525 * @ap: port on which the device to compare against resides
2526 * @dev: device to compare against
2527 * @new_class: class of the new device
2528 * @new_id: IDENTIFY page of the new device
2530 * Compare @new_class and @new_id against @dev and determine
2531 * whether @dev is the device indicated by @new_class and
2532 * @new_id.
2534 * LOCKING:
2535 * None.
2537 * RETURNS:
2538 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2540 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2541 unsigned int new_class, const u16 *new_id)
2543 const u16 *old_id = dev->id;
2544 unsigned char model[2][41], serial[2][21];
2545 u64 new_n_sectors;
2547 if (dev->class != new_class) {
2548 printk(KERN_INFO
2549 "ata%u: dev %u class mismatch %d != %d\n",
2550 ap->id, dev->devno, dev->class, new_class);
2551 return 0;
2554 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2555 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2556 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2557 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2558 new_n_sectors = ata_id_n_sectors(new_id);
2560 if (strcmp(model[0], model[1])) {
2561 printk(KERN_INFO
2562 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2563 ap->id, dev->devno, model[0], model[1]);
2564 return 0;
2567 if (strcmp(serial[0], serial[1])) {
2568 printk(KERN_INFO
2569 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2570 ap->id, dev->devno, serial[0], serial[1]);
2571 return 0;
2574 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2575 printk(KERN_INFO
2576 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2577 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2578 (unsigned long long)new_n_sectors);
2579 return 0;
2582 return 1;
2586 * ata_dev_revalidate - Revalidate ATA device
2587 * @ap: port on which the device to revalidate resides
2588 * @dev: device to revalidate
2589 * @post_reset: is this revalidation after reset?
2591 * Re-read IDENTIFY page and make sure @dev is still attached to
2592 * the port.
2594 * LOCKING:
2595 * Kernel thread context (may sleep)
2597 * RETURNS:
2598 * 0 on success, negative errno otherwise
2600 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2601 int post_reset)
2603 unsigned int class;
2604 u16 *id;
2605 int rc;
2607 if (!ata_dev_present(dev))
2608 return -ENODEV;
2610 class = dev->class;
2611 id = NULL;
2613 /* allocate & read ID data */
2614 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2615 if (rc)
2616 goto fail;
2618 /* is the device still there? */
2619 if (!ata_dev_same_device(ap, dev, class, id)) {
2620 rc = -ENODEV;
2621 goto fail;
2624 kfree(dev->id);
2625 dev->id = id;
2627 /* configure device according to the new ID */
2628 return ata_dev_configure(ap, dev, 0);
2630 fail:
2631 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2632 ap->id, dev->devno, rc);
2633 kfree(id);
2634 return rc;
2637 static const char * const ata_dma_blacklist [] = {
2638 "WDC AC11000H", NULL,
2639 "WDC AC22100H", NULL,
2640 "WDC AC32500H", NULL,
2641 "WDC AC33100H", NULL,
2642 "WDC AC31600H", NULL,
2643 "WDC AC32100H", "24.09P07",
2644 "WDC AC23200L", "21.10N21",
2645 "Compaq CRD-8241B", NULL,
2646 "CRD-8400B", NULL,
2647 "CRD-8480B", NULL,
2648 "CRD-8482B", NULL,
2649 "CRD-84", NULL,
2650 "SanDisk SDP3B", NULL,
2651 "SanDisk SDP3B-64", NULL,
2652 "SANYO CD-ROM CRD", NULL,
2653 "HITACHI CDR-8", NULL,
2654 "HITACHI CDR-8335", NULL,
2655 "HITACHI CDR-8435", NULL,
2656 "Toshiba CD-ROM XM-6202B", NULL,
2657 "TOSHIBA CD-ROM XM-1702BC", NULL,
2658 "CD-532E-A", NULL,
2659 "E-IDE CD-ROM CR-840", NULL,
2660 "CD-ROM Drive/F5A", NULL,
2661 "WPI CDD-820", NULL,
2662 "SAMSUNG CD-ROM SC-148C", NULL,
2663 "SAMSUNG CD-ROM SC", NULL,
2664 "SanDisk SDP3B-64", NULL,
2665 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2666 "_NEC DV5800A", NULL,
2667 "SAMSUNG CD-ROM SN-124", "N001"
2670 static int ata_strim(char *s, size_t len)
2672 len = strnlen(s, len);
2674 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2675 while ((len > 0) && (s[len - 1] == ' ')) {
2676 len--;
2677 s[len] = 0;
2679 return len;
2682 static int ata_dma_blacklisted(const struct ata_device *dev)
2684 unsigned char model_num[40];
2685 unsigned char model_rev[16];
2686 unsigned int nlen, rlen;
2687 int i;
2689 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2690 sizeof(model_num));
2691 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2692 sizeof(model_rev));
2693 nlen = ata_strim(model_num, sizeof(model_num));
2694 rlen = ata_strim(model_rev, sizeof(model_rev));
2696 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2697 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2698 if (ata_dma_blacklist[i+1] == NULL)
2699 return 1;
2700 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2701 return 1;
2704 return 0;
2708 * ata_dev_xfermask - Compute supported xfermask of the given device
2709 * @ap: Port on which the device to compute xfermask for resides
2710 * @dev: Device to compute xfermask for
2712 * Compute supported xfermask of @dev and store it in
2713 * dev->*_mask. This function is responsible for applying all
2714 * known limits including host controller limits, device
2715 * blacklist, etc...
2717 * LOCKING:
2718 * None.
2720 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2722 unsigned long xfer_mask;
2723 int i;
2725 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2726 ap->udma_mask);
2728 /* use port-wide xfermask for now */
2729 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2730 struct ata_device *d = &ap->device[i];
2731 if (!ata_dev_present(d))
2732 continue;
2733 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2734 d->udma_mask);
2735 xfer_mask &= ata_id_xfermask(d->id);
2736 if (ata_dma_blacklisted(d))
2737 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2740 if (ata_dma_blacklisted(dev))
2741 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2742 "disabling DMA\n", ap->id, dev->devno);
2744 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2745 &dev->udma_mask);
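/*
 * The packed xfermask combined above is just the three per-type
 * bitmaps glued into one word, roughly (see ata_pack_xfermask() for
 * the real implementation):
 *
 *	packed = pio_mask |
 *		 (mwdma_mask << ATA_SHIFT_MWDMA) |
 *		 (udma_mask << ATA_SHIFT_UDMA);
 *
 * which is why the host, per-device and IDENTIFY-derived limits can
 * all be combined with plain bitwise AND before being unpacked again.
 */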
2749 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2750 * @ap: Port associated with device @dev
2751 * @dev: Device to which command will be sent
2753 * Issue SET FEATURES - XFER MODE command to device @dev
2754 * on port @ap.
2756 * LOCKING:
2757 * PCI/etc. bus probe sem.
2759 * RETURNS:
2760 * 0 on success, AC_ERR_* mask otherwise.
2763 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2764 struct ata_device *dev)
2766 struct ata_taskfile tf;
2767 unsigned int err_mask;
2769 /* set up set-features taskfile */
2770 DPRINTK("set features - xfer mode\n");
2772 ata_tf_init(ap, &tf, dev->devno);
2773 tf.command = ATA_CMD_SET_FEATURES;
2774 tf.feature = SETFEATURES_XFER;
2775 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2776 tf.protocol = ATA_PROT_NODATA;
2777 tf.nsect = dev->xfer_mode;
2779 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2781 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2782 return err_mask;
2786 * ata_dev_init_params - Issue INIT DEV PARAMS command
2787 * @ap: Port associated with device @dev
2788 * @dev: Device to which command will be sent
2790 * LOCKING:
2791 * Kernel thread context (may sleep)
2793 * RETURNS:
2794 * 0 on success, AC_ERR_* mask otherwise.
2797 static unsigned int ata_dev_init_params(struct ata_port *ap,
2798 struct ata_device *dev)
2800 struct ata_taskfile tf;
2801 unsigned int err_mask;
2802 u16 sectors = dev->id[6];
2803 u16 heads = dev->id[3];
2805 /* Number of sectors per track 1-255. Number of heads 1-16 */
2806 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2807 return 0;
2809 /* set up init dev params taskfile */
2810 DPRINTK("init dev params\n");
2812 ata_tf_init(ap, &tf, dev->devno);
2813 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2814 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2815 tf.protocol = ATA_PROT_NODATA;
2816 tf.nsect = sectors;
2817 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2819 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2821 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2822 return err_mask;
2826 * ata_sg_clean - Unmap DMA memory associated with command
2827 * @qc: Command containing DMA memory to be released
2829 * Unmap all mapped DMA memory associated with this command.
2831 * LOCKING:
2832 * spin_lock_irqsave(host_set lock)
2835 static void ata_sg_clean(struct ata_queued_cmd *qc)
2837 struct ata_port *ap = qc->ap;
2838 struct scatterlist *sg = qc->__sg;
2839 int dir = qc->dma_dir;
2840 void *pad_buf = NULL;
2842 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2843 WARN_ON(sg == NULL);
2845 if (qc->flags & ATA_QCFLAG_SINGLE)
2846 WARN_ON(qc->n_elem > 1);
2848 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2850 /* if we padded the buffer out to 32-bit bound, and data
2851 * xfer direction is from-device, we must copy from the
2852 * pad buffer back into the supplied buffer
2854 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2855 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2857 if (qc->flags & ATA_QCFLAG_SG) {
2858 if (qc->n_elem)
2859 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2860 /* restore last sg */
2861 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2862 if (pad_buf) {
2863 struct scatterlist *psg = &qc->pad_sgent;
2864 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2865 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2866 kunmap_atomic(addr, KM_IRQ0);
2868 } else {
2869 if (qc->n_elem)
2870 dma_unmap_single(ap->dev,
2871 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2872 dir);
2873 /* restore sg */
2874 sg->length += qc->pad_len;
2875 if (pad_buf)
2876 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2877 pad_buf, qc->pad_len);
2880 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2881 qc->__sg = NULL;
2885 * ata_fill_sg - Fill PCI IDE PRD table
2886 * @qc: Metadata associated with taskfile to be transferred
2888 * Fill PCI IDE PRD (scatter-gather) table with segments
2889 * associated with the current disk command.
2891 * LOCKING:
2892 * spin_lock_irqsave(host_set lock)
2895 static void ata_fill_sg(struct ata_queued_cmd *qc)
2897 struct ata_port *ap = qc->ap;
2898 struct scatterlist *sg;
2899 unsigned int idx;
2901 WARN_ON(qc->__sg == NULL);
2902 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2904 idx = 0;
2905 ata_for_each_sg(sg, qc) {
2906 u32 addr, offset;
2907 u32 sg_len, len;
2909 /* determine if physical DMA addr spans 64K boundary.
2910 * Note h/w doesn't support 64-bit, so we unconditionally
2911 * truncate dma_addr_t to u32.
2913 addr = (u32) sg_dma_address(sg);
2914 sg_len = sg_dma_len(sg);
2916 while (sg_len) {
2917 offset = addr & 0xffff;
2918 len = sg_len;
2919 if ((offset + sg_len) > 0x10000)
2920 len = 0x10000 - offset;
2922 ap->prd[idx].addr = cpu_to_le32(addr);
2923 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2924 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2926 idx++;
2927 sg_len -= len;
2928 addr += len;
2932 if (idx)
2933 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
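/*
 * Worked example of the boundary handling above: a 12KB segment at
 * bus address 0x3fc00 crosses the 64K mark at 0x40000 and is emitted
 * as two PRD entries:
 *
 *	PRD[n]   = (0x3fc00, 0x400)	first 1KB, up to the boundary
 *	PRD[n+1] = (0x40000, 0x2c00)	remaining 11KB
 *
 * ATA_PRD_EOT is then set only on the final entry of the table.
 */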
2936 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2937 * @qc: Metadata associated with taskfile to check
2939 * Allow low-level driver to filter ATA PACKET commands, returning
2940 * a status indicating whether or not it is OK to use DMA for the
2941 * supplied PACKET command.
2943 * LOCKING:
2944 * spin_lock_irqsave(host_set lock)
2946 * RETURNS: 0 when ATAPI DMA can be used
2947 * nonzero otherwise
2949 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2951 struct ata_port *ap = qc->ap;
2952 int rc = 0; /* Assume ATAPI DMA is OK by default */
2954 if (ap->ops->check_atapi_dma)
2955 rc = ap->ops->check_atapi_dma(qc);
2957 return rc;
2960 * ata_qc_prep - Prepare taskfile for submission
2961 * @qc: Metadata associated with taskfile to be prepared
2963 * Prepare ATA taskfile for submission.
2965 * LOCKING:
2966 * spin_lock_irqsave(host_set lock)
2968 void ata_qc_prep(struct ata_queued_cmd *qc)
2970 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2971 return;
2973 ata_fill_sg(qc);
2976 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2979 * ata_sg_init_one - Associate command with memory buffer
2980 * @qc: Command to be associated
2981 * @buf: Memory buffer
2982 * @buflen: Length of memory buffer, in bytes.
2984 * Initialize the data-related elements of queued_cmd @qc
2985 * to point to a single memory buffer, @buf of byte length @buflen.
2987 * LOCKING:
2988 * spin_lock_irqsave(host_set lock)
2991 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2993 struct scatterlist *sg;
2995 qc->flags |= ATA_QCFLAG_SINGLE;
2997 memset(&qc->sgent, 0, sizeof(qc->sgent));
2998 qc->__sg = &qc->sgent;
2999 qc->n_elem = 1;
3000 qc->orig_n_elem = 1;
3001 qc->buf_virt = buf;
3003 sg = qc->__sg;
3004 sg_init_one(sg, buf, buflen);
3008 * ata_sg_init - Associate command with scatter-gather table.
3009 * @qc: Command to be associated
3010 * @sg: Scatter-gather table.
3011 * @n_elem: Number of elements in s/g table.
3013 * Initialize the data-related elements of queued_cmd @qc
3014 * to point to a scatter-gather table @sg, containing @n_elem
3015 * elements.
3017 * LOCKING:
3018 * spin_lock_irqsave(host_set lock)
3021 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3022 unsigned int n_elem)
3024 qc->flags |= ATA_QCFLAG_SG;
3025 qc->__sg = sg;
3026 qc->n_elem = n_elem;
3027 qc->orig_n_elem = n_elem;
3031 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3032 * @qc: Command with memory buffer to be mapped.
3034 * DMA-map the memory buffer associated with queued_cmd @qc.
3036 * LOCKING:
3037 * spin_lock_irqsave(host_set lock)
3039 * RETURNS:
3040 * Zero on success, negative on error.
3043 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3045 struct ata_port *ap = qc->ap;
3046 int dir = qc->dma_dir;
3047 struct scatterlist *sg = qc->__sg;
3048 dma_addr_t dma_address;
3049 int trim_sg = 0;
3051 /* we must lengthen transfers to end on a 32-bit boundary */
3052 qc->pad_len = sg->length & 3;
3053 if (qc->pad_len) {
3054 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3055 struct scatterlist *psg = &qc->pad_sgent;
3057 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3059 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3061 if (qc->tf.flags & ATA_TFLAG_WRITE)
3062 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3063 qc->pad_len);
3065 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3066 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3067 /* trim sg */
3068 sg->length -= qc->pad_len;
3069 if (sg->length == 0)
3070 trim_sg = 1;
3072 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3073 sg->length, qc->pad_len);
3076 if (trim_sg) {
3077 qc->n_elem--;
3078 goto skip_map;
3081 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3082 sg->length, dir);
3083 if (dma_mapping_error(dma_address)) {
3084 /* restore sg */
3085 sg->length += qc->pad_len;
3086 return -1;
3089 sg_dma_address(sg) = dma_address;
3090 sg_dma_len(sg) = sg->length;
3092 skip_map:
3093 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3094 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3096 return 0;
3100 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3101 * @qc: Command with scatter-gather table to be mapped.
3103 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3105 * LOCKING:
3106 * spin_lock_irqsave(host_set lock)
3108 * RETURNS:
3109 * Zero on success, negative on error.
3113 static int ata_sg_setup(struct ata_queued_cmd *qc)
3115 struct ata_port *ap = qc->ap;
3116 struct scatterlist *sg = qc->__sg;
3117 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3118 int n_elem, pre_n_elem, dir, trim_sg = 0;
3120 VPRINTK("ENTER, ata%u\n", ap->id);
3121 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3123 /* we must lengthen transfers to end on a 32-bit boundary */
3124 qc->pad_len = lsg->length & 3;
3125 if (qc->pad_len) {
3126 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3127 struct scatterlist *psg = &qc->pad_sgent;
3128 unsigned int offset;
3130 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3132 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3135 * psg->page/offset are used to copy to-be-written
3136 * data in this function or read data in ata_sg_clean.
3138 offset = lsg->offset + lsg->length - qc->pad_len;
3139 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3140 psg->offset = offset_in_page(offset);
3142 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3143 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3144 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3145 kunmap_atomic(addr, KM_IRQ0);
3148 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3149 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3150 /* trim last sg */
3151 lsg->length -= qc->pad_len;
3152 if (lsg->length == 0)
3153 trim_sg = 1;
3155 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3156 qc->n_elem - 1, lsg->length, qc->pad_len);
3159 pre_n_elem = qc->n_elem;
3160 if (trim_sg && pre_n_elem)
3161 pre_n_elem--;
3163 if (!pre_n_elem) {
3164 n_elem = 0;
3165 goto skip_map;
3168 dir = qc->dma_dir;
3169 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3170 if (n_elem < 1) {
3171 /* restore last sg */
3172 lsg->length += qc->pad_len;
3173 return -1;
3176 DPRINTK("%d sg elements mapped\n", n_elem);
3178 skip_map:
3179 qc->n_elem = n_elem;
3181 return 0;
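/*
 * To summarize the padding scheme shared by ata_sg_setup_one() and
 * ata_sg_setup(): transfers must end on a 32-bit boundary, so the
 * last 1-3 odd bytes are bounced through a small per-tag pad buffer
 * at ap->pad + qc->tag * ATA_DMA_PAD_SZ.  For writes the odd bytes
 * are copied into the pad buffer here, before the transfer; for
 * reads, ata_sg_clean() copies them back out afterwards.
 */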
3185 * ata_poll_qc_complete - turn irq back on and finish qc
3186 * @qc: Command to complete
3189 * LOCKING:
3190 * None. (grabs host lock)
3193 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3195 struct ata_port *ap = qc->ap;
3196 unsigned long flags;
3198 spin_lock_irqsave(&ap->host_set->lock, flags);
3199 ap->flags &= ~ATA_FLAG_NOINTR;
3200 ata_irq_on(ap);
3201 ata_qc_complete(qc);
3202 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3206 * ata_pio_poll - poll using PIO, depending on current state
3207 * @ap: the target ata_port
3209 * LOCKING:
3210 * None. (executing in kernel thread context)
3212 * RETURNS:
3213 * timeout value to use
3216 static unsigned long ata_pio_poll(struct ata_port *ap)
3218 struct ata_queued_cmd *qc;
3219 u8 status;
3220 unsigned int poll_state = HSM_ST_UNKNOWN;
3221 unsigned int reg_state = HSM_ST_UNKNOWN;
3223 qc = ata_qc_from_tag(ap, ap->active_tag);
3224 WARN_ON(qc == NULL);
3226 switch (ap->hsm_task_state) {
3227 case HSM_ST:
3228 case HSM_ST_POLL:
3229 poll_state = HSM_ST_POLL;
3230 reg_state = HSM_ST;
3231 break;
3232 case HSM_ST_LAST:
3233 case HSM_ST_LAST_POLL:
3234 poll_state = HSM_ST_LAST_POLL;
3235 reg_state = HSM_ST_LAST;
3236 break;
3237 default:
3238 BUG();
3239 break;
3242 status = ata_chk_status(ap);
3243 if (status & ATA_BUSY) {
3244 if (time_after(jiffies, ap->pio_task_timeout)) {
3245 qc->err_mask |= AC_ERR_TIMEOUT;
3246 ap->hsm_task_state = HSM_ST_TMOUT;
3247 return 0;
3249 ap->hsm_task_state = poll_state;
3250 return ATA_SHORT_PAUSE;
3253 ap->hsm_task_state = reg_state;
3254 return 0;
3258 * ata_pio_complete - check if drive is busy or idle
3259 * @ap: the target ata_port
3261 * LOCKING:
3262 * None. (executing in kernel thread context)
3264 * RETURNS:
3265 * Non-zero if qc completed, zero otherwise.
3268 static int ata_pio_complete (struct ata_port *ap)
3270 struct ata_queued_cmd *qc;
3271 u8 drv_stat;
3274 * This is purely heuristic. This is a fast path. Sometimes when
3275 * we enter, BSY will be cleared in a chk-status or two. If not,
3276 * the drive is probably seeking or something. Snooze for a couple
3277 * msecs, then chk-status again. If still busy, fall back to
3278 * HSM_ST_POLL state.
3280 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3281 if (drv_stat & ATA_BUSY) {
3282 msleep(2);
3283 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3284 if (drv_stat & ATA_BUSY) {
3285 ap->hsm_task_state = HSM_ST_LAST_POLL;
3286 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3287 return 0;
3291 qc = ata_qc_from_tag(ap, ap->active_tag);
3292 WARN_ON(qc == NULL);
3294 drv_stat = ata_wait_idle(ap);
3295 if (!ata_ok(drv_stat)) {
3296 qc->err_mask |= __ac_err_mask(drv_stat);
3297 ap->hsm_task_state = HSM_ST_ERR;
3298 return 0;
3301 ap->hsm_task_state = HSM_ST_IDLE;
3303 WARN_ON(qc->err_mask);
3304 ata_poll_qc_complete(qc);
3306 /* another command may start at this point */
3308 return 1;
3313 * swap_buf_le16 - swap halves of 16-bit words in place
3314 * @buf: Buffer to swap
3315 * @buf_words: Number of 16-bit words in buffer.
3317 * Swap halves of 16-bit words if needed to convert from
3318 * little-endian byte order to native cpu byte order, or
3319 * vice-versa.
3321 * LOCKING:
3322 * Inherited from caller.
3324 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3326 #ifdef __BIG_ENDIAN
3327 unsigned int i;
3329 for (i = 0; i < buf_words; i++)
3330 buf[i] = le16_to_cpu(buf[i]);
3331 #endif /* __BIG_ENDIAN */
3335 * ata_mmio_data_xfer - Transfer data by MMIO
3336 * @ap: port to read/write
3337 * @buf: data buffer
3338 * @buflen: buffer length
3339 * @write_data: read/write
3341 * Transfer data from/to the device data register by MMIO.
3343 * LOCKING:
3344 * Inherited from caller.
3347 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3348 unsigned int buflen, int write_data)
3350 unsigned int i;
3351 unsigned int words = buflen >> 1;
3352 u16 *buf16 = (u16 *) buf;
3353 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3355 /* Transfer multiple of 2 bytes */
3356 if (write_data) {
3357 for (i = 0; i < words; i++)
3358 writew(le16_to_cpu(buf16[i]), mmio);
3359 } else {
3360 for (i = 0; i < words; i++)
3361 buf16[i] = cpu_to_le16(readw(mmio));
3364 /* Transfer trailing 1 byte, if any. */
3365 if (unlikely(buflen & 0x01)) {
3366 u16 align_buf[1] = { 0 };
3367 unsigned char *trailing_buf = buf + buflen - 1;
3369 if (write_data) {
3370 memcpy(align_buf, trailing_buf, 1);
3371 writew(le16_to_cpu(align_buf[0]), mmio);
3372 } else {
3373 align_buf[0] = cpu_to_le16(readw(mmio));
3374 memcpy(trailing_buf, align_buf, 1);
3380 * ata_pio_data_xfer - Transfer data by PIO
3381 * @ap: port to read/write
3382 * @buf: data buffer
3383 * @buflen: buffer length
3384 * @write_data: read/write
3386 * Transfer data from/to the device data register by PIO.
3388 * LOCKING:
3389 * Inherited from caller.
3392 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3393 unsigned int buflen, int write_data)
3395 unsigned int words = buflen >> 1;
3397 /* Transfer multiple of 2 bytes */
3398 if (write_data)
3399 outsw(ap->ioaddr.data_addr, buf, words);
3400 else
3401 insw(ap->ioaddr.data_addr, buf, words);
3403 /* Transfer trailing 1 byte, if any. */
3404 if (unlikely(buflen & 0x01)) {
3405 u16 align_buf[1] = { 0 };
3406 unsigned char *trailing_buf = buf + buflen - 1;
3408 if (write_data) {
3409 memcpy(align_buf, trailing_buf, 1);
3410 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3411 } else {
3412 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3413 memcpy(trailing_buf, align_buf, 1);
3419 * ata_data_xfer - Transfer data from/to the data register.
3420 * @ap: port to read/write
3421 * @buf: data buffer
3422 * @buflen: buffer length
3423 * @do_write: read/write
3425 * Transfer data from/to the device data register.
3427 * LOCKING:
3428 * Inherited from caller.
3431 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3432 unsigned int buflen, int do_write)
3434 /* Make the crap hardware pay the costs, not the good stuff */
3435 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3436 unsigned long flags;
3437 local_irq_save(flags);
3438 if (ap->flags & ATA_FLAG_MMIO)
3439 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3440 else
3441 ata_pio_data_xfer(ap, buf, buflen, do_write);
3442 local_irq_restore(flags);
3443 } else {
3444 if (ap->flags & ATA_FLAG_MMIO)
3445 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3446 else
3447 ata_pio_data_xfer(ap, buf, buflen, do_write);
3452 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3453 * @qc: Command on going
3455 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3457 * LOCKING:
3458 * Inherited from caller.
3461 static void ata_pio_sector(struct ata_queued_cmd *qc)
3463 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3464 struct scatterlist *sg = qc->__sg;
3465 struct ata_port *ap = qc->ap;
3466 struct page *page;
3467 unsigned int offset;
3468 unsigned char *buf;
3470 if (qc->cursect == (qc->nsect - 1))
3471 ap->hsm_task_state = HSM_ST_LAST;
3473 page = sg[qc->cursg].page;
3474 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3476 /* get the current page and offset */
3477 page = nth_page(page, (offset >> PAGE_SHIFT));
3478 offset %= PAGE_SIZE;
3480 buf = kmap(page) + offset;
3482 qc->cursect++;
3483 qc->cursg_ofs++;
3485 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3486 qc->cursg++;
3487 qc->cursg_ofs = 0;
3490 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3492 /* do the actual data transfer */
3493 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3494 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
3496 kunmap(page);
3500 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3501 * @qc: Command on going
3502 * @bytes: number of bytes
3504 * Transfer data from/to the ATAPI device.
3506 * LOCKING:
3507 * Inherited from caller.
3511 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3513 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3514 struct scatterlist *sg = qc->__sg;
3515 struct ata_port *ap = qc->ap;
3516 struct page *page;
3517 unsigned char *buf;
3518 unsigned int offset, count;
3520 if (qc->curbytes + bytes >= qc->nbytes)
3521 ap->hsm_task_state = HSM_ST_LAST;
3523 next_sg:
3524 if (unlikely(qc->cursg >= qc->n_elem)) {
3526 * The end of qc->sg is reached and the device expects
3527 * more data to transfer. In order not to overrun qc->sg
3528 * and still fulfill the length specified in the byte count register,
3529 * - for reads, discard the trailing data from the device
3530 * - for writes, pad the device with zero data
3532 u16 pad_buf[1] = { 0 };
3533 unsigned int words = bytes >> 1;
3534 unsigned int i;
3536 if (words) /* warning if bytes > 1 */
3537 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3538 ap->id, bytes);
3540 for (i = 0; i < words; i++)
3541 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3543 ap->hsm_task_state = HSM_ST_LAST;
3544 return;
3547 sg = &qc->__sg[qc->cursg];
3549 page = sg->page;
3550 offset = sg->offset + qc->cursg_ofs;
3552 /* get the current page and offset */
3553 page = nth_page(page, (offset >> PAGE_SHIFT));
3554 offset %= PAGE_SIZE;
3556 /* don't overrun current sg */
3557 count = min(sg->length - qc->cursg_ofs, bytes);
3559 /* don't cross page boundaries */
3560 count = min(count, (unsigned int)PAGE_SIZE - offset);
3562 buf = kmap(page) + offset;
3564 bytes -= count;
3565 qc->curbytes += count;
3566 qc->cursg_ofs += count;
3568 if (qc->cursg_ofs == sg->length) {
3569 qc->cursg++;
3570 qc->cursg_ofs = 0;
3573 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3575 /* do the actual data transfer */
3576 ata_data_xfer(ap, buf, count, do_write);
3578 kunmap(page);
3580 if (bytes)
3581 goto next_sg;
3585 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3586 * @qc: Command on going
3588 * Transfer data from/to the ATAPI device.
3590 * LOCKING:
3591 * Inherited from caller.
3594 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3596 struct ata_port *ap = qc->ap;
3597 struct ata_device *dev = qc->dev;
3598 unsigned int ireason, bc_lo, bc_hi, bytes;
3599 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3601 ap->ops->tf_read(ap, &qc->tf);
3602 ireason = qc->tf.nsect;
3603 bc_lo = qc->tf.lbam;
3604 bc_hi = qc->tf.lbah;
3605 bytes = (bc_hi << 8) | bc_lo;
3607 /* shall be cleared to zero, indicating xfer of data */
3608 if (ireason & (1 << 0))
3609 goto err_out;
3611 /* make sure transfer direction matches expected */
3612 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3613 if (do_write != i_write)
3614 goto err_out;
3616 __atapi_pio_bytes(qc, bytes);
3618 return;
3620 err_out:
3621 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3622 ap->id, dev->devno);
3623 qc->err_mask |= AC_ERR_HSM;
3624 ap->hsm_task_state = HSM_ST_ERR;
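/*
 * On the ireason checks above (per the ATAPI packet protocol): bit 0
 * is CoD (1 = command packet, 0 = data) and bit 1 is IO (1 = transfer
 * to host, 0 = transfer to device).  A data phase therefore requires
 * CoD == 0 and an IO bit matching the direction the command was
 * queued with; anything else is flagged as an HSM violation.
 */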
3628 * ata_pio_block - start PIO on a block
3629 * @ap: the target ata_port
3631 * LOCKING:
3632 * None. (executing in kernel thread context)
3635 static void ata_pio_block(struct ata_port *ap)
3637 struct ata_queued_cmd *qc;
3638 u8 status;
3641 * This is purely heuristic. This is a fast path.
3642 * Sometimes when we enter, BSY will be cleared in
3643 * a chk-status or two. If not, the drive is probably seeking
3644 * or something. Snooze for a couple msecs, then
3645 * chk-status again. If still busy, fall back to
3646 * HSM_ST_POLL state.
3648 status = ata_busy_wait(ap, ATA_BUSY, 5);
3649 if (status & ATA_BUSY) {
3650 msleep(2);
3651 status = ata_busy_wait(ap, ATA_BUSY, 10);
3652 if (status & ATA_BUSY) {
3653 ap->hsm_task_state = HSM_ST_POLL;
3654 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3655 return;
3659 qc = ata_qc_from_tag(ap, ap->active_tag);
3660 WARN_ON(qc == NULL);
3662 /* check error */
3663 if (status & (ATA_ERR | ATA_DF)) {
3664 qc->err_mask |= AC_ERR_DEV;
3665 ap->hsm_task_state = HSM_ST_ERR;
3666 return;
3669 /* transfer data if any */
3670 if (is_atapi_taskfile(&qc->tf)) {
3671 /* DRQ=0 means no more data to transfer */
3672 if ((status & ATA_DRQ) == 0) {
3673 ap->hsm_task_state = HSM_ST_LAST;
3674 return;
3677 atapi_pio_bytes(qc);
3678 } else {
3679 /* handle BSY=0, DRQ=0 as error */
3680 if ((status & ATA_DRQ) == 0) {
3681 qc->err_mask |= AC_ERR_HSM;
3682 ap->hsm_task_state = HSM_ST_ERR;
3683 return;
3686 ata_pio_sector(qc);
3690 static void ata_pio_error(struct ata_port *ap)
3692 struct ata_queued_cmd *qc;
3694 qc = ata_qc_from_tag(ap, ap->active_tag);
3695 WARN_ON(qc == NULL);
3697 if (qc->tf.command != ATA_CMD_PACKET)
3698 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3700 /* make sure qc->err_mask is available to
3701 * know what's wrong and recover
3703 WARN_ON(qc->err_mask == 0);
3705 ap->hsm_task_state = HSM_ST_IDLE;
3707 ata_poll_qc_complete(qc);
3710 static void ata_pio_task(void *_data)
3712 struct ata_port *ap = _data;
3713 unsigned long timeout;
3714 int qc_completed;
3716 fsm_start:
3717 timeout = 0;
3718 qc_completed = 0;
3720 switch (ap->hsm_task_state) {
3721 case HSM_ST_IDLE:
3722 return;
3724 case HSM_ST:
3725 ata_pio_block(ap);
3726 break;
3728 case HSM_ST_LAST:
3729 qc_completed = ata_pio_complete(ap);
3730 break;
3732 case HSM_ST_POLL:
3733 case HSM_ST_LAST_POLL:
3734 timeout = ata_pio_poll(ap);
3735 break;
3737 case HSM_ST_TMOUT:
3738 case HSM_ST_ERR:
3739 ata_pio_error(ap);
3740 return;
3743 if (timeout)
3744 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3745 else if (!qc_completed)
3746 goto fsm_start;
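/*
 * Informal map of the polled PIO state machine driven above, derived
 * from the handlers (not a normative diagram):
 *
 *	HSM_ST           - transfer one data block (ata_pio_block)
 *	HSM_ST_POLL      - drive still busy; re-poll after a pause
 *	HSM_ST_LAST      - data done; wait for final non-busy status
 *	HSM_ST_LAST_POLL - as HSM_ST_LAST, but via timed polling
 *	HSM_ST_TMOUT/ERR - fail the qc through ata_pio_error()
 *	HSM_ST_IDLE      - no PIO transaction in flight
 */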
3750 * atapi_packet_task - Write CDB bytes to hardware
3751 * @_data: Port to which ATAPI device is attached.
3753 * When the device has indicated its readiness to accept
3754 * a CDB, this function is called. Send the CDB.
3755 * If DMA is to be performed, exit immediately.
3756 * Otherwise, we are in polling mode, so poll
3757 * status until the operation succeeds or fails.
3759 * LOCKING:
3760 * Kernel thread context (may sleep)
3763 static void atapi_packet_task(void *_data)
3765 struct ata_port *ap = _data;
3766 struct ata_queued_cmd *qc;
3767 u8 status;
3769 qc = ata_qc_from_tag(ap, ap->active_tag);
3770 WARN_ON(qc == NULL);
3771 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3773 /* sleep-wait for BSY to clear */
3774 DPRINTK("busy wait\n");
3775 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) {
3776 qc->err_mask |= AC_ERR_TIMEOUT;
3777 goto err_out;
3780 /* make sure DRQ is set */
3781 status = ata_chk_status(ap);
3782 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3783 qc->err_mask |= AC_ERR_HSM;
3784 goto err_out;
3787 /* send SCSI cdb */
3788 DPRINTK("send cdb\n");
3789 WARN_ON(qc->dev->cdb_len < 12);
3791 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
3792 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3793 unsigned long flags;
3795 /* Once we're done issuing command and kicking bmdma,
3796 * irq handler takes over. To not lose irq, we need
3797 * to clear NOINTR flag before sending cdb, but
3798 * interrupt handler shouldn't be invoked before we're
3799 * finished. Hence, the following locking.
3801 spin_lock_irqsave(&ap->host_set->lock, flags);
3802 ap->flags &= ~ATA_FLAG_NOINTR;
3803 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3804 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3805 ap->ops->bmdma_start(qc); /* initiate bmdma */
3806 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3807 } else {
3808 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3810 /* PIO commands are handled by polling */
3811 ap->hsm_task_state = HSM_ST;
3812 ata_port_queue_task(ap, ata_pio_task, ap, 0);
3815 return;
3817 err_out:
3818 ata_poll_qc_complete(qc);
3822 * ata_qc_timeout - Handle timeout of queued command
3823 * @qc: Command that timed out
3825 * Some part of the kernel (currently, only the SCSI layer)
3826 * has noticed that the active command on port @ap has not
3827 * completed after a specified length of time. Handle this
3828 * condition by disabling DMA (if necessary) and completing
3829 * transactions, with error if necessary.
3831 * This also handles the case of the "lost interrupt", where
3832 * for some reason (possibly hardware bug, possibly driver bug)
3833 * an interrupt was not delivered to the driver, even though the
3834 * transaction completed successfully.
3836 * LOCKING:
3837 * Inherited from SCSI layer (none, can sleep)
3840 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3842 struct ata_port *ap = qc->ap;
3843 struct ata_host_set *host_set = ap->host_set;
3844 u8 host_stat = 0, drv_stat;
3845 unsigned long flags;
3847 DPRINTK("ENTER\n");
3849 ap->hsm_task_state = HSM_ST_IDLE;
3851 spin_lock_irqsave(&host_set->lock, flags);
3853 switch (qc->tf.protocol) {
3855 case ATA_PROT_DMA:
3856 case ATA_PROT_ATAPI_DMA:
3857 host_stat = ap->ops->bmdma_status(ap);
3859 /* before we do anything else, clear DMA-Start bit */
3860 ap->ops->bmdma_stop(qc);
3862 /* fall through */
3864 default:
3865 ata_altstatus(ap);
3866 drv_stat = ata_chk_status(ap);
3868 /* ack bmdma irq events */
3869 ap->ops->irq_clear(ap);
3871 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
3872 ap->id, qc->tf.command, drv_stat, host_stat);
3874 /* complete taskfile transaction */
3875 qc->err_mask |= ac_err_mask(drv_stat);
3876 break;
3879 spin_unlock_irqrestore(&host_set->lock, flags);
3881 ata_eh_qc_complete(qc);
3883 DPRINTK("EXIT\n");
3887 * ata_eng_timeout - Handle timeout of queued command
3888 * @ap: Port on which timed-out command is active
3890 * Some part of the kernel (currently, only the SCSI layer)
3891 * has noticed that the active command on port @ap has not
3892 * completed after a specified length of time. Handle this
3893 * condition by disabling DMA (if necessary) and completing
3894 * transactions, with error if necessary.
3896 * This also handles the case of the "lost interrupt", where
3897 * for some reason (possibly hardware bug, possibly driver bug)
3898 * an interrupt was not delivered to the driver, even though the
3899 * transaction completed successfully.
3901 * LOCKING:
3902 * Inherited from SCSI layer (none, can sleep)
3905 void ata_eng_timeout(struct ata_port *ap)
3907 DPRINTK("ENTER\n");
3909 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
3911 DPRINTK("EXIT\n");
3915 * ata_qc_new - Request an available ATA command, for queueing
3916 * @ap: Port from which we request an available command structure
3919 * LOCKING:
3920 * None.
3923 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3925 struct ata_queued_cmd *qc = NULL;
3926 unsigned int i;
3928 for (i = 0; i < ATA_MAX_QUEUE; i++)
3929 if (!test_and_set_bit(i, &ap->qactive)) {
3930 qc = ata_qc_from_tag(ap, i);
3931 break;
3934 if (qc)
3935 qc->tag = i;
3937 return qc;
3941 * ata_qc_new_init - Request an available ATA command, and initialize it
3942 * @ap: Port associated with device @dev
3943 * @dev: Device for which we request an available command structure
3945 * LOCKING:
3946 * None.
3949 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3950 struct ata_device *dev)
3952 struct ata_queued_cmd *qc;
3954 qc = ata_qc_new(ap);
3955 if (qc) {
3956 qc->scsicmd = NULL;
3957 qc->ap = ap;
3958 qc->dev = dev;
3960 ata_qc_reinit(qc);
3963 return qc;
3967 * ata_qc_free - free unused ata_queued_cmd
3968 * @qc: Command to complete
3970 * Frees an unused ata_queued_cmd object
3971 * in case something prevents it from being used.
3973 * LOCKING:
3974 * spin_lock_irqsave(host_set lock)
3976 void ata_qc_free(struct ata_queued_cmd *qc)
3978 struct ata_port *ap = qc->ap;
3979 unsigned int tag;
3981 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3983 qc->flags = 0;
3984 tag = qc->tag;
3985 if (likely(ata_tag_valid(tag))) {
3986 if (tag == ap->active_tag)
3987 ap->active_tag = ATA_TAG_POISON;
3988 qc->tag = ATA_TAG_POISON;
3989 clear_bit(tag, &ap->qactive);
3993 void __ata_qc_complete(struct ata_queued_cmd *qc)
3995 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
3996 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
3998 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3999 ata_sg_clean(qc);
4001 /* atapi: mark qc as inactive to prevent the interrupt handler
4002 * from completing the command twice later, before the error handler
4003 * is called. (when rc != 0 and atapi request sense is needed)
4005 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4007 /* call completion callback */
4008 qc->complete_fn(qc);
4011 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4013 struct ata_port *ap = qc->ap;
4015 switch (qc->tf.protocol) {
4016 case ATA_PROT_DMA:
4017 case ATA_PROT_ATAPI_DMA:
4018 return 1;
4020 case ATA_PROT_ATAPI:
4021 case ATA_PROT_PIO:
4022 if (ap->flags & ATA_FLAG_PIO_DMA)
4023 return 1;
4025 /* fall through */
4027 default:
4028 return 0;
4031 /* never reached */
4035 * ata_qc_issue - issue taskfile to device
4036 * @qc: command to issue to device
4038 * Prepare an ATA command for submission to the device.
4039 * This includes mapping the data into a DMA-able
4040 * area, filling in the S/G table, and finally
4041 * writing the taskfile to hardware, starting the command.
4043 * LOCKING:
4044 * spin_lock_irqsave(host_set lock)
4046 * RETURNS:
4047 * Zero on success, AC_ERR_* mask on failure
4050 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4052 struct ata_port *ap = qc->ap;
4054 if (ata_should_dma_map(qc)) {
4055 if (qc->flags & ATA_QCFLAG_SG) {
4056 if (ata_sg_setup(qc))
4057 goto sg_err;
4058 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4059 if (ata_sg_setup_one(qc))
4060 goto sg_err;
4062 } else {
4063 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4066 ap->ops->qc_prep(qc);
4068 qc->ap->active_tag = qc->tag;
4069 qc->flags |= ATA_QCFLAG_ACTIVE;
4071 return ap->ops->qc_issue(qc);
4073 sg_err:
4074 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4075 return AC_ERR_SYSTEM;
4080 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4081 * @qc: command to issue to device
4083 * Using various libata functions and hooks, this function
4084 * starts an ATA command. ATA commands are grouped into
4085 * classes called "protocols", and issuing each type of protocol
4086 * is slightly different.
4088 * May be used as the qc_issue() entry in ata_port_operations.
4090 * LOCKING:
4091 * spin_lock_irqsave(host_set lock)
4093 * RETURNS:
4094 * Zero on success, AC_ERR_* mask on failure
4097 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4099 struct ata_port *ap = qc->ap;
4101 ata_dev_select(ap, qc->dev->devno, 1, 0);
4103 switch (qc->tf.protocol) {
4104 case ATA_PROT_NODATA:
4105 ata_tf_to_host(ap, &qc->tf);
4106 break;
4108 case ATA_PROT_DMA:
4109 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4110 ap->ops->bmdma_setup(qc); /* set up bmdma */
4111 ap->ops->bmdma_start(qc); /* initiate bmdma */
4112 break;
4114 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
4115 ata_qc_set_polling(qc);
4116 ata_tf_to_host(ap, &qc->tf);
4117 ap->hsm_task_state = HSM_ST;
4118 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4119 break;
4121 case ATA_PROT_ATAPI:
4122 ata_qc_set_polling(qc);
4123 ata_tf_to_host(ap, &qc->tf);
4124 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4125 break;
4127 case ATA_PROT_ATAPI_NODATA:
4128 ap->flags |= ATA_FLAG_NOINTR;
4129 ata_tf_to_host(ap, &qc->tf);
4130 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4131 break;
4133 case ATA_PROT_ATAPI_DMA:
4134 ap->flags |= ATA_FLAG_NOINTR;
4135 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4136 ap->ops->bmdma_setup(qc); /* set up bmdma */
4137 ata_port_queue_task(ap, atapi_packet_task, ap, 0);
4138 break;
4140 default:
4141 WARN_ON(1);
4142 return AC_ERR_SYSTEM;
4145 return 0;
4149 * ata_host_intr - Handle host interrupt for given (port, task)
4150 * @ap: Port on which interrupt arrived (possibly...)
4151 * @qc: Taskfile currently active in engine
4153 * Handle host interrupt for given queued command. Currently,
4154 * only DMA interrupts are handled. All other commands are
4155 * handled via polling with interrupts disabled (nIEN bit).
4157 * LOCKING:
4158 * spin_lock_irqsave(host_set lock)
4160 * RETURNS:
4161 * One if interrupt was handled, zero if not (shared irq).
4164 inline unsigned int ata_host_intr (struct ata_port *ap,
4165 struct ata_queued_cmd *qc)
4167 u8 status, host_stat;
4169 switch (qc->tf.protocol) {
4171 case ATA_PROT_DMA:
4172 case ATA_PROT_ATAPI_DMA:
4173 case ATA_PROT_ATAPI:
4174 /* check status of DMA engine */
4175 host_stat = ap->ops->bmdma_status(ap);
4176 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4178 /* if it's not our irq... */
4179 if (!(host_stat & ATA_DMA_INTR))
4180 goto idle_irq;
4182 /* before we do anything else, clear DMA-Start bit */
4183 ap->ops->bmdma_stop(qc);
4185 /* fall through */
4187 case ATA_PROT_ATAPI_NODATA:
4188 case ATA_PROT_NODATA:
4189 /* check altstatus */
4190 status = ata_altstatus(ap);
4191 if (status & ATA_BUSY)
4192 goto idle_irq;
4194 /* check main status, clearing INTRQ */
4195 status = ata_chk_status(ap);
4196 if (unlikely(status & ATA_BUSY))
4197 goto idle_irq;
4198 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
4199 ap->id, qc->tf.protocol, status);
4201 /* ack bmdma irq events */
4202 ap->ops->irq_clear(ap);
4204 /* complete taskfile transaction */
4205 qc->err_mask |= ac_err_mask(status);
4206 ata_qc_complete(qc);
4207 break;
4209 default:
4210 goto idle_irq;
4213 return 1; /* irq handled */
4215 idle_irq:
4216 ap->stats.idle_irq++;
4218 #ifdef ATA_IRQ_TRAP
4219 if ((ap->stats.idle_irq % 1000) == 0) {
4220 ata_irq_ack(ap, 0); /* debug trap */
4221 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4222 return 1;
4224 #endif
4225 return 0; /* irq not handled */
4229 * ata_interrupt - Default ATA host interrupt handler
4230 * @irq: irq line (unused)
4231 * @dev_instance: pointer to our ata_host_set information structure
4232 * @regs: unused
4234 * Default interrupt handler for PCI IDE devices. Calls
4235 * ata_host_intr() for each port that is not disabled.
4237 * LOCKING:
4238 * Obtains host_set lock during operation.
4240 * RETURNS:
4241 * IRQ_NONE or IRQ_HANDLED.
4244 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4246 struct ata_host_set *host_set = dev_instance;
4247 unsigned int i;
4248 unsigned int handled = 0;
4249 unsigned long flags;
4251 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4252 spin_lock_irqsave(&host_set->lock, flags);
4254 for (i = 0; i < host_set->n_ports; i++) {
4255 struct ata_port *ap;
4257 ap = host_set->ports[i];
4258 if (ap &&
4259 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
4260 struct ata_queued_cmd *qc;
4262 qc = ata_qc_from_tag(ap, ap->active_tag);
4263 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
4264 (qc->flags & ATA_QCFLAG_ACTIVE))
4265 handled |= ata_host_intr(ap, qc);
4269 spin_unlock_irqrestore(&host_set->lock, flags);
4271 return IRQ_RETVAL(handled);
4276 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4277 * without filling any other registers.
4279 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4280 u8 cmd)
4282 struct ata_taskfile tf;
4283 int err;
4285 ata_tf_init(ap, &tf, dev->devno);
4287 tf.command = cmd;
4288 tf.flags |= ATA_TFLAG_DEVICE;
4289 tf.protocol = ATA_PROT_NODATA;
4291 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4292 if (err)
4293 printk(KERN_ERR "%s: ata command failed: %d\n",
4294 __FUNCTION__, err);
4296 return err;
4299 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4301 u8 cmd;
4303 if (!ata_try_flush_cache(dev))
4304 return 0;
4306 if (ata_id_has_flush_ext(dev->id))
4307 cmd = ATA_CMD_FLUSH_EXT;
4308 else
4309 cmd = ATA_CMD_FLUSH;
4311 return ata_do_simple_cmd(ap, dev, cmd);
4314 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4316 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4319 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4321 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4325 * ata_device_resume - wake up a previously suspended device
4326 * @ap: port the device is connected to
4327 * @dev: the device to resume
4329 * Kick the drive back into action, by sending it an idle immediate
4330 * command and making sure its transfer mode matches between drive
4331 * and host.
4334 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4336 if (ap->flags & ATA_FLAG_SUSPENDED) {
4337 ap->flags &= ~ATA_FLAG_SUSPENDED;
4338 ata_set_mode(ap);
4340 if (!ata_dev_present(dev))
4341 return 0;
4342 if (dev->class == ATA_DEV_ATA)
4343 ata_start_drive(ap, dev);
4345 return 0;
4349 * ata_device_suspend - prepare a device for suspend
4350 * @ap: port the device is connected to
4351 * @dev: the device to suspend
4353 * Flush the cache on the drive, if appropriate, then issue a
4354 * standbynow command.
4356 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4358 if (!ata_dev_present(dev))
4359 return 0;
4360 if (dev->class == ATA_DEV_ATA)
4361 ata_flush_cache(ap, dev);
4363 if (state.event != PM_EVENT_FREEZE)
4364 ata_standby_drive(ap, dev);
4365 ap->flags |= ATA_FLAG_SUSPENDED;
4366 return 0;
4370 * ata_port_start - Set port up for DMA.
4371 * @ap: Port to initialize
4373 * Called just after data structures for each port are
4374 * initialized. Allocates space for PRD table.
4376 * May be used as the port_start() entry in ata_port_operations.
4378 * LOCKING:
4379 * Inherited from caller.
4382 int ata_port_start (struct ata_port *ap)
4384 struct device *dev = ap->dev;
4385 int rc;
4387 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4388 if (!ap->prd)
4389 return -ENOMEM;
4391 rc = ata_pad_alloc(ap, dev);
4392 if (rc) {
4393 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4394 return rc;
4397 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4399 return 0;
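/*
 * Drivers that need extra per-port resources usually wrap this helper
 * rather than replace it.  A minimal sketch, assuming a hypothetical
 * "foo" driver with private per-port state:
 */
#if 0	/* example only, not compiled */
struct foo_port_priv { u32 saved_ctl; };	/* hypothetical state */

static int foo_port_start(struct ata_port *ap)
{
	int rc = ata_port_start(ap);	/* PRD table + DMA pad buffer */
	if (rc)
		return rc;
	ap->private_data = kmalloc(sizeof(struct foo_port_priv),
				   GFP_KERNEL);
	if (!ap->private_data) {
		ata_port_stop(ap);	/* undo ata_port_start() */
		return -ENOMEM;
	}
	return 0;
}
#endif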
4404 * ata_port_stop - Undo ata_port_start()
4405 * @ap: Port to shut down
4407 * Frees the PRD table.
4409 * May be used as the port_stop() entry in ata_port_operations.
4411 * LOCKING:
4412 * Inherited from caller.
4415 void ata_port_stop (struct ata_port *ap)
4417 struct device *dev = ap->dev;
4419 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4420 ata_pad_free(ap, dev);
4423 void ata_host_stop (struct ata_host_set *host_set)
4425 if (host_set->mmio_base)
4426 iounmap(host_set->mmio_base);
/**
 * ata_host_remove - Unregister SCSI host structure with upper layers
 * @ap: Port to unregister
 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
        struct Scsi_Host *sh = ap->host;

        DPRINTK("ENTER\n");

        if (do_unregister)
                scsi_remove_host(sh);

        ap->ops->port_stop(ap);
}
/**
 * ata_host_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: associated SCSI mid-layer structure
 * @host_set: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure, and its associated
 * scsi_host.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
                          struct ata_host_set *host_set,
                          const struct ata_probe_ent *ent, unsigned int port_no)
{
        unsigned int i;

        host->max_id = 16;
        host->max_lun = 1;
        host->max_channel = 1;
        host->unique_id = ata_unique_id++;
        host->max_cmd_len = 12;

        ap->flags = ATA_FLAG_PORT_DISABLED;
        ap->id = host->unique_id;
        ap->host = host;
        ap->ctl = ATA_DEVCTL_OBS;
        ap->host_set = host_set;
        ap->dev = ent->dev;
        ap->port_no = port_no;
        ap->hard_port_no =
                ent->legacy_mode ? ent->hard_port_no : port_no;
        ap->pio_mask = ent->pio_mask;
        ap->mwdma_mask = ent->mwdma_mask;
        ap->udma_mask = ent->udma_mask;
        ap->flags |= ent->host_flags;
        ap->ops = ent->port_ops;
        ap->cbl = ATA_CBL_NONE;
        ap->active_tag = ATA_TAG_POISON;
        ap->last_ctl = 0xFF;

        INIT_WORK(&ap->port_task, NULL, NULL);
        INIT_LIST_HEAD(&ap->eh_done_q);

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                struct ata_device *dev = &ap->device[i];
                dev->devno = i;
                dev->pio_mask = UINT_MAX;
                dev->mwdma_mask = UINT_MAX;
                dev->udma_mask = UINT_MAX;
        }

#ifdef ATA_IRQ_TRAP
        ap->stats.unhandled_irq = 1;
        ap->stats.idle_irq = 1;
#endif

        memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collection of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, NULL on error.
 */
static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
                                      struct ata_host_set *host_set,
                                      unsigned int port_no)
{
        struct Scsi_Host *host;
        struct ata_port *ap;
        int rc;

        DPRINTK("ENTER\n");
        host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
        if (!host)
                return NULL;

        host->transportt = &ata_scsi_transport_template;

        ap = (struct ata_port *) &host->hostdata[0];

        ata_host_init(ap, host, host_set, ent, port_no);

        rc = ap->ops->port_start(ap);
        if (rc)
                goto err_out;

        return ap;

err_out:
        scsi_host_put(host);
        return NULL;
}
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
        unsigned int count = 0, i;
        struct device *dev = ent->dev;
        struct ata_host_set *host_set;

        DPRINTK("ENTER\n");
        /* alloc a container for our list of ATA ports (buses) */
        host_set = kzalloc(sizeof(struct ata_host_set) +
                           (ent->n_ports * sizeof(void *)), GFP_KERNEL);
        if (!host_set)
                return 0;
        spin_lock_init(&host_set->lock);

        host_set->dev = dev;
        host_set->n_ports = ent->n_ports;
        host_set->irq = ent->irq;
        host_set->mmio_base = ent->mmio_base;
        host_set->private_data = ent->private_data;
        host_set->ops = ent->port_ops;

        /* register each port bound to this device */
        for (i = 0; i < ent->n_ports; i++) {
                struct ata_port *ap;
                unsigned long xfer_mode_mask;

                ap = ata_host_add(ent, host_set, i);
                if (!ap)
                        goto err_out;

                host_set->ports[i] = ap;
                xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
                                 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
                                 (ap->pio_mask << ATA_SHIFT_PIO);

                /* print per-port info to dmesg */
                printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
                       "bmdma 0x%lX irq %lu\n",
                       ap->id,
                       ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
                       ata_mode_string(xfer_mode_mask),
                       ap->ioaddr.cmd_addr,
                       ap->ioaddr.ctl_addr,
                       ap->ioaddr.bmdma_addr,
                       ent->irq);

                ata_chk_status(ap);
                host_set->ops->irq_clear(ap);
                count++;
        }

        if (!count)
                goto err_free_ret;

        /* obtain irq, which is shared between channels */
        if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
                        DRV_NAME, host_set))
                goto err_out;

        /* perform each probe synchronously */
        DPRINTK("probe begin\n");
        for (i = 0; i < count; i++) {
                struct ata_port *ap;
                int rc;

                ap = host_set->ports[i];

                DPRINTK("ata%u: bus probe begin\n", ap->id);
                rc = ata_bus_probe(ap);
                DPRINTK("ata%u: bus probe end\n", ap->id);

                if (rc) {
                        /* FIXME: do something useful here?
                         * Current libata behavior will
                         * tear down everything when
                         * the module is removed
                         * or the h/w is unplugged.
                         */
                }

                rc = scsi_add_host(ap->host, dev);
                if (rc) {
                        printk(KERN_ERR "ata%u: scsi_add_host failed\n",
                               ap->id);
                        /* FIXME: do something useful here */
                        /* FIXME: handle unconditional calls to
                         * scsi_scan_host and ata_host_remove, below,
                         * at the very least
                         */
                }
        }

        /* probes are done, now scan each port's disk(s) */
        DPRINTK("host probe begin\n");
        for (i = 0; i < count; i++) {
                struct ata_port *ap = host_set->ports[i];

                ata_scsi_scan_host(ap);
        }

        dev_set_drvdata(dev, host_set);

        VPRINTK("EXIT, returning %u\n", ent->n_ports);
        return ent->n_ports;    /* success */

err_out:
        for (i = 0; i < count; i++) {
                ata_host_remove(host_set->ports[i], 1);
                scsi_host_put(host_set->ports[i]->host);
        }
err_free_ret:
        kfree(host_set);
        VPRINTK("EXIT, returning 0\n");
        return 0;
}
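/*
 * Probe sketch (illustrative; the probe_ent fields shown are the ones
 * this function consumes, but the values and "my_*" names are
 * hypothetical, and allocation/cleanup are elided):
 *
 *        struct ata_probe_ent *probe_ent;       // allocated by the driver
 *
 *        probe_ent->dev      = &pdev->dev;
 *        probe_ent->n_ports  = 2;
 *        probe_ent->irq      = pdev->irq;
 *        probe_ent->port_ops = &my_port_ops;
 *        probe_ent->sht      = &my_sht;
 *
 *        if (!ata_device_add(probe_ent))        // returns ports registered
 *                return -ENODEV;                // zero means total failure
 */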
/**
 * ata_host_set_remove - PCI layer callback for device removal
 * @host_set: ATA host set that was removed
 *
 * Unregister all objects associated with this host set. Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
void ata_host_set_remove(struct ata_host_set *host_set)
{
        struct ata_port *ap;
        unsigned int i;

        for (i = 0; i < host_set->n_ports; i++) {
                ap = host_set->ports[i];
                scsi_remove_host(ap->host);
        }

        free_irq(host_set->irq, host_set);

        for (i = 0; i < host_set->n_ports; i++) {
                ap = host_set->ports[i];

                ata_scsi_release(ap->host);

                if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
                        struct ata_ioports *ioaddr = &ap->ioaddr;

                        if (ioaddr->cmd_addr == 0x1f0)
                                release_region(0x1f0, 8);
                        else if (ioaddr->cmd_addr == 0x170)
                                release_region(0x170, 8);
                }

                scsi_host_put(ap->host);
        }

        if (host_set->ops->host_stop)
                host_set->ops->host_stop(host_set);

        kfree(host_set);
}
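/*
 * Teardown sketch (illustrative; "my_remove" is hypothetical): a non-PCI
 * driver's removal hook retrieves the host set it stashed in drvdata and
 * hands it back here, mirroring what ata_pci_remove_one() does below:
 *
 *        static void my_remove(struct device *dev)
 *        {
 *                struct ata_host_set *host_set = dev_get_drvdata(dev);
 *
 *                ata_host_set_remove(host_set);
 *                dev_set_drvdata(dev, NULL);
 *        }
 */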
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */
int ata_scsi_release(struct Scsi_Host *host)
{
        struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
        int i;

        DPRINTK("ENTER\n");

        ap->ops->port_disable(ap);
        ata_host_remove(ap, 0);
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                kfree(ap->device[i].id);

        DPRINTK("EXIT\n");
        return 1;
}
/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
        ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
        ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
        ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
        ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
        ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
        ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
        ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
        ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
        ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
        ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
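/*
 * Initialization sketch (illustrative; 0x1f0/0x3f6 are the standard
 * legacy primary-channel addresses, but the surrounding code is
 * hypothetical):
 *
 *        struct ata_ioports *ioaddr = &probe_ent->port[0];
 *
 *        ioaddr->cmd_addr = 0x1f0;        // legacy primary command block
 *        ioaddr->altstatus_addr =
 *        ioaddr->ctl_addr = 0x3f6;        // not filled in by ata_std_ports()
 *        ata_std_ports(ioaddr);           // derives data..command from cmd_addr
 */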
#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host_set *host_set)
{
        struct pci_dev *pdev = to_pci_dev(host_set->dev);

        pci_iounmap(pdev, host_set->mmio_base);
}

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that a
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one (struct pci_dev *pdev)
{
        struct device *dev = pci_dev_to_dev(pdev);
        struct ata_host_set *host_set = dev_get_drvdata(dev);

        ata_host_set_remove(host_set);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
        unsigned long tmp = 0;

        switch (bits->width) {
        case 1: {
                u8 tmp8 = 0;
                pci_read_config_byte(pdev, bits->reg, &tmp8);
                tmp = tmp8;
                break;
        }
        case 2: {
                u16 tmp16 = 0;
                pci_read_config_word(pdev, bits->reg, &tmp16);
                tmp = tmp16;
                break;
        }
        case 4: {
                u32 tmp32 = 0;
                pci_read_config_dword(pdev, bits->reg, &tmp32);
                tmp = tmp32;
                break;
        }

        default:
                return -EINVAL;
        }

        tmp &= bits->mask;

        return (tmp == bits->val) ? 1 : 0;
}
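/*
 * Usage sketch (illustrative; the offset, mask and value shown are
 * hypothetical, the field order matches the switch above: reg, width,
 * mask, val): a driver can test an "enable" bit in PCI config space
 * before touching a channel:
 *
 *        static const struct pci_bits my_enable_bits = {
 *                0x41,        // reg: config-space offset
 *                1,           // width: 1, 2 or 4 bytes
 *                0x80,        // mask applied to the value read
 *                0x80,        // val: expected masked result
 *        };
 *
 *        if (!pci_test_config_bits(pdev, &my_enable_bits))
 *                return -ENOENT;        // bit clear: channel disabled
 */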
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        pci_enable_device(pdev);
        pci_set_master(pdev);
        return 0;
}
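/*
 * Wiring sketch (illustrative; "my_pci_driver" is hypothetical): drivers
 * that need only this generic save/restore behaviour can use the two
 * helpers above directly as their PCI power-management hooks:
 *
 *        static struct pci_driver my_pci_driver = {
 *                .suspend        = ata_pci_device_suspend,
 *                .resume         = ata_pci_device_resume,
 *                // probe/remove and id_table omitted
 *        };
 */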
#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
        ata_wq = create_workqueue("ata");
        if (!ata_wq)
                return -ENOMEM;

        printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
        return 0;
}

static void __exit ata_exit(void)
{
        destroy_workqueue(ata_wq);
}

module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;

int ata_ratelimit(void)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&ata_ratelimit_lock, flags);

        if (time_after(jiffies, ratelimit_time)) {
                rc = 1;
                ratelimit_time = jiffies + (HZ/5);
        } else
                rc = 0;

        spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

        return rc;
}
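/*
 * Usage sketch (illustrative; the message is hypothetical): gate noisy,
 * interrupt-time printk()s so at most one fires per HZ/5 window:
 *
 *        if (ata_ratelimit())
 *                printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */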
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(__ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_probeinit);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_device_suspend);
EXPORT_SYMBOL_GPL(ata_device_resume);
EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);