[PATCH] libata: handle 0xff status properly
[linux-2.6/kvm.git] drivers/ata/libata-core.c
1 /*
2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
40 #include <linux/mm.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
56 #include <asm/io.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
60 #include "libata.h"
62 /* debounce timing parameters in msecs { interval, duration, timeout } */
63 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
65 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
67 static unsigned int ata_dev_init_params(struct ata_device *dev,
68 u16 heads, u16 sectors);
69 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
70 static void ata_dev_xfermask(struct ata_device *dev);
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
75 struct workqueue_struct *ata_aux_wq;
77 int atapi_enabled = 1;
78 module_param(atapi_enabled, int, 0444);
79 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
81 int atapi_dmadir = 0;
82 module_param(atapi_dmadir, int, 0444);
83 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
85 int libata_fua = 0;
86 module_param_named(fua, libata_fua, int, 0444);
87 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
89 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
90 module_param(ata_probe_timeout, int, 0444);
91 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
93 MODULE_AUTHOR("Jeff Garzik");
94 MODULE_DESCRIPTION("Library module for ATA devices");
95 MODULE_LICENSE("GPL");
96 MODULE_VERSION(DRV_VERSION);
99 /**
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 * @fis: Buffer into which data will be output
103 * @pmp: Port multiplier port
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
108 * LOCKING:
109 * Inherited from caller.
112 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
120 fis[4] = tf->lbal;
121 fis[5] = tf->lbam;
122 fis[6] = tf->lbah;
123 fis[7] = tf->device;
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
130 fis[12] = tf->nsect;
131 fis[13] = tf->hob_nsect;
132 fis[14] = 0;
133 fis[15] = tf->ctl;
135 fis[16] = 0;
136 fis[17] = 0;
137 fis[18] = 0;
138 fis[19] = 0;
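/*
 * Illustrative sketch, compiled out: a minimal userspace model of the
 * 20-byte Register - Host to Device FIS that ata_tf_to_fis() fills in
 * above.  "struct example_tf" is a local stand-in for the taskfile
 * fields used here, not the kernel's struct ata_taskfile.
 */
#if 0
#include <stdint.h>
#include <assert.h>

struct example_tf {
	uint8_t command, feature, lbal, lbam, lbah, device, nsect, ctl;
};

static void example_tf_to_fis(const struct example_tf *tf, uint8_t fis[20], uint8_t pmp)
{
	fis[0] = 0x27;			 /* FIS type: Register - Host to Device */
	fis[1] = (pmp & 0xf) | (1 << 7); /* PM port number, bit 7 = command update */
	fis[2] = tf->command;		 /* command register */
	fis[3] = tf->feature;		 /* features register */
	fis[4] = tf->lbal;  fis[5] = tf->lbam;  fis[6] = tf->lbah;  fis[7] = tf->device;
	fis[12] = tf->nsect;		 /* sector count, low byte */
	fis[15] = tf->ctl;		 /* device control register */
	/* bytes 8-11, 13-14 and 16-19 stay zero for a simple 28-bit command */
}

static void example_fis_usage(void)
{
	struct example_tf tf = { .command = 0xec /* IDENTIFY DEVICE */ };
	uint8_t fis[20] = { 0 };

	example_tf_to_fis(&tf, fis, 0);
	assert(fis[0] == 0x27 && fis[2] == 0xec);
}
#endif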
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
148 * LOCKING:
149 * Inherited from caller.
152 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
157 tf->lbal = fis[4];
158 tf->lbam = fis[5];
159 tf->lbah = fis[6];
160 tf->device = fis[7];
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
166 tf->nsect = fis[12];
167 tf->hob_nsect = fis[13];
170 static const u8 ata_rw_cmds[] = {
171 /* pio multi */
172 ATA_CMD_READ_MULTI,
173 ATA_CMD_WRITE_MULTI,
174 ATA_CMD_READ_MULTI_EXT,
175 ATA_CMD_WRITE_MULTI_EXT,
179 ATA_CMD_WRITE_MULTI_FUA_EXT,
180 /* pio */
181 ATA_CMD_PIO_READ,
182 ATA_CMD_PIO_WRITE,
183 ATA_CMD_PIO_READ_EXT,
184 ATA_CMD_PIO_WRITE_EXT,
189 /* dma */
190 ATA_CMD_READ,
191 ATA_CMD_WRITE,
192 ATA_CMD_READ_EXT,
193 ATA_CMD_WRITE_EXT,
197 ATA_CMD_WRITE_FUA_EXT
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure
204 * Examine the device configuration and tf->flags to calculate
205 * the proper read/write commands and protocol to use.
207 * LOCKING:
208 * caller.
210 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
214 u8 cmd;
216 int index, fua, lba48, write;
218 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
219 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
220 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
222 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO;
224 index = dev->multi_count ? 0 : 8;
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO;
228 index = dev->multi_count ? 0 : 8;
229 } else {
230 tf->protocol = ATA_PROT_DMA;
231 index = 16;
234 cmd = ata_rw_cmds[index + fua + lba48 + write];
235 if (cmd) {
236 tf->command = cmd;
237 return 0;
239 return -1;
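/*
 * Illustrative sketch, compiled out: the index arithmetic used by
 * ata_rwcmd_protocol() above.  The base offsets (0 = PIO multi, 8 = PIO,
 * 16 = DMA) select one of the three 8-entry groups in ata_rw_cmds[],
 * and fua/lba48/write contribute 4/2/1 within the group.
 */
#if 0
#include <assert.h>

static int example_rw_cmd_index(int base, int fua, int lba48, int write)
{
	return base + (fua ? 4 : 0) + (lba48 ? 2 : 0) + (write ? 1 : 0);
}

static void example_rw_cmd_index_usage(void)
{
	/* DMA, LBA48, FUA write -> 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT */
	assert(example_rw_cmd_index(16, 1, 1, 1) == 23);
	/* plain PIO read -> 8 + 0 + 0 + 0 = 8, i.e. ATA_CMD_PIO_READ */
	assert(example_rw_cmd_index(8, 0, 0, 0) == 8);
}
#endif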
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask
246 * @udma_mask: udma_mask
248 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
249 * unsigned int xfer_mask.
251 * LOCKING:
252 * None.
254 * RETURNS:
255 * Packed xfer_mask.
257 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
258 unsigned int mwdma_mask,
259 unsigned int udma_mask)
261 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
262 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
263 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
267 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
268 * @xfer_mask: xfer_mask to unpack
269 * @pio_mask: resulting pio_mask
270 * @mwdma_mask: resulting mwdma_mask
271 * @udma_mask: resulting udma_mask
273 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
274 * Any NULL destination masks will be ignored.
276 static void ata_unpack_xfermask(unsigned int xfer_mask,
277 unsigned int *pio_mask,
278 unsigned int *mwdma_mask,
279 unsigned int *udma_mask)
281 if (pio_mask)
282 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
283 if (mwdma_mask)
284 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
285 if (udma_mask)
286 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
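/*
 * Illustrative sketch, compiled out: how the pack/unpack helpers above
 * fold the three per-type bitmaps into a single word.  The shift values
 * below are example values only; the real ATA_SHIFT_xxx and ATA_MASK_xxx
 * constants come from <linux/ata.h>.
 */
#if 0
#include <assert.h>

enum { EX_SHIFT_PIO = 0, EX_SHIFT_MWDMA = 8, EX_SHIFT_UDMA = 16 };	/* assumed layout */

static unsigned int example_pack(unsigned int pio, unsigned int mwdma, unsigned int udma)
{
	return (pio << EX_SHIFT_PIO) | (mwdma << EX_SHIFT_MWDMA) | (udma << EX_SHIFT_UDMA);
}

static void example_pack_usage(void)
{
	/* PIO0-4, MWDMA0-2, UDMA0-5 supported */
	unsigned int xfer_mask = example_pack(0x1f, 0x07, 0x3f);

	assert(((xfer_mask >> EX_SHIFT_MWDMA) & 0xff) == 0x07);
	assert(((xfer_mask >> EX_SHIFT_UDMA) & 0xff) == 0x3f);
}
#endif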
289 static const struct ata_xfer_ent {
290 int shift, bits;
291 u8 base;
292 } ata_xfer_tbl[] = {
293 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
294 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
295 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
296 { -1, },
300 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
301 * @xfer_mask: xfer_mask of interest
303 * Return matching XFER_* value for @xfer_mask. Only the highest
304 * bit of @xfer_mask is considered.
306 * LOCKING:
307 * None.
309 * RETURNS:
310 * Matching XFER_* value, 0 if no match found.
312 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
314 int highbit = fls(xfer_mask) - 1;
315 const struct ata_xfer_ent *ent;
317 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
318 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
319 return ent->base + highbit - ent->shift;
320 return 0;
324 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
325 * @xfer_mode: XFER_* of interest
327 * Return matching xfer_mask for @xfer_mode.
329 * LOCKING:
330 * None.
332 * RETURNS:
333 * Matching xfer_mask, 0 if no match found.
335 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
337 const struct ata_xfer_ent *ent;
339 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
340 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
341 return 1 << (ent->shift + xfer_mode - ent->base);
342 return 0;
346 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
347 * @xfer_mode: XFER_* of interest
349 * Return matching xfer_shift for @xfer_mode.
351 * LOCKING:
352 * None.
354 * RETURNS:
355 * Matching xfer_shift, -1 if no match found.
357 static int ata_xfer_mode2shift(unsigned int xfer_mode)
359 const struct ata_xfer_ent *ent;
361 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
362 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
363 return ent->shift;
364 return -1;
368 * ata_mode_string - convert xfer_mask to string
369 * @xfer_mask: mask of bits supported; only highest bit counts.
371 * Determine string which represents the highest speed
372 * (highest bit in @xfer_mask).
374 * LOCKING:
375 * None.
377 * RETURNS:
378 * Constant C string representing highest speed listed in
379 * @xfer_mask, or the constant C string "<n/a>".
381 static const char *ata_mode_string(unsigned int xfer_mask)
383 static const char * const xfer_mode_str[] = {
384 "PIO0",
385 "PIO1",
386 "PIO2",
387 "PIO3",
388 "PIO4",
389 "PIO5",
390 "PIO6",
391 "MWDMA0",
392 "MWDMA1",
393 "MWDMA2",
394 "MWDMA3",
395 "MWDMA4",
396 "UDMA/16",
397 "UDMA/25",
398 "UDMA/33",
399 "UDMA/44",
400 "UDMA/66",
401 "UDMA/100",
402 "UDMA/133",
403 "UDMA7",
405 int highbit;
407 highbit = fls(xfer_mask) - 1;
408 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
409 return xfer_mode_str[highbit];
410 return "<n/a>";
413 static const char *sata_spd_string(unsigned int spd)
415 static const char * const spd_str[] = {
416 "1.5 Gbps",
417 "3.0 Gbps",
420 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
421 return "<unknown>";
422 return spd_str[spd - 1];
425 void ata_dev_disable(struct ata_device *dev)
427 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
428 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
429 dev->class++;
434 * ata_pio_devchk - PATA device presence detection
435 * @ap: ATA channel to examine
436 * @device: Device to examine (starting at zero)
438 * This technique was originally described in
439 * Hale Landis's ATADRVR (www.ata-atapi.com), and
440 * later found its way into the ATA/ATAPI spec.
442 * Write a pattern to the ATA shadow registers,
443 * and if a device is present, it will respond by
444 * correctly storing and echoing back the
445 * ATA shadow register contents.
447 * LOCKING:
448 * caller.
451 static unsigned int ata_pio_devchk(struct ata_port *ap,
452 unsigned int device)
454 struct ata_ioports *ioaddr = &ap->ioaddr;
455 u8 nsect, lbal;
457 ap->ops->dev_select(ap, device);
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
462 outb(0xaa, ioaddr->nsect_addr);
463 outb(0x55, ioaddr->lbal_addr);
465 outb(0x55, ioaddr->nsect_addr);
466 outb(0xaa, ioaddr->lbal_addr);
468 nsect = inb(ioaddr->nsect_addr);
469 lbal = inb(ioaddr->lbal_addr);
471 if ((nsect == 0x55) && (lbal == 0xaa))
472 return 1; /* we found a device */
474 return 0; /* nothing found */
478 * ata_mmio_devchk - PATA device presence detection
479 * @ap: ATA channel to examine
480 * @device: Device to examine (starting at zero)
482 * This technique was originally described in
483 * Hale Landis's ATADRVR (www.ata-atapi.com), and
484 * later found its way into the ATA/ATAPI spec.
486 * Write a pattern to the ATA shadow registers,
487 * and if a device is present, it will respond by
488 * correctly storing and echoing back the
489 * ATA shadow register contents.
491 * LOCKING:
492 * caller.
495 static unsigned int ata_mmio_devchk(struct ata_port *ap,
496 unsigned int device)
498 struct ata_ioports *ioaddr = &ap->ioaddr;
499 u8 nsect, lbal;
501 ap->ops->dev_select(ap, device);
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
506 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
509 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
510 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
512 nsect = readb((void __iomem *) ioaddr->nsect_addr);
513 lbal = readb((void __iomem *) ioaddr->lbal_addr);
515 if ((nsect == 0x55) && (lbal == 0xaa))
516 return 1; /* we found a device */
518 return 0; /* nothing found */
522 * ata_devchk - PATA device presence detection
523 * @ap: ATA channel to examine
524 * @device: Device to examine (starting at zero)
526 * Dispatch ATA device presence detection, depending
527 * on whether we are using PIO or MMIO to talk to the
528 * ATA shadow registers.
530 * LOCKING:
531 * caller.
534 static unsigned int ata_devchk(struct ata_port *ap,
535 unsigned int device)
537 if (ap->flags & ATA_FLAG_MMIO)
538 return ata_mmio_devchk(ap, device);
539 return ata_pio_devchk(ap, device);
543 * ata_dev_classify - determine device type based on ATA-spec signature
544 * @tf: ATA taskfile register set for device to be identified
546 * Determine from taskfile register contents whether a device is
547 * ATA or ATAPI, as per "Signature and persistence" section
548 * of ATA/PI spec (volume 1, sect 5.14).
550 * LOCKING:
551 * None.
553 * RETURNS:
554 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
555 * in the event of failure.
558 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
560 /* Apple's open source Darwin code hints that some devices only
561 * put a proper signature into the LBA mid/high registers,
562 * so we only check those. It's sufficient for uniqueness.
565 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
566 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
567 DPRINTK("found ATA device by sig\n");
568 return ATA_DEV_ATA;
571 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
572 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
573 DPRINTK("found ATAPI device by sig\n");
574 return ATA_DEV_ATAPI;
577 DPRINTK("unknown device\n");
578 return ATA_DEV_UNKNOWN;
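/*
 * Illustrative sketch, compiled out: the signature check performed by
 * ata_dev_classify() above, reduced to the two register values it
 * actually inspects.  The enum values are local to the example and do
 * not mirror the ATA_DEV_xxx numbering.
 */
#if 0
#include <assert.h>

enum example_class { EX_DEV_ATA, EX_DEV_ATAPI, EX_DEV_UNKNOWN };

static enum example_class example_classify(unsigned char lbam, unsigned char lbah)
{
	if ((lbam == 0x00 && lbah == 0x00) || (lbam == 0x3c && lbah == 0xc3))
		return EX_DEV_ATA;	/* ATA device signature */
	if ((lbam == 0x14 && lbah == 0xeb) || (lbam == 0x69 && lbah == 0x96))
		return EX_DEV_ATAPI;	/* ATAPI (packet) device signature */
	return EX_DEV_UNKNOWN;
}

static void example_classify_usage(void)
{
	assert(example_classify(0x14, 0xeb) == EX_DEV_ATAPI);
	assert(example_classify(0x00, 0x00) == EX_DEV_ATA);
}
#endif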
582 * ata_dev_try_classify - Parse returned ATA device signature
583 * @ap: ATA channel to examine
584 * @device: Device to examine (starting at zero)
585 * @r_err: Value of error register on completion
587 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
588 * an ATA/ATAPI-defined set of values is placed in the ATA
589 * shadow registers, indicating the results of device detection
590 * and diagnostics.
592 * Select the ATA device, and read the values from the ATA shadow
593 * registers. Then parse according to the Error register value,
594 * and the spec-defined values examined by ata_dev_classify().
596 * LOCKING:
597 * caller.
599 * RETURNS:
600 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
603 static unsigned int
604 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
606 struct ata_taskfile tf;
607 unsigned int class;
608 u8 err;
610 ap->ops->dev_select(ap, device);
612 memset(&tf, 0, sizeof(tf));
614 ap->ops->tf_read(ap, &tf);
615 err = tf.feature;
616 if (r_err)
617 *r_err = err;
619 /* see if device passed diags: if master then continue and warn later */
620 if (err == 0 && device == 0)
621 /* diagnostic fail : do nothing _YET_ */
622 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
623 else if (err == 1)
624 /* do nothing */ ;
625 else if ((device == 0) && (err == 0x81))
626 /* do nothing */ ;
627 else
628 return ATA_DEV_NONE;
630 /* determine if device is ATA or ATAPI */
631 class = ata_dev_classify(&tf);
633 if (class == ATA_DEV_UNKNOWN)
634 return ATA_DEV_NONE;
635 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
636 return ATA_DEV_NONE;
637 return class;
641 * ata_id_string - Convert IDENTIFY DEVICE page into string
642 * @id: IDENTIFY DEVICE results we will examine
643 * @s: string into which data is output
644 * @ofs: offset into identify device page
645 * @len: length of string to return. must be an even number.
647 * The strings in the IDENTIFY DEVICE page are broken up into
648 * 16-bit chunks. Run through the string, and output each
649 * 8-bit chunk linearly, regardless of platform.
651 * LOCKING:
652 * caller.
655 void ata_id_string(const u16 *id, unsigned char *s,
656 unsigned int ofs, unsigned int len)
658 unsigned int c;
660 while (len > 0) {
661 c = id[ofs] >> 8;
662 *s = c;
663 s++;
665 c = id[ofs] & 0xff;
666 *s = c;
667 s++;
669 ofs++;
670 len -= 2;
675 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
676 * @id: IDENTIFY DEVICE results we will examine
677 * @s: string into which data is output
678 * @ofs: offset into identify device page
679 * @len: length of string to return. must be an odd number.
681 * This function is identical to ata_id_string except that it
682 * trims trailing spaces and terminates the resulting string with
683 * null. @len must be actual maximum length (even number) + 1.
685 * LOCKING:
686 * caller.
688 void ata_id_c_string(const u16 *id, unsigned char *s,
689 unsigned int ofs, unsigned int len)
691 unsigned char *p;
693 WARN_ON(!(len & 1));
695 ata_id_string(id, s, ofs, len - 1);
697 p = s + strnlen(s, len - 1);
698 while (p > s && p[-1] == ' ')
699 p--;
700 *p = '\0';
703 static u64 ata_id_n_sectors(const u16 *id)
705 if (ata_id_has_lba(id)) {
706 if (ata_id_has_lba48(id))
707 return ata_id_u64(id, 100);
708 else
709 return ata_id_u32(id, 60);
710 } else {
711 if (ata_id_current_chs_valid(id))
712 return ata_id_u32(id, 57);
713 else
714 return id[1] * id[3] * id[6];
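/*
 * Illustrative sketch, compiled out: reading an IDENTIFY DEVICE buffer
 * the way ata_id_string() and ata_id_n_sectors() do above - each 16-bit
 * word carries two characters, high byte first, and the LBA28 capacity
 * sits in words 60-61 (low word first).
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <assert.h>

static void example_id_string(const uint16_t *id, char *s, unsigned int ofs, unsigned int len)
{
	while (len >= 2) {
		*s++ = id[ofs] >> 8;	/* high byte is the first character */
		*s++ = id[ofs] & 0xff;	/* low byte is the second character */
		ofs++;
		len -= 2;
	}
}

static void example_id_usage(void)
{
	uint16_t id[62] = { 0 };
	char model[5] = { 0 };

	id[27] = ('A' << 8) | 'B';	/* model name starts at word 27 */
	id[28] = ('C' << 8) | 'D';
	id[60] = 0x5678;		/* LBA28 sector count, low word */
	id[61] = 0x0012;		/* LBA28 sector count, high word */

	example_id_string(id, model, 27, 4);
	assert(strcmp(model, "ABCD") == 0);
	assert(((uint32_t)id[61] << 16 | id[60]) == 0x00125678);
}
#endif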
719 * ata_noop_dev_select - Select device 0/1 on ATA bus
720 * @ap: ATA channel to manipulate
721 * @device: ATA device (numbered from zero) to select
723 * This function performs no operation.
725 * May be used as the dev_select() entry in ata_port_operations.
727 * LOCKING:
728 * caller.
730 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
736 * ata_std_dev_select - Select device 0/1 on ATA bus
737 * @ap: ATA channel to manipulate
738 * @device: ATA device (numbered from zero) to select
740 * Use the method defined in the ATA specification to
741 * make either device 0, or device 1, active on the
742 * ATA channel. Works with both PIO and MMIO.
744 * May be used as the dev_select() entry in ata_port_operations.
746 * LOCKING:
747 * caller.
750 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
752 u8 tmp;
754 if (device == 0)
755 tmp = ATA_DEVICE_OBS;
756 else
757 tmp = ATA_DEVICE_OBS | ATA_DEV1;
759 if (ap->flags & ATA_FLAG_MMIO) {
760 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
761 } else {
762 outb(tmp, ap->ioaddr.device_addr);
764 ata_pause(ap); /* needed; also flushes, for mmio */
768 * ata_dev_select - Select device 0/1 on ATA bus
769 * @ap: ATA channel to manipulate
770 * @device: ATA device (numbered from zero) to select
771 * @wait: non-zero to wait for Status register BSY bit to clear
772 * @can_sleep: non-zero if context allows sleeping
774 * Use the method defined in the ATA specification to
775 * make either device 0, or device 1, active on the
776 * ATA channel.
778 * This is a high-level version of ata_std_dev_select(),
779 * which additionally provides the services of inserting
780 * the proper pauses and status polling, where needed.
782 * LOCKING:
783 * caller.
786 void ata_dev_select(struct ata_port *ap, unsigned int device,
787 unsigned int wait, unsigned int can_sleep)
789 if (ata_msg_probe(ap))
790 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
791 "device %u, wait %u\n", ap->id, device, wait);
793 if (wait)
794 ata_wait_idle(ap);
796 ap->ops->dev_select(ap, device);
798 if (wait) {
799 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
800 msleep(150);
801 ata_wait_idle(ap);
806 * ata_dump_id - IDENTIFY DEVICE info debugging output
807 * @id: IDENTIFY DEVICE page to dump
809 * Dump selected 16-bit words from the given IDENTIFY DEVICE
810 * page.
812 * LOCKING:
813 * caller.
816 static inline void ata_dump_id(const u16 *id)
818 DPRINTK("49==0x%04x "
819 "53==0x%04x "
820 "63==0x%04x "
821 "64==0x%04x "
822 "75==0x%04x \n",
823 id[49],
824 id[53],
825 id[63],
826 id[64],
827 id[75]);
828 DPRINTK("80==0x%04x "
829 "81==0x%04x "
830 "82==0x%04x "
831 "83==0x%04x "
832 "84==0x%04x \n",
833 id[80],
834 id[81],
835 id[82],
836 id[83],
837 id[84]);
838 DPRINTK("88==0x%04x "
839 "93==0x%04x\n",
840 id[88],
841 id[93]);
845 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
846 * @id: IDENTIFY data to compute xfer mask from
848 * Compute the xfermask for this device. This is not as trivial
849 * as it seems if we must consider early devices correctly.
851 * FIXME: pre IDE drive timing (do we care ?).
853 * LOCKING:
854 * None.
856 * RETURNS:
857 * Computed xfermask
859 static unsigned int ata_id_xfermask(const u16 *id)
861 unsigned int pio_mask, mwdma_mask, udma_mask;
863 /* Usual case. Word 53 indicates word 64 is valid */
864 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
865 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
866 pio_mask <<= 3;
867 pio_mask |= 0x7;
868 } else {
869 /* If word 64 isn't valid then Word 51 high byte holds
870 * the PIO timing number for the maximum. Turn it into
871 * a mask.
873 u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
874 if (mode < 5) /* Valid PIO range */
875 pio_mask = (2 << mode) - 1;
876 else
877 pio_mask = 1;
879 /* But wait.. there's more. Design your standards by
880 * committee and you too can get a free iordy field to
881 * process. However, it's the speeds, not the modes, that
882 * are supported... Note drivers using the timing API
883 * will get this right anyway
887 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
889 if (ata_id_is_cfa(id)) {
891 * Process compact flash extended modes
893 int pio = id[163] & 0x7;
894 int dma = (id[163] >> 3) & 7;
896 if (pio)
897 pio_mask |= (1 << 5);
898 if (pio > 1)
899 pio_mask |= (1 << 6);
900 if (dma)
901 mwdma_mask |= (1 << 3);
902 if (dma > 1)
903 mwdma_mask |= (1 << 4);
906 udma_mask = 0;
907 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
908 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
910 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
914 * ata_port_queue_task - Queue port_task
915 * @ap: The ata_port to queue port_task for
916 * @fn: workqueue function to be scheduled
917 * @data: data value to pass to workqueue function
918 * @delay: delay time for workqueue function
920 * Schedule @fn(@data) for execution after @delay jiffies using
921 * port_task. There is one port_task per port and it's the
922 * user(low level driver)'s responsibility to make sure that only
923 * one task is active at any given time.
925 * libata core layer takes care of synchronization between
926 * port_task and EH. ata_port_queue_task() may be ignored for EH
927 * synchronization.
929 * LOCKING:
930 * Inherited from caller.
932 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
933 unsigned long delay)
935 int rc;
937 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
938 return;
940 PREPARE_WORK(&ap->port_task, fn, data);
942 if (!delay)
943 rc = queue_work(ata_wq, &ap->port_task);
944 else
945 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
947 /* rc == 0 means that another user is using port task */
948 WARN_ON(rc == 0);
952 * ata_port_flush_task - Flush port_task
953 * @ap: The ata_port to flush port_task for
955 * After this function completes, port_task is guaranteed not to
956 * be running or scheduled.
958 * LOCKING:
959 * Kernel thread context (may sleep)
961 void ata_port_flush_task(struct ata_port *ap)
963 unsigned long flags;
965 DPRINTK("ENTER\n");
967 spin_lock_irqsave(ap->lock, flags);
968 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
969 spin_unlock_irqrestore(ap->lock, flags);
971 DPRINTK("flush #1\n");
972 flush_workqueue(ata_wq);
975 * At this point, if a task is running, it's guaranteed to see
976 * the FLUSH flag; thus, it will never queue pio tasks again.
977 * Cancel and flush.
979 if (!cancel_delayed_work(&ap->port_task)) {
980 if (ata_msg_ctl(ap))
981 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
982 __FUNCTION__);
983 flush_workqueue(ata_wq);
986 spin_lock_irqsave(ap->lock, flags);
987 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
988 spin_unlock_irqrestore(ap->lock, flags);
990 if (ata_msg_ctl(ap))
991 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
994 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
996 struct completion *waiting = qc->private_data;
998 complete(waiting);
1002 * ata_exec_internal - execute libata internal command
1003 * @dev: Device to which the command is sent
1004 * @tf: Taskfile registers for the command and the result
1005 * @cdb: CDB for packet command
1006 * @dma_dir: Data transfer direction of the command
1007 * @buf: Data buffer of the command
1008 * @buflen: Length of data buffer
1010 * Executes libata internal command with timeout. @tf contains
1011 * command on entry and result on return. Timeout and error
1012 * conditions are reported via return value. No recovery action
1013 * is taken after a command times out. It's the caller's duty to
1014 * clean up after timeout.
1016 * LOCKING:
1017 * None. Should be called with kernel context, might sleep.
1019 * RETURNS:
1020 * Zero on success, AC_ERR_* mask on failure
1022 unsigned ata_exec_internal(struct ata_device *dev,
1023 struct ata_taskfile *tf, const u8 *cdb,
1024 int dma_dir, void *buf, unsigned int buflen)
1026 struct ata_port *ap = dev->ap;
1027 u8 command = tf->command;
1028 struct ata_queued_cmd *qc;
1029 unsigned int tag, preempted_tag;
1030 u32 preempted_sactive, preempted_qc_active;
1031 DECLARE_COMPLETION_ONSTACK(wait);
1032 unsigned long flags;
1033 unsigned int err_mask;
1034 int rc;
1036 spin_lock_irqsave(ap->lock, flags);
1038 /* no internal command while frozen */
1039 if (ap->pflags & ATA_PFLAG_FROZEN) {
1040 spin_unlock_irqrestore(ap->lock, flags);
1041 return AC_ERR_SYSTEM;
1044 /* initialize internal qc */
1046 /* XXX: Tag 0 is used for drivers with legacy EH as some
1047 * drivers choke if any other tag is given. This breaks
1048 * ata_tag_internal() test for those drivers. Don't use new
1049 * EH stuff without converting to it.
1051 if (ap->ops->error_handler)
1052 tag = ATA_TAG_INTERNAL;
1053 else
1054 tag = 0;
1056 if (test_and_set_bit(tag, &ap->qc_allocated))
1057 BUG();
1058 qc = __ata_qc_from_tag(ap, tag);
1060 qc->tag = tag;
1061 qc->scsicmd = NULL;
1062 qc->ap = ap;
1063 qc->dev = dev;
1064 ata_qc_reinit(qc);
1066 preempted_tag = ap->active_tag;
1067 preempted_sactive = ap->sactive;
1068 preempted_qc_active = ap->qc_active;
1069 ap->active_tag = ATA_TAG_POISON;
1070 ap->sactive = 0;
1071 ap->qc_active = 0;
1073 /* prepare & issue qc */
1074 qc->tf = *tf;
1075 if (cdb)
1076 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1077 qc->flags |= ATA_QCFLAG_RESULT_TF;
1078 qc->dma_dir = dma_dir;
1079 if (dma_dir != DMA_NONE) {
1080 ata_sg_init_one(qc, buf, buflen);
1081 qc->nsect = buflen / ATA_SECT_SIZE;
1084 qc->private_data = &wait;
1085 qc->complete_fn = ata_qc_complete_internal;
1087 ata_qc_issue(qc);
1089 spin_unlock_irqrestore(ap->lock, flags);
1091 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1093 ata_port_flush_task(ap);
1095 if (!rc) {
1096 spin_lock_irqsave(ap->lock, flags);
1098 /* We're racing with irq here. If we lose, the
1099 * following test prevents us from completing the qc
1100 * twice. If we win, the port is frozen and will be
1101 * cleaned up by ->post_internal_cmd().
1103 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1104 qc->err_mask |= AC_ERR_TIMEOUT;
1106 if (ap->ops->error_handler)
1107 ata_port_freeze(ap);
1108 else
1109 ata_qc_complete(qc);
1111 if (ata_msg_warn(ap))
1112 ata_dev_printk(dev, KERN_WARNING,
1113 "qc timeout (cmd 0x%x)\n", command);
1116 spin_unlock_irqrestore(ap->lock, flags);
1119 /* do post_internal_cmd */
1120 if (ap->ops->post_internal_cmd)
1121 ap->ops->post_internal_cmd(qc);
1123 if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
1124 if (ata_msg_warn(ap))
1125 ata_dev_printk(dev, KERN_WARNING,
1126 "zero err_mask for failed "
1127 "internal command, assuming AC_ERR_OTHER\n");
1128 qc->err_mask |= AC_ERR_OTHER;
1131 /* finish up */
1132 spin_lock_irqsave(ap->lock, flags);
1134 *tf = qc->result_tf;
1135 err_mask = qc->err_mask;
1137 ata_qc_free(qc);
1138 ap->active_tag = preempted_tag;
1139 ap->sactive = preempted_sactive;
1140 ap->qc_active = preempted_qc_active;
1142 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1143 * Until those drivers are fixed, we detect the condition
1144 * here, fail the command with AC_ERR_SYSTEM and reenable the
1145 * port.
1147 * Note that this doesn't change any behavior as internal
1148 * command failure results in disabling the device in the
1149 * higher layer for LLDDs without new reset/EH callbacks.
1151 * Kill the following code as soon as those drivers are fixed.
1153 if (ap->flags & ATA_FLAG_DISABLED) {
1154 err_mask |= AC_ERR_SYSTEM;
1155 ata_port_probe(ap);
1158 spin_unlock_irqrestore(ap->lock, flags);
1160 return err_mask;
1164 * ata_do_simple_cmd - execute simple internal command
1165 * @dev: Device to which the command is sent
1166 * @cmd: Opcode to execute
1168 * Execute a 'simple' command that consists only of the opcode
1169 * 'cmd' itself, without filling any other registers
1171 * LOCKING:
1172 * Kernel thread context (may sleep).
1174 * RETURNS:
1175 * Zero on success, AC_ERR_* mask on failure
1177 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1179 struct ata_taskfile tf;
1181 ata_tf_init(dev, &tf);
1183 tf.command = cmd;
1184 tf.flags |= ATA_TFLAG_DEVICE;
1185 tf.protocol = ATA_PROT_NODATA;
1187 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
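/*
 * Illustrative sketch, compiled out: one way a caller might use
 * ata_do_simple_cmd() above for a register-only command.
 * ATA_CMD_STANDBYNOW1 (spin down immediately) is assumed to come from
 * <linux/ata.h>; the wrapper name and its error handling are
 * example-only.
 */
#if 0
static int example_spin_down(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);

	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING,
			       "STANDBY IMMEDIATE failed (err_mask=0x%x)\n", err_mask);
		return -EIO;
	}
	return 0;
}
#endif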
1191 * ata_pio_need_iordy - check if iordy needed
1192 * @adev: ATA device
1194 * Check if the current speed of the device requires IORDY. Used
1195 * by various controllers for chip configuration.
1198 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1200 int pio;
1201 int speed = adev->pio_mode - XFER_PIO_0;
1203 if (speed < 2)
1204 return 0;
1205 if (speed > 2)
1206 return 1;
1208 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1210 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1211 pio = adev->id[ATA_ID_EIDE_PIO];
1212 /* Is the speed faster than the drive allows non IORDY ? */
1213 if (pio) {
1214 /* This is cycle times not frequency - watch the logic! */
1215 if (pio > 240) /* PIO2 is 240nS per cycle */
1216 return 1;
1217 return 0;
1220 return 0;
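/*
 * Illustrative sketch, compiled out: the decision ata_pio_need_iordy()
 * makes above, expressed over plain parameters.  PIO modes above 2
 * always need IORDY and modes below 2 never do; at PIO2 the drive's
 * advertised non-IORDY cycle time decides (240 ns is the PIO2 cycle).
 */
#if 0
#include <assert.h>

static int example_need_iordy(int pio_mode, int eide_pio_cycle_ns)
{
	if (pio_mode < 2)
		return 0;
	if (pio_mode > 2)
		return 1;
	/* PIO2: only if the drive is too slow to run PIO2 without IORDY */
	return eide_pio_cycle_ns > 240;
}

static void example_need_iordy_usage(void)
{
	assert(example_need_iordy(4, 0) == 1);
	assert(example_need_iordy(2, 383) == 1);	/* drive too slow without IORDY */
	assert(example_need_iordy(2, 240) == 0);
}
#endif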
1224 * ata_dev_read_id - Read ID data from the specified device
1225 * @dev: target device
1226 * @p_class: pointer to class of the target device (may be changed)
1227 * @post_reset: is this read ID post-reset?
1228 * @id: buffer to read IDENTIFY data into
1230 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1231 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1232 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1233 * for pre-ATA4 drives.
1235 * LOCKING:
1236 * Kernel thread context (may sleep)
1238 * RETURNS:
1239 * 0 on success, -errno otherwise.
1241 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1242 int post_reset, u16 *id)
1244 struct ata_port *ap = dev->ap;
1245 unsigned int class = *p_class;
1246 struct ata_taskfile tf;
1247 unsigned int err_mask = 0;
1248 const char *reason;
1249 int rc;
1251 if (ata_msg_ctl(ap))
1252 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1253 __FUNCTION__, ap->id, dev->devno);
1255 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1257 retry:
1258 ata_tf_init(dev, &tf);
1260 switch (class) {
1261 case ATA_DEV_ATA:
1262 tf.command = ATA_CMD_ID_ATA;
1263 break;
1264 case ATA_DEV_ATAPI:
1265 tf.command = ATA_CMD_ID_ATAPI;
1266 break;
1267 default:
1268 rc = -ENODEV;
1269 reason = "unsupported class";
1270 goto err_out;
1273 tf.protocol = ATA_PROT_PIO;
1275 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1276 id, sizeof(id[0]) * ATA_ID_WORDS);
1277 if (err_mask) {
1278 rc = -EIO;
1279 reason = "I/O error";
1280 goto err_out;
1283 swap_buf_le16(id, ATA_ID_WORDS);
1285 /* sanity check */
1286 rc = -EINVAL;
1287 reason = "device reports illegal type";
1289 if (class == ATA_DEV_ATA) {
1290 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1291 goto err_out;
1292 } else {
1293 if (ata_id_is_ata(id))
1294 goto err_out;
1297 if (post_reset && class == ATA_DEV_ATA) {
1299 * The exact sequence expected by certain pre-ATA4 drives is:
1300 * SRST RESET
1301 * IDENTIFY
1302 * INITIALIZE DEVICE PARAMETERS
1303 * anything else..
1304 * Some drives were very specific about that exact sequence.
1306 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1307 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1308 if (err_mask) {
1309 rc = -EIO;
1310 reason = "INIT_DEV_PARAMS failed";
1311 goto err_out;
1314 /* current CHS translation info (id[53-58]) might be
1315 * changed. reread the identify device info.
1317 post_reset = 0;
1318 goto retry;
1322 *p_class = class;
1324 return 0;
1326 err_out:
1327 if (ata_msg_warn(ap))
1328 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1329 "(%s, err_mask=0x%x)\n", reason, err_mask);
1330 return rc;
1333 static inline u8 ata_dev_knobble(struct ata_device *dev)
1335 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1338 static void ata_dev_config_ncq(struct ata_device *dev,
1339 char *desc, size_t desc_sz)
1341 struct ata_port *ap = dev->ap;
1342 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1344 if (!ata_id_has_ncq(dev->id)) {
1345 desc[0] = '\0';
1346 return;
1348 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1349 snprintf(desc, desc_sz, "NCQ (not used)");
1350 return;
1352 if (ap->flags & ATA_FLAG_NCQ) {
1353 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1354 dev->flags |= ATA_DFLAG_NCQ;
1357 if (hdepth >= ddepth)
1358 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1359 else
1360 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1363 static void ata_set_port_max_cmd_len(struct ata_port *ap)
1365 int i;
1367 if (ap->scsi_host) {
1368 unsigned int len = 0;
1370 for (i = 0; i < ATA_MAX_DEVICES; i++)
1371 len = max(len, ap->device[i].cdb_len);
1373 ap->scsi_host->max_cmd_len = len;
1378 * ata_dev_configure - Configure the specified ATA/ATAPI device
1379 * @dev: Target device to configure
1380 * @print_info: Enable device info printout
1382 * Configure @dev according to @dev->id. Generic and low-level
1383 * driver specific fixups are also applied.
1385 * LOCKING:
1386 * Kernel thread context (may sleep)
1388 * RETURNS:
1389 * 0 on success, -errno otherwise
1391 int ata_dev_configure(struct ata_device *dev, int print_info)
1393 struct ata_port *ap = dev->ap;
1394 const u16 *id = dev->id;
1395 unsigned int xfer_mask;
1396 char revbuf[7]; /* XYZ-99\0 */
1397 int rc;
1399 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1400 ata_dev_printk(dev, KERN_INFO,
1401 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1402 __FUNCTION__, ap->id, dev->devno);
1403 return 0;
1406 if (ata_msg_probe(ap))
1407 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1408 __FUNCTION__, ap->id, dev->devno);
1410 /* print device capabilities */
1411 if (ata_msg_probe(ap))
1412 ata_dev_printk(dev, KERN_DEBUG,
1413 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1414 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1415 __FUNCTION__,
1416 id[49], id[82], id[83], id[84],
1417 id[85], id[86], id[87], id[88]);
1419 /* initialize to-be-configured parameters */
1420 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1421 dev->max_sectors = 0;
1422 dev->cdb_len = 0;
1423 dev->n_sectors = 0;
1424 dev->cylinders = 0;
1425 dev->heads = 0;
1426 dev->sectors = 0;
1429 * common ATA, ATAPI feature tests
1432 /* find max transfer mode; for printk only */
1433 xfer_mask = ata_id_xfermask(id);
1435 if (ata_msg_probe(ap))
1436 ata_dump_id(id);
1438 /* ATA-specific feature tests */
1439 if (dev->class == ATA_DEV_ATA) {
1440 if (ata_id_is_cfa(id)) {
1441 if (id[162] & 1) /* CPRM may make this media unusable */
1442 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
1443 ap->id, dev->devno);
1444 snprintf(revbuf, 7, "CFA");
1446 else
1447 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1449 dev->n_sectors = ata_id_n_sectors(id);
1451 if (ata_id_has_lba(id)) {
1452 const char *lba_desc;
1453 char ncq_desc[20];
1455 lba_desc = "LBA";
1456 dev->flags |= ATA_DFLAG_LBA;
1457 if (ata_id_has_lba48(id)) {
1458 dev->flags |= ATA_DFLAG_LBA48;
1459 lba_desc = "LBA48";
1462 /* config NCQ */
1463 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1465 /* print device info to dmesg */
1466 if (ata_msg_drv(ap) && print_info)
1467 ata_dev_printk(dev, KERN_INFO, "%s, "
1468 "max %s, %Lu sectors: %s %s\n",
1469 revbuf,
1470 ata_mode_string(xfer_mask),
1471 (unsigned long long)dev->n_sectors,
1472 lba_desc, ncq_desc);
1473 } else {
1474 /* CHS */
1476 /* Default translation */
1477 dev->cylinders = id[1];
1478 dev->heads = id[3];
1479 dev->sectors = id[6];
1481 if (ata_id_current_chs_valid(id)) {
1482 /* Current CHS translation is valid. */
1483 dev->cylinders = id[54];
1484 dev->heads = id[55];
1485 dev->sectors = id[56];
1488 /* print device info to dmesg */
1489 if (ata_msg_drv(ap) && print_info)
1490 ata_dev_printk(dev, KERN_INFO, "%s, "
1491 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1492 revbuf,
1493 ata_mode_string(xfer_mask),
1494 (unsigned long long)dev->n_sectors,
1495 dev->cylinders, dev->heads,
1496 dev->sectors);
1499 if (dev->id[59] & 0x100) {
1500 dev->multi_count = dev->id[59] & 0xff;
1501 if (ata_msg_drv(ap) && print_info)
1502 ata_dev_printk(dev, KERN_INFO,
1503 "ata%u: dev %u multi count %u\n",
1504 ap->id, dev->devno, dev->multi_count);
1507 dev->cdb_len = 16;
1510 /* ATAPI-specific feature tests */
1511 else if (dev->class == ATA_DEV_ATAPI) {
1512 char *cdb_intr_string = "";
1514 rc = atapi_cdb_len(id);
1515 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1516 if (ata_msg_warn(ap))
1517 ata_dev_printk(dev, KERN_WARNING,
1518 "unsupported CDB len\n");
1519 rc = -EINVAL;
1520 goto err_out_nosup;
1522 dev->cdb_len = (unsigned int) rc;
1524 if (ata_id_cdb_intr(dev->id)) {
1525 dev->flags |= ATA_DFLAG_CDB_INTR;
1526 cdb_intr_string = ", CDB intr";
1529 /* print device info to dmesg */
1530 if (ata_msg_drv(ap) && print_info)
1531 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1532 ata_mode_string(xfer_mask),
1533 cdb_intr_string);
1536 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1537 /* Let the user know. We don't want to disallow opens for
1538 rescue purposes, or in case the vendor is just a blithering
1539 idiot */
1540 if (print_info) {
1541 ata_dev_printk(dev, KERN_WARNING,
1542 "Drive reports diagnostics failure. This may indicate a drive\n");
1543 ata_dev_printk(dev, KERN_WARNING,
1544 "fault or invalid emulation. Contact drive vendor for information.\n");
1548 ata_set_port_max_cmd_len(ap);
1550 /* limit bridge transfers to udma5, 200 sectors */
1551 if (ata_dev_knobble(dev)) {
1552 if (ata_msg_drv(ap) && print_info)
1553 ata_dev_printk(dev, KERN_INFO,
1554 "applying bridge limits\n");
1555 dev->udma_mask &= ATA_UDMA5;
1556 dev->max_sectors = ATA_MAX_SECTORS;
1559 if (ap->ops->dev_config)
1560 ap->ops->dev_config(ap, dev);
1562 if (ata_msg_probe(ap))
1563 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1564 __FUNCTION__, ata_chk_status(ap));
1565 return 0;
1567 err_out_nosup:
1568 if (ata_msg_probe(ap))
1569 ata_dev_printk(dev, KERN_DEBUG,
1570 "%s: EXIT, err\n", __FUNCTION__);
1571 return rc;
1575 * ata_bus_probe - Reset and probe ATA bus
1576 * @ap: Bus to probe
1578 * Master ATA bus probing function. Initiates a hardware-dependent
1579 * bus reset, then attempts to identify any devices found on
1580 * the bus.
1582 * LOCKING:
1583 * PCI/etc. bus probe sem.
1585 * RETURNS:
1586 * Zero on success, negative errno otherwise.
1589 int ata_bus_probe(struct ata_port *ap)
1591 unsigned int classes[ATA_MAX_DEVICES];
1592 int tries[ATA_MAX_DEVICES];
1593 int i, rc, down_xfermask;
1594 struct ata_device *dev;
1596 ata_port_probe(ap);
1598 for (i = 0; i < ATA_MAX_DEVICES; i++)
1599 tries[i] = ATA_PROBE_MAX_TRIES;
1601 retry:
1602 down_xfermask = 0;
1604 /* reset and determine device classes */
1605 ap->ops->phy_reset(ap);
1607 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1608 dev = &ap->device[i];
1610 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1611 dev->class != ATA_DEV_UNKNOWN)
1612 classes[dev->devno] = dev->class;
1613 else
1614 classes[dev->devno] = ATA_DEV_NONE;
1616 dev->class = ATA_DEV_UNKNOWN;
1619 ata_port_probe(ap);
1621 /* after the reset the device state is PIO 0 and the controller
1622 state is undefined. Record the mode */
1624 for (i = 0; i < ATA_MAX_DEVICES; i++)
1625 ap->device[i].pio_mode = XFER_PIO_0;
1627 /* read IDENTIFY page and configure devices */
1628 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1629 dev = &ap->device[i];
1631 if (tries[i])
1632 dev->class = classes[i];
1634 if (!ata_dev_enabled(dev))
1635 continue;
1637 rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
1638 if (rc)
1639 goto fail;
1641 rc = ata_dev_configure(dev, 1);
1642 if (rc)
1643 goto fail;
1646 /* configure transfer mode */
1647 rc = ata_set_mode(ap, &dev);
1648 if (rc) {
1649 down_xfermask = 1;
1650 goto fail;
1653 for (i = 0; i < ATA_MAX_DEVICES; i++)
1654 if (ata_dev_enabled(&ap->device[i]))
1655 return 0;
1657 /* no device present, disable port */
1658 ata_port_disable(ap);
1659 ap->ops->port_disable(ap);
1660 return -ENODEV;
1662 fail:
1663 switch (rc) {
1664 case -EINVAL:
1665 case -ENODEV:
1666 tries[dev->devno] = 0;
1667 break;
1668 case -EIO:
1669 sata_down_spd_limit(ap);
1670 /* fall through */
1671 default:
1672 tries[dev->devno]--;
1673 if (down_xfermask &&
1674 ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
1675 tries[dev->devno] = 0;
1678 if (!tries[dev->devno]) {
1679 ata_down_xfermask_limit(dev, 1);
1680 ata_dev_disable(dev);
1683 goto retry;
1687 * ata_port_probe - Mark port as enabled
1688 * @ap: Port for which we indicate enablement
1690 * Modify @ap data structure such that the system
1691 * thinks that the entire port is enabled.
1693 * LOCKING: host lock, or some other form of
1694 * serialization.
1697 void ata_port_probe(struct ata_port *ap)
1699 ap->flags &= ~ATA_FLAG_DISABLED;
1703 * sata_print_link_status - Print SATA link status
1704 * @ap: SATA port to printk link status about
1706 * This function prints link speed and status of a SATA link.
1708 * LOCKING:
1709 * None.
1711 static void sata_print_link_status(struct ata_port *ap)
1713 u32 sstatus, scontrol, tmp;
1715 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1716 return;
1717 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1719 if (ata_port_online(ap)) {
1720 tmp = (sstatus >> 4) & 0xf;
1721 ata_port_printk(ap, KERN_INFO,
1722 "SATA link up %s (SStatus %X SControl %X)\n",
1723 sata_spd_string(tmp), sstatus, scontrol);
1724 } else {
1725 ata_port_printk(ap, KERN_INFO,
1726 "SATA link down (SStatus %X SControl %X)\n",
1727 sstatus, scontrol);
1732 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1733 * @ap: SATA port associated with target SATA PHY.
1735 * This function issues commands to standard SATA Sxxx
1736 * PHY registers, to wake up the phy (and device), and
1737 * clear any reset condition.
1739 * LOCKING:
1740 * PCI/etc. bus probe sem.
1743 void __sata_phy_reset(struct ata_port *ap)
1745 u32 sstatus;
1746 unsigned long timeout = jiffies + (HZ * 5);
1748 if (ap->flags & ATA_FLAG_SATA_RESET) {
1749 /* issue phy wake/reset */
1750 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1751 /* Couldn't find anything in SATA I/II specs, but
1752 * AHCI-1.1 10.4.2 says at least 1 ms. */
1753 mdelay(1);
1755 /* phy wake/clear reset */
1756 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1758 /* wait for phy to become ready, if necessary */
1759 do {
1760 msleep(200);
1761 sata_scr_read(ap, SCR_STATUS, &sstatus);
1762 if ((sstatus & 0xf) != 1)
1763 break;
1764 } while (time_before(jiffies, timeout));
1766 /* print link status */
1767 sata_print_link_status(ap);
1769 /* TODO: phy layer with polling, timeouts, etc. */
1770 if (!ata_port_offline(ap))
1771 ata_port_probe(ap);
1772 else
1773 ata_port_disable(ap);
1775 if (ap->flags & ATA_FLAG_DISABLED)
1776 return;
1778 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1779 ata_port_disable(ap);
1780 return;
1783 ap->cbl = ATA_CBL_SATA;
1787 * sata_phy_reset - Reset SATA bus.
1788 * @ap: SATA port associated with target SATA PHY.
1790 * This function resets the SATA bus, and then probes
1791 * the bus for devices.
1793 * LOCKING:
1794 * PCI/etc. bus probe sem.
1797 void sata_phy_reset(struct ata_port *ap)
1799 __sata_phy_reset(ap);
1800 if (ap->flags & ATA_FLAG_DISABLED)
1801 return;
1802 ata_bus_reset(ap);
1806 * ata_dev_pair - return other device on cable
1807 * @adev: device
1809 * Obtain the other device on the same cable, or if none is
1810 * present, NULL is returned
1813 struct ata_device *ata_dev_pair(struct ata_device *adev)
1815 struct ata_port *ap = adev->ap;
1816 struct ata_device *pair = &ap->device[1 - adev->devno];
1817 if (!ata_dev_enabled(pair))
1818 return NULL;
1819 return pair;
1823 * ata_port_disable - Disable port.
1824 * @ap: Port to be disabled.
1826 * Modify @ap data structure such that the system
1827 * thinks that the entire port is disabled, and should
1828 * never attempt to probe or communicate with devices
1829 * on this port.
1831 * LOCKING: host lock, or some other form of
1832 * serialization.
1835 void ata_port_disable(struct ata_port *ap)
1837 ap->device[0].class = ATA_DEV_NONE;
1838 ap->device[1].class = ATA_DEV_NONE;
1839 ap->flags |= ATA_FLAG_DISABLED;
1843 * sata_down_spd_limit - adjust SATA spd limit downward
1844 * @ap: Port to adjust SATA spd limit for
1846 * Adjust SATA spd limit of @ap downward. Note that this
1847 * function only adjusts the limit. The change must be applied
1848 * using sata_set_spd().
1850 * LOCKING:
1851 * Inherited from caller.
1853 * RETURNS:
1854 * 0 on success, negative errno on failure
1856 int sata_down_spd_limit(struct ata_port *ap)
1858 u32 sstatus, spd, mask;
1859 int rc, highbit;
1861 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1862 if (rc)
1863 return rc;
1865 mask = ap->sata_spd_limit;
1866 if (mask <= 1)
1867 return -EINVAL;
1868 highbit = fls(mask) - 1;
1869 mask &= ~(1 << highbit);
1871 spd = (sstatus >> 4) & 0xf;
1872 if (spd <= 1)
1873 return -EINVAL;
1874 spd--;
1875 mask &= (1 << spd) - 1;
1876 if (!mask)
1877 return -EINVAL;
1879 ap->sata_spd_limit = mask;
1881 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1882 sata_spd_string(fls(mask)));
1884 return 0;
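/*
 * Illustrative sketch, compiled out: the bit games played by
 * sata_down_spd_limit() above.  The limit is a mask of allowed speeds
 * (bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps); lowering it means dropping the
 * highest allowed bit and capping below the speed the link currently
 * runs at.
 */
#if 0
#include <assert.h>

static int example_fls(unsigned int v)
{
	int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;			/* 1-based index of highest set bit, 0 if none */
}

static unsigned int example_lower_spd_limit(unsigned int mask, unsigned int cur_spd)
{
	mask &= ~(1u << (example_fls(mask) - 1));	/* drop the fastest allowed speed */
	if (cur_spd > 1)
		mask &= (1u << (cur_spd - 1)) - 1;	/* and stay below the current speed */
	return mask;
}

static void example_lower_spd_limit_usage(void)
{
	/* both speeds allowed, link at 3.0 Gbps -> only 1.5 Gbps remains */
	assert(example_lower_spd_limit(0x3, 2) == 0x1);
}
#endif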
1887 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1889 u32 spd, limit;
1891 if (ap->sata_spd_limit == UINT_MAX)
1892 limit = 0;
1893 else
1894 limit = fls(ap->sata_spd_limit);
1896 spd = (*scontrol >> 4) & 0xf;
1897 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1899 return spd != limit;
1903 * sata_set_spd_needed - is SATA spd configuration needed
1904 * @ap: Port in question
1906 * Test whether the spd limit in SControl matches
1907 * @ap->sata_spd_limit. This function is used to determine
1908 * whether hardreset is necessary to apply SATA spd
1909 * configuration.
1911 * LOCKING:
1912 * Inherited from caller.
1914 * RETURNS:
1915 * 1 if SATA spd configuration is needed, 0 otherwise.
1917 int sata_set_spd_needed(struct ata_port *ap)
1919 u32 scontrol;
1921 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1922 return 0;
1924 return __sata_set_spd_needed(ap, &scontrol);
1928 * sata_set_spd - set SATA spd according to spd limit
1929 * @ap: Port to set SATA spd for
1931 * Set SATA spd of @ap according to sata_spd_limit.
1933 * LOCKING:
1934 * Inherited from caller.
1936 * RETURNS:
1937 * 0 if spd doesn't need to be changed, 1 if spd has been
1938 * changed. Negative errno if SCR registers are inaccessible.
1940 int sata_set_spd(struct ata_port *ap)
1942 u32 scontrol;
1943 int rc;
1945 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1946 return rc;
1948 if (!__sata_set_spd_needed(ap, &scontrol))
1949 return 0;
1951 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1952 return rc;
1954 return 1;
1958 * This mode timing computation functionality is ported over from
1959 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1962 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1963 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1964 * for UDMA6, which is currently supported only by Maxtor drives.
1966 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
1969 static const struct ata_timing ata_timing[] = {
1971 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1972 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1973 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1974 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1976 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
1977 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
1978 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1979 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1980 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1982 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1984 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1985 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1986 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1988 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1989 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1990 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1992 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
1993 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
1994 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1995 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1997 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1998 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1999 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2001 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2003 { 0xFF }
2006 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2007 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2009 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2011 q->setup = EZ(t->setup * 1000, T);
2012 q->act8b = EZ(t->act8b * 1000, T);
2013 q->rec8b = EZ(t->rec8b * 1000, T);
2014 q->cyc8b = EZ(t->cyc8b * 1000, T);
2015 q->active = EZ(t->active * 1000, T);
2016 q->recover = EZ(t->recover * 1000, T);
2017 q->cycle = EZ(t->cycle * 1000, T);
2018 q->udma = EZ(t->udma * 1000, UT);
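/*
 * Illustrative sketch, compiled out: ENOUGH() and EZ() above are
 * round-up division - they turn a time into the number of whole clock
 * periods needed to cover it, keeping 0 as 0.  The "* 1000" scaling in
 * ata_timing_quantize() puts the nanosecond inputs and the clock period
 * argument into the same finer unit; the numbers below are arbitrary.
 */
#if 0
#include <assert.h>

#define EXAMPLE_ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
#define EXAMPLE_EZ(v, unit)	((v) ? EXAMPLE_ENOUGH(v, unit) : 0)

static void example_quantize_usage(void)
{
	/* 70 ns setup on a 30 ns clock needs 3 whole periods, not 2.33 */
	assert(EXAMPLE_ENOUGH(70, 30) == 3);
	/* an exact multiple is not rounded up further */
	assert(EXAMPLE_ENOUGH(60, 30) == 2);
	/* a zero field stays zero instead of becoming one period */
	assert(EXAMPLE_EZ(0, 30) == 0);
}
#endif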
2021 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2022 struct ata_timing *m, unsigned int what)
2024 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2025 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2026 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2027 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2028 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2029 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2030 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2031 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2034 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2036 const struct ata_timing *t;
2038 for (t = ata_timing; t->mode != speed; t++)
2039 if (t->mode == 0xFF)
2040 return NULL;
2041 return t;
2044 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2045 struct ata_timing *t, int T, int UT)
2047 const struct ata_timing *s;
2048 struct ata_timing p;
2051 * Find the mode.
2054 if (!(s = ata_timing_find_mode(speed)))
2055 return -EINVAL;
2057 memcpy(t, s, sizeof(*s));
2060 * If the drive is an EIDE drive, it can tell us it needs extended
2061 * PIO/MW_DMA cycle timing.
2064 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2065 memset(&p, 0, sizeof(p));
2066 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2067 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2068 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2069 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2070 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2072 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2076 * Convert the timing to bus clock counts.
2079 ata_timing_quantize(t, t, T, UT);
2082 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2083 * S.M.A.R.T. and some other commands. We have to ensure that the
2084 * DMA cycle timing is no faster than the fastest PIO timing.
2087 if (speed > XFER_PIO_4) {
2088 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2089 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2093 * Lengthen active & recovery time so that cycle time is correct.
2096 if (t->act8b + t->rec8b < t->cyc8b) {
2097 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2098 t->rec8b = t->cyc8b - t->act8b;
2101 if (t->active + t->recover < t->cycle) {
2102 t->active += (t->cycle - (t->active + t->recover)) / 2;
2103 t->recover = t->cycle - t->active;
2106 return 0;
2110 * ata_down_xfermask_limit - adjust dev xfer masks downward
2111 * @dev: Device to adjust xfer masks
2112 * @force_pio0: Force PIO0
2114 * Adjust xfer masks of @dev downward. Note that this function
2115 * does not apply the change. Invoking ata_set_mode() afterwards
2116 * will apply the limit.
2118 * LOCKING:
2119 * Inherited from caller.
2121 * RETURNS:
2122 * 0 on success, negative errno on failure
2124 int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
2126 unsigned long xfer_mask;
2127 int highbit;
2129 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2130 dev->udma_mask);
2132 if (!xfer_mask)
2133 goto fail;
2134 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2135 if (xfer_mask & ATA_MASK_UDMA)
2136 xfer_mask &= ~ATA_MASK_MWDMA;
2138 highbit = fls(xfer_mask) - 1;
2139 xfer_mask &= ~(1 << highbit);
2140 if (force_pio0)
2141 xfer_mask &= 1 << ATA_SHIFT_PIO;
2142 if (!xfer_mask)
2143 goto fail;
2145 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2146 &dev->udma_mask);
2148 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2149 ata_mode_string(xfer_mask));
2151 return 0;
2153 fail:
2154 return -EINVAL;
2157 static int ata_dev_set_mode(struct ata_device *dev)
2159 unsigned int err_mask;
2160 int rc;
2162 dev->flags &= ~ATA_DFLAG_PIO;
2163 if (dev->xfer_shift == ATA_SHIFT_PIO)
2164 dev->flags |= ATA_DFLAG_PIO;
2166 err_mask = ata_dev_set_xfermode(dev);
2167 if (err_mask) {
2168 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2169 "(err_mask=0x%x)\n", err_mask);
2170 return -EIO;
2173 rc = ata_dev_revalidate(dev, 0);
2174 if (rc)
2175 return rc;
2177 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2178 dev->xfer_shift, (int)dev->xfer_mode);
2180 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2181 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2182 return 0;
2186 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2187 * @ap: port on which timings will be programmed
2188 * @r_failed_dev: out parameter for failed device
2190 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2191 * ata_set_mode() fails, pointer to the failing device is
2192 * returned in @r_failed_dev.
2194 * LOCKING:
2195 * PCI/etc. bus probe sem.
2197 * RETURNS:
2198 * 0 on success, negative errno otherwise
2200 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2202 struct ata_device *dev;
2203 int i, rc = 0, used_dma = 0, found = 0;
2205 /* has private set_mode? */
2206 if (ap->ops->set_mode) {
2207 /* FIXME: make ->set_mode handle no device case and
2208 * return error code and failing device on failure.
2210 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2211 if (ata_dev_ready(&ap->device[i])) {
2212 ap->ops->set_mode(ap);
2213 break;
2216 return 0;
2219 /* step 1: calculate xfer_mask */
2220 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2221 unsigned int pio_mask, dma_mask;
2223 dev = &ap->device[i];
2225 if (!ata_dev_enabled(dev))
2226 continue;
2228 ata_dev_xfermask(dev);
2230 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2231 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2232 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2233 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2235 found = 1;
2236 if (dev->dma_mode)
2237 used_dma = 1;
2239 if (!found)
2240 goto out;
2242 /* step 2: always set host PIO timings */
2243 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2244 dev = &ap->device[i];
2245 if (!ata_dev_enabled(dev))
2246 continue;
2248 if (!dev->pio_mode) {
2249 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2250 rc = -EINVAL;
2251 goto out;
2254 dev->xfer_mode = dev->pio_mode;
2255 dev->xfer_shift = ATA_SHIFT_PIO;
2256 if (ap->ops->set_piomode)
2257 ap->ops->set_piomode(ap, dev);
2260 /* step 3: set host DMA timings */
2261 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2262 dev = &ap->device[i];
2264 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2265 continue;
2267 dev->xfer_mode = dev->dma_mode;
2268 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2269 if (ap->ops->set_dmamode)
2270 ap->ops->set_dmamode(ap, dev);
2273 /* step 4: update devices' xfer mode */
2274 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2275 dev = &ap->device[i];
2277 /* don't update suspended devices' xfer mode */
2278 if (!ata_dev_ready(dev))
2279 continue;
2281 rc = ata_dev_set_mode(dev);
2282 if (rc)
2283 goto out;
2286 /* Record simplex status. If we selected DMA then the other
2287 * host channels are not permitted to do so.
2289 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2290 ap->host->simplex_claimed = 1;
2292 /* step 5: chip specific finalisation */
2293 if (ap->ops->post_set_mode)
2294 ap->ops->post_set_mode(ap);
2296 out:
2297 if (rc)
2298 *r_failed_dev = dev;
2299 return rc;
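/*
 * Editor's illustrative sketch, not called anywhere in this driver: a
 * hypothetical retry path could combine ata_down_xfermask_limit() and
 * ata_set_mode() as shown below.  The function name and the error
 * handling policy are assumptions made purely for the example.
 */
static inline int example_step_down_xfer_mode(struct ata_device *dev)
{
        struct ata_device *failed_dev;

        /* drop the highest remaining mode; fails once we are at PIO0 */
        if (ata_down_xfermask_limit(dev, 0))
                return -EINVAL;

        /* reprogram controller timings and issue SET FEATURES - XFER */
        return ata_set_mode(dev->ap, &failed_dev);
}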
2303 * ata_tf_to_host - issue ATA taskfile to host controller
2304 * @ap: port to which command is being issued
2305 * @tf: ATA taskfile register set
2307 * Issues ATA taskfile register set to ATA host controller,
2308 * with proper synchronization with interrupt handler and
2309 * other threads.
2311 * LOCKING:
2312 * spin_lock_irqsave(host lock)
2315 static inline void ata_tf_to_host(struct ata_port *ap,
2316 const struct ata_taskfile *tf)
2318 ap->ops->tf_load(ap, tf);
2319 ap->ops->exec_command(ap, tf);
2323 * ata_busy_sleep - sleep until BSY clears, or timeout
2324 * @ap: port containing status register to be polled
2325 * @tmout_pat: impatience timeout
2326 * @tmout: overall timeout
2328 * Sleep until ATA Status register bit BSY clears,
2329 * or a timeout occurs.
2331 * LOCKING:
2332 * Kernel thread context (may sleep).
2334 * RETURNS:
2335 * 0 on success, -ENODEV if the status register reads 0xff, -EBUSY if the device stays busy.
2337 int ata_busy_sleep(struct ata_port *ap,
2338 unsigned long tmout_pat, unsigned long tmout)
2340 unsigned long timer_start, timeout;
2341 u8 status;
2343 status = ata_busy_wait(ap, ATA_BUSY, 300);
2344 timer_start = jiffies;
2345 timeout = timer_start + tmout_pat;
2346 while (status != 0xff && (status & ATA_BUSY) &&
2347 time_before(jiffies, timeout)) {
2348 msleep(50);
2349 status = ata_busy_wait(ap, ATA_BUSY, 3);
2352 if (status != 0xff && (status & ATA_BUSY))
2353 ata_port_printk(ap, KERN_WARNING,
2354 "port is slow to respond, please be patient "
2355 "(Status 0x%x)\n", status);
2357 timeout = timer_start + tmout;
2358 while (status != 0xff && (status & ATA_BUSY) &&
2359 time_before(jiffies, timeout)) {
2360 msleep(50);
2361 status = ata_chk_status(ap);
2364 if (status == 0xff)
2365 return -ENODEV;
2367 if (status & ATA_BUSY) {
2368 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2369 "(%lu secs, Status 0x%x)\n",
2370 tmout / HZ, status);
2371 return -EBUSY;
2374 return 0;
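/*
 * Editor's illustrative sketch, not part of the driver: a reset path
 * might wait for a device like this, treating the 0xff status (nothing
 * attached) separately from a genuine timeout.  The function name is an
 * assumption for the example only.
 */
static inline int example_wait_for_device(struct ata_port *ap)
{
        int rc = ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

        if (rc == -ENODEV)      /* status stuck at 0xff: empty port */
                return 0;

        return rc;              /* 0 if idle, -EBUSY if still busy */
}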
2377 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2379 struct ata_ioports *ioaddr = &ap->ioaddr;
2380 unsigned int dev0 = devmask & (1 << 0);
2381 unsigned int dev1 = devmask & (1 << 1);
2382 unsigned long timeout;
2384 /* if device 0 was found in ata_devchk, wait for its
2385 * BSY bit to clear
2387 if (dev0)
2388 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2390 /* if device 1 was found in ata_devchk, wait for
2391 * register access, then wait for BSY to clear
2393 timeout = jiffies + ATA_TMOUT_BOOT;
2394 while (dev1) {
2395 u8 nsect, lbal;
2397 ap->ops->dev_select(ap, 1);
2398 if (ap->flags & ATA_FLAG_MMIO) {
2399 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2400 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2401 } else {
2402 nsect = inb(ioaddr->nsect_addr);
2403 lbal = inb(ioaddr->lbal_addr);
2405 if ((nsect == 1) && (lbal == 1))
2406 break;
2407 if (time_after(jiffies, timeout)) {
2408 dev1 = 0;
2409 break;
2411 msleep(50); /* give drive a breather */
2413 if (dev1)
2414 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2416 /* is all this really necessary? */
2417 ap->ops->dev_select(ap, 0);
2418 if (dev1)
2419 ap->ops->dev_select(ap, 1);
2420 if (dev0)
2421 ap->ops->dev_select(ap, 0);
2424 static unsigned int ata_bus_softreset(struct ata_port *ap,
2425 unsigned int devmask)
2427 struct ata_ioports *ioaddr = &ap->ioaddr;
2429 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2431 /* software reset. causes dev0 to be selected */
2432 if (ap->flags & ATA_FLAG_MMIO) {
2433 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2434 udelay(20); /* FIXME: flush */
2435 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2436 udelay(20); /* FIXME: flush */
2437 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2438 } else {
2439 outb(ap->ctl, ioaddr->ctl_addr);
2440 udelay(10);
2441 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2442 udelay(10);
2443 outb(ap->ctl, ioaddr->ctl_addr);
2446 /* spec mandates ">= 2ms" before checking status.
2447 * We wait 150ms, because that was the magic delay used for
2448 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2449 * between when the ATA command register is written, and then
2450 * status is checked. Because waiting for "a while" before
2451 * checking status is fine, post SRST, we perform this magic
2452 * delay here as well.
2454 * Old drivers/ide uses the 2mS rule and then waits for ready
2456 msleep(150);
2458 /* Before we perform post reset processing we want to see if
2459 * the bus shows 0xFF because the odd clown forgets the D7
2460 * pulldown resistor.
2462 if (ata_check_status(ap) == 0xFF)
2463 return 0;
2465 ata_bus_post_reset(ap, devmask);
2467 return 0;
2471 * ata_bus_reset - reset host port and associated ATA channel
2472 * @ap: port to reset
2474 * This is typically the first time we actually start issuing
2475 * commands to the ATA channel. We wait for BSY to clear, then
2476 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2477 * result. Determine what devices, if any, are on the channel
2478 * by looking at the device 0/1 error register. Look at the signature
2479 * stored in each device's taskfile registers, to determine if
2480 * the device is ATA or ATAPI.
2482 * LOCKING:
2483 * PCI/etc. bus probe sem.
2484 * Obtains host lock.
2486 * SIDE EFFECTS:
2487 * Sets ATA_FLAG_DISABLED if bus reset fails.
2490 void ata_bus_reset(struct ata_port *ap)
2492 struct ata_ioports *ioaddr = &ap->ioaddr;
2493 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2494 u8 err;
2495 unsigned int dev0, dev1 = 0, devmask = 0;
2497 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2499 /* determine if device 0/1 are present */
2500 if (ap->flags & ATA_FLAG_SATA_RESET)
2501 dev0 = 1;
2502 else {
2503 dev0 = ata_devchk(ap, 0);
2504 if (slave_possible)
2505 dev1 = ata_devchk(ap, 1);
2508 if (dev0)
2509 devmask |= (1 << 0);
2510 if (dev1)
2511 devmask |= (1 << 1);
2513 /* select device 0 again */
2514 ap->ops->dev_select(ap, 0);
2516 /* issue bus reset */
2517 if (ap->flags & ATA_FLAG_SRST)
2518 if (ata_bus_softreset(ap, devmask))
2519 goto err_out;
2522 * determine by signature whether we have ATA or ATAPI devices
2524 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2525 if ((slave_possible) && (err != 0x81))
2526 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2528 /* re-enable interrupts */
2529 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2530 ata_irq_on(ap);
2532 /* is double-select really necessary? */
2533 if (ap->device[1].class != ATA_DEV_NONE)
2534 ap->ops->dev_select(ap, 1);
2535 if (ap->device[0].class != ATA_DEV_NONE)
2536 ap->ops->dev_select(ap, 0);
2538 /* if no devices were detected, disable this port */
2539 if ((ap->device[0].class == ATA_DEV_NONE) &&
2540 (ap->device[1].class == ATA_DEV_NONE))
2541 goto err_out;
2543 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2544 /* set up device control for ATA_FLAG_SATA_RESET */
2545 if (ap->flags & ATA_FLAG_MMIO)
2546 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2547 else
2548 outb(ap->ctl, ioaddr->ctl_addr);
2551 DPRINTK("EXIT\n");
2552 return;
2554 err_out:
2555 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2556 ap->ops->port_disable(ap);
2558 DPRINTK("EXIT\n");
2562 * sata_phy_debounce - debounce SATA phy status
2563 * @ap: ATA port to debounce SATA phy status for
2564 * @params: timing parameters { interval, duration, timeout } in msec
2566 * Make sure SStatus of @ap reaches stable state, determined by
2567 * holding the same value where DET is not 1 for @duration polled
2568 * every @interval, before @timeout. The timeout constrains the
2569 * beginning of the stable state. Because DET gets stuck at 1 on
2570 * some controllers after hot unplugging, this function waits
2571 * until the timeout expires and then returns 0 if DET is stable at 1.
2573 * LOCKING:
2574 * Kernel thread context (may sleep)
2576 * RETURNS:
2577 * 0 on success, -errno on failure.
2579 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2581 unsigned long interval_msec = params[0];
2582 unsigned long duration = params[1] * HZ / 1000;
2583 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2584 unsigned long last_jiffies;
2585 u32 last, cur;
2586 int rc;
2588 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2589 return rc;
2590 cur &= 0xf;
2592 last = cur;
2593 last_jiffies = jiffies;
2595 while (1) {
2596 msleep(interval_msec);
2597 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2598 return rc;
2599 cur &= 0xf;
2601 /* DET stable? */
2602 if (cur == last) {
2603 if (cur == 1 && time_before(jiffies, timeout))
2604 continue;
2605 if (time_after(jiffies, last_jiffies + duration))
2606 return 0;
2607 continue;
2610 /* unstable, start over */
2611 last = cur;
2612 last_jiffies = jiffies;
2614 /* check timeout */
2615 if (time_after(jiffies, timeout))
2616 return -EBUSY;
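/*
 * Editor's illustrative sketch, not used by the driver: a caller that
 * only needs a yes/no answer about link stability could wrap the helper
 * above like this.  The name is an assumption for the example.
 */
static inline int example_link_is_stable(struct ata_port *ap)
{
        /* poll SStatus using the standard "normal" debounce timings */
        return sata_phy_debounce(ap, sata_deb_timing_normal) == 0;
}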
2621 * sata_phy_resume - resume SATA phy
2622 * @ap: ATA port to resume SATA phy for
2623 * @params: timing parameters { interval, duration, timeout } in msec
2625 * Resume SATA phy of @ap and debounce it.
2627 * LOCKING:
2628 * Kernel thread context (may sleep)
2630 * RETURNS:
2631 * 0 on success, -errno on failure.
2633 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2635 u32 scontrol;
2636 int rc;
2638 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2639 return rc;
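/* SControl layout: bits 0-3 DET, 4-7 SPD, 8-11 IPM.  The write below
 * keeps the configured speed limit, sets DET=0 (no initialization
 * action, i.e. normal operation) and IPM=3 to disable transitions to
 * the partial and slumber power states.
 */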
2641 scontrol = (scontrol & 0x0f0) | 0x300;
2643 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2644 return rc;
2646 /* Some PHYs react badly if SStatus is pounded immediately
2647 * after resuming. Delay 200ms before debouncing.
2649 msleep(200);
2651 return sata_phy_debounce(ap, params);
2654 static void ata_wait_spinup(struct ata_port *ap)
2656 struct ata_eh_context *ehc = &ap->eh_context;
2657 unsigned long end, secs;
2658 int rc;
2660 /* first, debounce phy if SATA */
2661 if (ap->cbl == ATA_CBL_SATA) {
2662 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2664 /* if debounced successfully and offline, no need to wait */
2665 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2666 return;
2669 /* okay, let's give the drive time to spin up */
2670 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2671 secs = ((end - jiffies) + HZ - 1) / HZ;
2673 if (time_after(jiffies, end))
2674 return;
2676 if (secs > 5)
2677 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2678 "(%lu secs)\n", secs);
2680 schedule_timeout_uninterruptible(end - jiffies);
2684 * ata_std_prereset - prepare for reset
2685 * @ap: ATA port to be reset
2687 * @ap is about to be reset. Initialize it.
2689 * LOCKING:
2690 * Kernel thread context (may sleep)
2692 * RETURNS:
2693 * 0 on success, -errno otherwise.
2695 int ata_std_prereset(struct ata_port *ap)
2697 struct ata_eh_context *ehc = &ap->eh_context;
2698 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2699 int rc;
2701 /* handle link resume & hotplug spinup */
2702 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2703 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2704 ehc->i.action |= ATA_EH_HARDRESET;
2706 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2707 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2708 ata_wait_spinup(ap);
2710 /* if we're about to do hardreset, nothing more to do */
2711 if (ehc->i.action & ATA_EH_HARDRESET)
2712 return 0;
2714 /* if SATA, resume phy */
2715 if (ap->cbl == ATA_CBL_SATA) {
2716 rc = sata_phy_resume(ap, timing);
2717 if (rc && rc != -EOPNOTSUPP) {
2718 /* phy resume failed */
2719 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2720 "link for reset (errno=%d)\n", rc);
2721 return rc;
2725 /* Wait for !BSY if the controller can wait for the first D2H
2726 * Reg FIS and we don't know that no device is attached.
2728 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2729 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2731 return 0;
2735 * ata_std_softreset - reset host port via ATA SRST
2736 * @ap: port to reset
2737 * @classes: resulting classes of attached devices
2739 * Reset host port using ATA SRST.
2741 * LOCKING:
2742 * Kernel thread context (may sleep)
2744 * RETURNS:
2745 * 0 on success, -errno otherwise.
2747 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2749 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2750 unsigned int devmask = 0, err_mask;
2751 u8 err;
2753 DPRINTK("ENTER\n");
2755 if (ata_port_offline(ap)) {
2756 classes[0] = ATA_DEV_NONE;
2757 goto out;
2760 /* determine if device 0/1 are present */
2761 if (ata_devchk(ap, 0))
2762 devmask |= (1 << 0);
2763 if (slave_possible && ata_devchk(ap, 1))
2764 devmask |= (1 << 1);
2766 /* select device 0 again */
2767 ap->ops->dev_select(ap, 0);
2769 /* issue bus reset */
2770 DPRINTK("about to softreset, devmask=%x\n", devmask);
2771 err_mask = ata_bus_softreset(ap, devmask);
2772 if (err_mask) {
2773 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2774 err_mask);
2775 return -EIO;
2778 /* determine by signature whether we have ATA or ATAPI devices */
2779 classes[0] = ata_dev_try_classify(ap, 0, &err);
2780 if (slave_possible && err != 0x81)
2781 classes[1] = ata_dev_try_classify(ap, 1, &err);
2783 out:
2784 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2785 return 0;
2789 * sata_std_hardreset - reset host port via SATA phy reset
2790 * @ap: port to reset
2791 * @class: resulting class of attached device
2793 * SATA phy-reset host port using DET bits of SControl register.
2795 * LOCKING:
2796 * Kernel thread context (may sleep)
2798 * RETURNS:
2799 * 0 on success, -errno otherwise.
2801 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2803 struct ata_eh_context *ehc = &ap->eh_context;
2804 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2805 u32 scontrol;
2806 int rc;
2808 DPRINTK("ENTER\n");
2810 if (sata_set_spd_needed(ap)) {
2811 /* SATA spec says nothing about how to reconfigure
2812 * spd. To be on the safe side, turn off phy during
2813 * reconfiguration. This works for at least ICH7 AHCI
2814 * and Sil3124.
2816 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2817 return rc;
2819 scontrol = (scontrol & 0x0f0) | 0x304;
2821 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2822 return rc;
2824 sata_set_spd(ap);
2827 /* issue phy wake/reset */
2828 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2829 return rc;
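/* DET=1 in the value below requests the interface initialization
 * sequence, i.e. it drives COMRESET on the wire; IPM=3 again blocks
 * power-state transitions while the reset is in progress.
 */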
2831 scontrol = (scontrol & 0x0f0) | 0x301;
2833 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
2834 return rc;
2836 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2837 * 10.4.2 says at least 1 ms.
2839 msleep(1);
2841 /* bring phy back */
2842 sata_phy_resume(ap, timing);
2844 /* TODO: phy layer with polling, timeouts, etc. */
2845 if (ata_port_offline(ap)) {
2846 *class = ATA_DEV_NONE;
2847 DPRINTK("EXIT, link offline\n");
2848 return 0;
2851 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2852 ata_port_printk(ap, KERN_ERR,
2853 "COMRESET failed (device not ready)\n");
2854 return -EIO;
2857 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2859 *class = ata_dev_try_classify(ap, 0, NULL);
2861 DPRINTK("EXIT, class=%u\n", *class);
2862 return 0;
2866 * ata_std_postreset - standard postreset callback
2867 * @ap: the target ata_port
2868 * @classes: classes of attached devices
2870 * This function is invoked after a successful reset. Note that
2871 * the device might have been reset more than once using
2872 * different reset methods before postreset is invoked.
2874 * LOCKING:
2875 * Kernel thread context (may sleep)
2877 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2879 u32 serror;
2881 DPRINTK("ENTER\n");
2883 /* print link status */
2884 sata_print_link_status(ap);
2886 /* clear SError */
2887 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2888 sata_scr_write(ap, SCR_ERROR, serror);
2890 /* re-enable interrupts */
2891 if (!ap->ops->error_handler) {
2892 /* FIXME: hack. create a hook instead */
2893 if (ap->ioaddr.ctl_addr)
2894 ata_irq_on(ap);
2897 /* is double-select really necessary? */
2898 if (classes[0] != ATA_DEV_NONE)
2899 ap->ops->dev_select(ap, 1);
2900 if (classes[1] != ATA_DEV_NONE)
2901 ap->ops->dev_select(ap, 0);
2903 /* bail out if no device is present */
2904 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2905 DPRINTK("EXIT, no device\n");
2906 return;
2909 /* set up device control */
2910 if (ap->ioaddr.ctl_addr) {
2911 if (ap->flags & ATA_FLAG_MMIO)
2912 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2913 else
2914 outb(ap->ctl, ap->ioaddr.ctl_addr);
2917 DPRINTK("EXIT\n");
2921 * ata_dev_same_device - Determine whether new ID matches configured device
2922 * @dev: device to compare against
2923 * @new_class: class of the new device
2924 * @new_id: IDENTIFY page of the new device
2926 * Compare @new_class and @new_id against @dev and determine
2927 * whether @dev is the device indicated by @new_class and
2928 * @new_id.
2930 * LOCKING:
2931 * None.
2933 * RETURNS:
2934 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2936 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2937 const u16 *new_id)
2939 const u16 *old_id = dev->id;
2940 unsigned char model[2][41], serial[2][21];
2941 u64 new_n_sectors;
2943 if (dev->class != new_class) {
2944 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
2945 dev->class, new_class);
2946 return 0;
2949 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2950 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2951 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2952 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2953 new_n_sectors = ata_id_n_sectors(new_id);
2955 if (strcmp(model[0], model[1])) {
2956 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
2957 "'%s' != '%s'\n", model[0], model[1]);
2958 return 0;
2961 if (strcmp(serial[0], serial[1])) {
2962 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
2963 "'%s' != '%s'\n", serial[0], serial[1]);
2964 return 0;
2967 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2968 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
2969 "%llu != %llu\n",
2970 (unsigned long long)dev->n_sectors,
2971 (unsigned long long)new_n_sectors);
2972 return 0;
2975 return 1;
2979 * ata_dev_revalidate - Revalidate ATA device
2980 * @dev: device to revalidate
2981 * @post_reset: is this revalidation after reset?
2983 * Re-read IDENTIFY page and make sure @dev is still attached to
2984 * the port.
2986 * LOCKING:
2987 * Kernel thread context (may sleep)
2989 * RETURNS:
2990 * 0 on success, negative errno otherwise
2992 int ata_dev_revalidate(struct ata_device *dev, int post_reset)
2994 unsigned int class = dev->class;
2995 u16 *id = (void *)dev->ap->sector_buf;
2996 int rc;
2998 if (!ata_dev_enabled(dev)) {
2999 rc = -ENODEV;
3000 goto fail;
3003 /* read ID data */
3004 rc = ata_dev_read_id(dev, &class, post_reset, id);
3005 if (rc)
3006 goto fail;
3008 /* is the device still there? */
3009 if (!ata_dev_same_device(dev, class, id)) {
3010 rc = -ENODEV;
3011 goto fail;
3014 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3016 /* configure device according to the new ID */
3017 rc = ata_dev_configure(dev, 0);
3018 if (rc == 0)
3019 return 0;
3021 fail:
3022 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3023 return rc;
3026 struct ata_blacklist_entry {
3027 const char *model_num;
3028 const char *model_rev;
3029 unsigned long horkage;
3032 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3033 /* Devices with DMA related problems under Linux */
3034 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3035 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3036 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3037 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3038 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3039 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3040 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3041 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3042 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3043 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3044 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3045 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3046 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3047 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3048 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3049 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3050 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3051 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3052 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3053 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3054 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3055 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3056 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3057 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3058 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3059 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3060 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3061 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3062 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3063 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3065 /* Devices we expect to fail diagnostics */
3067 /* Devices where NCQ should be avoided */
3068 /* NCQ is slow */
3069 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3071 /* Devices with NCQ limits */
3073 /* End Marker */
3077 static int ata_strim(char *s, size_t len)
3079 len = strnlen(s, len);
3081 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3082 while ((len > 0) && (s[len - 1] == ' ')) {
3083 len--;
3084 s[len] = 0;
3086 return len;
3089 unsigned long ata_device_blacklisted(const struct ata_device *dev)
3091 unsigned char model_num[40];
3092 unsigned char model_rev[16];
3093 unsigned int nlen, rlen;
3094 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3096 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3097 sizeof(model_num));
3098 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3099 sizeof(model_rev));
3100 nlen = ata_strim(model_num, sizeof(model_num));
3101 rlen = ata_strim(model_rev, sizeof(model_rev));
3103 while (ad->model_num) {
3104 if (!strncmp(ad->model_num, model_num, nlen)) {
3105 if (ad->model_rev == NULL)
3106 return ad->horkage;
3107 if (!strncmp(ad->model_rev, model_rev, rlen))
3108 return ad->horkage;
3110 ad++;
3112 return 0;
3115 static int ata_dma_blacklisted(const struct ata_device *dev)
3117 /* We don't support polling DMA.
3118 * Blacklist (and fall back to PIO for) ATAPI devices with CDB-intr
3119 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3121 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3122 (dev->flags & ATA_DFLAG_CDB_INTR))
3123 return 1;
3124 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3128 * ata_dev_xfermask - Compute supported xfermask of the given device
3129 * @dev: Device to compute xfermask for
3131 * Compute supported xfermask of @dev and store it in
3132 * dev->*_mask. This function is responsible for applying all
3133 * known limits including host controller limits, device
3134 * blacklist, etc...
3136 * LOCKING:
3137 * None.
3139 static void ata_dev_xfermask(struct ata_device *dev)
3141 struct ata_port *ap = dev->ap;
3142 struct ata_host *host = ap->host;
3143 unsigned long xfer_mask;
3145 /* controller modes available */
3146 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3147 ap->mwdma_mask, ap->udma_mask);
3149 /* Apply cable rule here. Don't apply it early because when
3150 * we handle hot plug the cable type can itself change.
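/* A 40-wire cable cannot carry the higher UDMA rates: clearing the
 * 0xF8 mask below removes UDMA3 and above, leaving UDMA2 as the top
 * speed on such cables.
 */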
3152 if (ap->cbl == ATA_CBL_PATA40)
3153 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3154 /* Apply the drive-side cable rule. Cables the host side reports
3155 * as unknown or 80-wire are checked on the drive side as well.
3156 * Cases where a 40-wire cable is known safe at 80-wire speeds are not checked here.
3158 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3159 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3162 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3163 dev->mwdma_mask, dev->udma_mask);
3164 xfer_mask &= ata_id_xfermask(dev->id);
3167 * CFA Advanced TrueIDE timings are not allowed on a shared
3168 * cable
3170 if (ata_dev_pair(dev)) {
3171 /* No PIO5 or PIO6 */
3172 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3173 /* No MWDMA3 or MWDMA 4 */
3174 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3177 if (ata_dma_blacklisted(dev)) {
3178 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3179 ata_dev_printk(dev, KERN_WARNING,
3180 "device is on DMA blacklist, disabling DMA\n");
3183 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3184 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3185 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3186 "other device, disabling DMA\n");
3189 if (ap->ops->mode_filter)
3190 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3192 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3193 &dev->mwdma_mask, &dev->udma_mask);
3197 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3198 * @dev: Device to which command will be sent
3200 * Issue SET FEATURES - XFER MODE command to device @dev
3201 * on the port it is attached to.
3203 * LOCKING:
3204 * PCI/etc. bus probe sem.
3206 * RETURNS:
3207 * 0 on success, AC_ERR_* mask otherwise.
3210 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3212 struct ata_taskfile tf;
3213 unsigned int err_mask;
3215 /* set up set-features taskfile */
3216 DPRINTK("set features - xfer mode\n");
3218 ata_tf_init(dev, &tf);
3219 tf.command = ATA_CMD_SET_FEATURES;
3220 tf.feature = SETFEATURES_XFER;
3221 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3222 tf.protocol = ATA_PROT_NODATA;
3223 tf.nsect = dev->xfer_mode;
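/* The mode value itself (e.g. XFER_UDMA_5 == 0x45) is carried in the
 * sector count register, as the SET FEATURES - XFER MODE subcommand
 * expects.
 */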
3225 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3227 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3228 return err_mask;
3232 * ata_dev_init_params - Issue INIT DEV PARAMS command
3233 * @dev: Device to which command will be sent
3234 * @heads: Number of heads (taskfile parameter)
3235 * @sectors: Number of sectors (taskfile parameter)
3237 * LOCKING:
3238 * Kernel thread context (may sleep)
3240 * RETURNS:
3241 * 0 on success, AC_ERR_* mask otherwise.
3243 static unsigned int ata_dev_init_params(struct ata_device *dev,
3244 u16 heads, u16 sectors)
3246 struct ata_taskfile tf;
3247 unsigned int err_mask;
3249 /* Number of sectors per track 1-255. Number of heads 1-16 */
3250 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3251 return AC_ERR_INVALID;
3253 /* set up init dev params taskfile */
3254 DPRINTK("init dev params \n");
3256 ata_tf_init(dev, &tf);
3257 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3258 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3259 tf.protocol = ATA_PROT_NODATA;
3260 tf.nsect = sectors;
3261 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3263 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3265 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3266 return err_mask;
3270 * ata_sg_clean - Unmap DMA memory associated with command
3271 * @qc: Command containing DMA memory to be released
3273 * Unmap all mapped DMA memory associated with this command.
3275 * LOCKING:
3276 * spin_lock_irqsave(host lock)
3279 static void ata_sg_clean(struct ata_queued_cmd *qc)
3281 struct ata_port *ap = qc->ap;
3282 struct scatterlist *sg = qc->__sg;
3283 int dir = qc->dma_dir;
3284 void *pad_buf = NULL;
3286 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3287 WARN_ON(sg == NULL);
3289 if (qc->flags & ATA_QCFLAG_SINGLE)
3290 WARN_ON(qc->n_elem > 1);
3292 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3294 /* if we padded the buffer out to a 32-bit boundary, and data
3295 * xfer direction is from-device, we must copy from the
3296 * pad buffer back into the supplied buffer
3298 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3299 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3301 if (qc->flags & ATA_QCFLAG_SG) {
3302 if (qc->n_elem)
3303 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3304 /* restore last sg */
3305 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3306 if (pad_buf) {
3307 struct scatterlist *psg = &qc->pad_sgent;
3308 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3309 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3310 kunmap_atomic(addr, KM_IRQ0);
3312 } else {
3313 if (qc->n_elem)
3314 dma_unmap_single(ap->dev,
3315 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3316 dir);
3317 /* restore sg */
3318 sg->length += qc->pad_len;
3319 if (pad_buf)
3320 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3321 pad_buf, qc->pad_len);
3324 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3325 qc->__sg = NULL;
3329 * ata_fill_sg - Fill PCI IDE PRD table
3330 * @qc: Metadata associated with taskfile to be transferred
3332 * Fill PCI IDE PRD (scatter-gather) table with segments
3333 * associated with the current disk command.
3335 * LOCKING:
3336 * spin_lock_irqsave(host lock)
3339 static void ata_fill_sg(struct ata_queued_cmd *qc)
3341 struct ata_port *ap = qc->ap;
3342 struct scatterlist *sg;
3343 unsigned int idx;
3345 WARN_ON(qc->__sg == NULL);
3346 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3348 idx = 0;
3349 ata_for_each_sg(sg, qc) {
3350 u32 addr, offset;
3351 u32 sg_len, len;
3353 /* determine if physical DMA addr spans 64K boundary.
3354 * Note h/w doesn't support 64-bit, so we unconditionally
3355 * truncate dma_addr_t to u32.
3357 addr = (u32) sg_dma_address(sg);
3358 sg_len = sg_dma_len(sg);
3360 while (sg_len) {
3361 offset = addr & 0xffff;
3362 len = sg_len;
3363 if ((offset + sg_len) > 0x10000)
3364 len = 0x10000 - offset;
3366 ap->prd[idx].addr = cpu_to_le32(addr);
3367 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3368 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3370 idx++;
3371 sg_len -= len;
3372 addr += len;
3376 if (idx)
3377 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
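/*
 * Worked example for the 64K split above (editor's note; the addresses
 * are illustrative only): a segment at addr 0x1fff0 with sg_len 0x8000
 * has offset 0xfff0, so the first PRD entry covers the 0x10 bytes up to
 * the 0x20000 boundary and a second entry covers the remaining 0x7ff0
 * bytes starting at 0x20000.
 */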
3380 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3381 * @qc: Metadata associated with taskfile to check
3383 * Allow low-level driver to filter ATA PACKET commands, returning
3384 * a status indicating whether or not it is OK to use DMA for the
3385 * supplied PACKET command.
3387 * LOCKING:
3388 * spin_lock_irqsave(host lock)
3390 * RETURNS: 0 when ATAPI DMA can be used
3391 * nonzero otherwise
3393 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3395 struct ata_port *ap = qc->ap;
3396 int rc = 0; /* Assume ATAPI DMA is OK by default */
3398 if (ap->ops->check_atapi_dma)
3399 rc = ap->ops->check_atapi_dma(qc);
3401 return rc;
3404 * ata_qc_prep - Prepare taskfile for submission
3405 * @qc: Metadata associated with taskfile to be prepared
3407 * Prepare ATA taskfile for submission.
3409 * LOCKING:
3410 * spin_lock_irqsave(host lock)
3412 void ata_qc_prep(struct ata_queued_cmd *qc)
3414 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3415 return;
3417 ata_fill_sg(qc);
3420 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3423 * ata_sg_init_one - Associate command with memory buffer
3424 * @qc: Command to be associated
3425 * @buf: Memory buffer
3426 * @buflen: Length of memory buffer, in bytes.
3428 * Initialize the data-related elements of queued_cmd @qc
3429 * to point to a single memory buffer, @buf of byte length @buflen.
3431 * LOCKING:
3432 * spin_lock_irqsave(host lock)
3435 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3437 struct scatterlist *sg;
3439 qc->flags |= ATA_QCFLAG_SINGLE;
3441 memset(&qc->sgent, 0, sizeof(qc->sgent));
3442 qc->__sg = &qc->sgent;
3443 qc->n_elem = 1;
3444 qc->orig_n_elem = 1;
3445 qc->buf_virt = buf;
3446 qc->nbytes = buflen;
3448 sg = qc->__sg;
3449 sg_init_one(sg, buf, buflen);
3453 * ata_sg_init - Associate command with scatter-gather table.
3454 * @qc: Command to be associated
3455 * @sg: Scatter-gather table.
3456 * @n_elem: Number of elements in s/g table.
3458 * Initialize the data-related elements of queued_cmd @qc
3459 * to point to a scatter-gather table @sg, containing @n_elem
3460 * elements.
3462 * LOCKING:
3463 * spin_lock_irqsave(host lock)
3466 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3467 unsigned int n_elem)
3469 qc->flags |= ATA_QCFLAG_SG;
3470 qc->__sg = sg;
3471 qc->n_elem = n_elem;
3472 qc->orig_n_elem = n_elem;
3476 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3477 * @qc: Command with memory buffer to be mapped.
3479 * DMA-map the memory buffer associated with queued_cmd @qc.
3481 * LOCKING:
3482 * spin_lock_irqsave(host lock)
3484 * RETURNS:
3485 * Zero on success, negative on error.
3488 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3490 struct ata_port *ap = qc->ap;
3491 int dir = qc->dma_dir;
3492 struct scatterlist *sg = qc->__sg;
3493 dma_addr_t dma_address;
3494 int trim_sg = 0;
3496 /* we must lengthen transfers to end on a 32-bit boundary */
3497 qc->pad_len = sg->length & 3;
3498 if (qc->pad_len) {
3499 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3500 struct scatterlist *psg = &qc->pad_sgent;
3502 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3504 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3506 if (qc->tf.flags & ATA_TFLAG_WRITE)
3507 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3508 qc->pad_len);
3510 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3511 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3512 /* trim sg */
3513 sg->length -= qc->pad_len;
3514 if (sg->length == 0)
3515 trim_sg = 1;
3517 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3518 sg->length, qc->pad_len);
3521 if (trim_sg) {
3522 qc->n_elem--;
3523 goto skip_map;
3526 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3527 sg->length, dir);
3528 if (dma_mapping_error(dma_address)) {
3529 /* restore sg */
3530 sg->length += qc->pad_len;
3531 return -1;
3534 sg_dma_address(sg) = dma_address;
3535 sg_dma_len(sg) = sg->length;
3537 skip_map:
3538 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3539 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3541 return 0;
3545 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3546 * @qc: Command with scatter-gather table to be mapped.
3548 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3550 * LOCKING:
3551 * spin_lock_irqsave(host lock)
3553 * RETURNS:
3554 * Zero on success, negative on error.
3558 static int ata_sg_setup(struct ata_queued_cmd *qc)
3560 struct ata_port *ap = qc->ap;
3561 struct scatterlist *sg = qc->__sg;
3562 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3563 int n_elem, pre_n_elem, dir, trim_sg = 0;
3565 VPRINTK("ENTER, ata%u\n", ap->id);
3566 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3568 /* we must lengthen transfers to end on a 32-bit boundary */
3569 qc->pad_len = lsg->length & 3;
3570 if (qc->pad_len) {
3571 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3572 struct scatterlist *psg = &qc->pad_sgent;
3573 unsigned int offset;
3575 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3577 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3580 * psg->page/offset are used to copy to-be-written
3581 * data in this function or read data in ata_sg_clean.
3583 offset = lsg->offset + lsg->length - qc->pad_len;
3584 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3585 psg->offset = offset_in_page(offset);
3587 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3588 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3589 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3590 kunmap_atomic(addr, KM_IRQ0);
3593 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3594 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3595 /* trim last sg */
3596 lsg->length -= qc->pad_len;
3597 if (lsg->length == 0)
3598 trim_sg = 1;
3600 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3601 qc->n_elem - 1, lsg->length, qc->pad_len);
3604 pre_n_elem = qc->n_elem;
3605 if (trim_sg && pre_n_elem)
3606 pre_n_elem--;
3608 if (!pre_n_elem) {
3609 n_elem = 0;
3610 goto skip_map;
3613 dir = qc->dma_dir;
3614 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3615 if (n_elem < 1) {
3616 /* restore last sg */
3617 lsg->length += qc->pad_len;
3618 return -1;
3621 DPRINTK("%d sg elements mapped\n", n_elem);
3623 skip_map:
3624 qc->n_elem = n_elem;
3626 return 0;
3630 * swap_buf_le16 - swap halves of 16-bit words in place
3631 * @buf: Buffer to swap
3632 * @buf_words: Number of 16-bit words in buffer.
3634 * Swap halves of 16-bit words if needed to convert from
3635 * little-endian byte order to native cpu byte order, or
3636 * vice-versa.
3638 * LOCKING:
3639 * Inherited from caller.
3641 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3643 #ifdef __BIG_ENDIAN
3644 unsigned int i;
3646 for (i = 0; i < buf_words; i++)
3647 buf[i] = le16_to_cpu(buf[i]);
3648 #endif /* __BIG_ENDIAN */
3652 * ata_mmio_data_xfer - Transfer data by MMIO
3653 * @adev: device for this I/O
3654 * @buf: data buffer
3655 * @buflen: buffer length
3656 * @write_data: read/write
3658 * Transfer data from/to the device data register by MMIO.
3660 * LOCKING:
3661 * Inherited from caller.
3664 void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
3665 unsigned int buflen, int write_data)
3667 struct ata_port *ap = adev->ap;
3668 unsigned int i;
3669 unsigned int words = buflen >> 1;
3670 u16 *buf16 = (u16 *) buf;
3671 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3673 /* Transfer multiple of 2 bytes */
3674 if (write_data) {
3675 for (i = 0; i < words; i++)
3676 writew(le16_to_cpu(buf16[i]), mmio);
3677 } else {
3678 for (i = 0; i < words; i++)
3679 buf16[i] = cpu_to_le16(readw(mmio));
3682 /* Transfer trailing 1 byte, if any. */
3683 if (unlikely(buflen & 0x01)) {
3684 u16 align_buf[1] = { 0 };
3685 unsigned char *trailing_buf = buf + buflen - 1;
3687 if (write_data) {
3688 memcpy(align_buf, trailing_buf, 1);
3689 writew(le16_to_cpu(align_buf[0]), mmio);
3690 } else {
3691 align_buf[0] = cpu_to_le16(readw(mmio));
3692 memcpy(trailing_buf, align_buf, 1);
3698 * ata_pio_data_xfer - Transfer data by PIO
3699 * @adev: device to target
3700 * @buf: data buffer
3701 * @buflen: buffer length
3702 * @write_data: read/write
3704 * Transfer data from/to the device data register by PIO.
3706 * LOCKING:
3707 * Inherited from caller.
3710 void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
3711 unsigned int buflen, int write_data)
3713 struct ata_port *ap = adev->ap;
3714 unsigned int words = buflen >> 1;
3716 /* Transfer multiple of 2 bytes */
3717 if (write_data)
3718 outsw(ap->ioaddr.data_addr, buf, words);
3719 else
3720 insw(ap->ioaddr.data_addr, buf, words);
3722 /* Transfer trailing 1 byte, if any. */
3723 if (unlikely(buflen & 0x01)) {
3724 u16 align_buf[1] = { 0 };
3725 unsigned char *trailing_buf = buf + buflen - 1;
3727 if (write_data) {
3728 memcpy(align_buf, trailing_buf, 1);
3729 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3730 } else {
3731 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3732 memcpy(trailing_buf, align_buf, 1);
3738 * ata_pio_data_xfer_noirq - Transfer data by PIO
3739 * @adev: device to target
3740 * @buf: data buffer
3741 * @buflen: buffer length
3742 * @write_data: read/write
3744 * Transfer data from/to the device data register by PIO. Do the
3745 * transfer with interrupts disabled.
3747 * LOCKING:
3748 * Inherited from caller.
3751 void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3752 unsigned int buflen, int write_data)
3754 unsigned long flags;
3755 local_irq_save(flags);
3756 ata_pio_data_xfer(adev, buf, buflen, write_data);
3757 local_irq_restore(flags);
3762 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3763 * @qc: Command on going
3765 * Transfer ATA_SECT_SIZE bytes of data from/to the ATA device.
3767 * LOCKING:
3768 * Inherited from caller.
3771 static void ata_pio_sector(struct ata_queued_cmd *qc)
3773 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3774 struct scatterlist *sg = qc->__sg;
3775 struct ata_port *ap = qc->ap;
3776 struct page *page;
3777 unsigned int offset;
3778 unsigned char *buf;
3780 if (qc->cursect == (qc->nsect - 1))
3781 ap->hsm_task_state = HSM_ST_LAST;
3783 page = sg[qc->cursg].page;
3784 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3786 /* get the current page and offset */
3787 page = nth_page(page, (offset >> PAGE_SHIFT));
3788 offset %= PAGE_SIZE;
3790 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3792 if (PageHighMem(page)) {
3793 unsigned long flags;
3795 /* FIXME: use a bounce buffer */
3796 local_irq_save(flags);
3797 buf = kmap_atomic(page, KM_IRQ0);
3799 /* do the actual data transfer */
3800 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3802 kunmap_atomic(buf, KM_IRQ0);
3803 local_irq_restore(flags);
3804 } else {
3805 buf = page_address(page);
3806 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3809 qc->cursect++;
3810 qc->cursg_ofs++;
3812 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3813 qc->cursg++;
3814 qc->cursg_ofs = 0;
3819 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3820 * @qc: Command on going
3822 * Transfer one or more sectors of ATA_SECT_SIZE bytes from/to the
3823 * ATA device for the DRQ request.
3825 * LOCKING:
3826 * Inherited from caller.
3829 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3831 if (is_multi_taskfile(&qc->tf)) {
3832 /* READ/WRITE MULTIPLE */
3833 unsigned int nsect;
3835 WARN_ON(qc->dev->multi_count == 0);
3837 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3838 while (nsect--)
3839 ata_pio_sector(qc);
3840 } else
3841 ata_pio_sector(qc);
3845 * atapi_send_cdb - Write CDB bytes to hardware
3846 * @ap: Port to which ATAPI device is attached.
3847 * @qc: Taskfile currently active
3849 * When the device has indicated its readiness to accept
3850 * a CDB, this function is called. Send the CDB.
3852 * LOCKING:
3853 * caller.
3856 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3858 /* send SCSI cdb */
3859 DPRINTK("send cdb\n");
3860 WARN_ON(qc->dev->cdb_len < 12);
3862 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3863 ata_altstatus(ap); /* flush */
3865 switch (qc->tf.protocol) {
3866 case ATA_PROT_ATAPI:
3867 ap->hsm_task_state = HSM_ST;
3868 break;
3869 case ATA_PROT_ATAPI_NODATA:
3870 ap->hsm_task_state = HSM_ST_LAST;
3871 break;
3872 case ATA_PROT_ATAPI_DMA:
3873 ap->hsm_task_state = HSM_ST_LAST;
3874 /* initiate bmdma */
3875 ap->ops->bmdma_start(qc);
3876 break;
3881 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3882 * @qc: Command on going
3883 * @bytes: number of bytes
3885 * Transfer data from/to the ATAPI device.
3887 * LOCKING:
3888 * Inherited from caller.
3892 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3894 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3895 struct scatterlist *sg = qc->__sg;
3896 struct ata_port *ap = qc->ap;
3897 struct page *page;
3898 unsigned char *buf;
3899 unsigned int offset, count;
3901 if (qc->curbytes + bytes >= qc->nbytes)
3902 ap->hsm_task_state = HSM_ST_LAST;
3904 next_sg:
3905 if (unlikely(qc->cursg >= qc->n_elem)) {
3907 * The end of qc->sg is reached and the device expects
3908 * more data to transfer. In order not to overrun qc->sg
3909 * and fulfill the length specified in the byte count register,
3910 * - for the read case, discard trailing data from the device
3911 * - for the write case, pad the device with zero data
3913 u16 pad_buf[1] = { 0 };
3914 unsigned int words = bytes >> 1;
3915 unsigned int i;
3917 if (words) /* warning if bytes > 1 */
3918 ata_dev_printk(qc->dev, KERN_WARNING,
3919 "%u bytes trailing data\n", bytes);
3921 for (i = 0; i < words; i++)
3922 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
3924 ap->hsm_task_state = HSM_ST_LAST;
3925 return;
3928 sg = &qc->__sg[qc->cursg];
3930 page = sg->page;
3931 offset = sg->offset + qc->cursg_ofs;
3933 /* get the current page and offset */
3934 page = nth_page(page, (offset >> PAGE_SHIFT));
3935 offset %= PAGE_SIZE;
3937 /* don't overrun current sg */
3938 count = min(sg->length - qc->cursg_ofs, bytes);
3940 /* don't cross page boundaries */
3941 count = min(count, (unsigned int)PAGE_SIZE - offset);
3943 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3945 if (PageHighMem(page)) {
3946 unsigned long flags;
3948 /* FIXME: use bounce buffer */
3949 local_irq_save(flags);
3950 buf = kmap_atomic(page, KM_IRQ0);
3952 /* do the actual data transfer */
3953 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3955 kunmap_atomic(buf, KM_IRQ0);
3956 local_irq_restore(flags);
3957 } else {
3958 buf = page_address(page);
3959 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
3962 bytes -= count;
3963 qc->curbytes += count;
3964 qc->cursg_ofs += count;
3966 if (qc->cursg_ofs == sg->length) {
3967 qc->cursg++;
3968 qc->cursg_ofs = 0;
3971 if (bytes)
3972 goto next_sg;
3976 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3977 * @qc: Command on going
3979 * Transfer data from/to the ATAPI device.
3981 * LOCKING:
3982 * Inherited from caller.
3985 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3987 struct ata_port *ap = qc->ap;
3988 struct ata_device *dev = qc->dev;
3989 unsigned int ireason, bc_lo, bc_hi, bytes;
3990 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3992 /* Abuse qc->result_tf for temp storage of intermediate TF
3993 * here to save some kernel stack usage.
3994 * For normal completion, qc->result_tf is not relevant. For
3995 * error, qc->result_tf is later overwritten by ata_qc_complete().
3996 * So, the correctness of qc->result_tf is not affected.
3998 ap->ops->tf_read(ap, &qc->result_tf);
3999 ireason = qc->result_tf.nsect;
4000 bc_lo = qc->result_tf.lbam;
4001 bc_hi = qc->result_tf.lbah;
4002 bytes = (bc_hi << 8) | bc_lo;
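/* ireason is the ATAPI interrupt reason (sector count) register:
 * bit 0 (CoD) set means the device expects a command packet rather
 * than data, bit 1 (IO) set means the transfer is device-to-host.
 */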
4004 /* shall be cleared to zero, indicating xfer of data */
4005 if (ireason & (1 << 0))
4006 goto err_out;
4008 /* make sure transfer direction matches expected */
4009 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4010 if (do_write != i_write)
4011 goto err_out;
4013 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4015 __atapi_pio_bytes(qc, bytes);
4017 return;
4019 err_out:
4020 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4021 qc->err_mask |= AC_ERR_HSM;
4022 ap->hsm_task_state = HSM_ST_ERR;
4026 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4027 * @ap: the target ata_port
4028 * @qc: qc on going
4030 * RETURNS:
4031 * 1 if ok in workqueue, 0 otherwise.
4034 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4036 if (qc->tf.flags & ATA_TFLAG_POLLING)
4037 return 1;
4039 if (ap->hsm_task_state == HSM_ST_FIRST) {
4040 if (qc->tf.protocol == ATA_PROT_PIO &&
4041 (qc->tf.flags & ATA_TFLAG_WRITE))
4042 return 1;
4044 if (is_atapi_taskfile(&qc->tf) &&
4045 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4046 return 1;
4049 return 0;
4053 * ata_hsm_qc_complete - finish a qc running on standard HSM
4054 * @qc: Command to complete
4055 * @in_wq: 1 if called from workqueue, 0 otherwise
4057 * Finish @qc which is running on standard HSM.
4059 * LOCKING:
4060 * If @in_wq is zero, spin_lock_irqsave(host lock).
4061 * Otherwise, none on entry and grabs host lock.
4063 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4065 struct ata_port *ap = qc->ap;
4066 unsigned long flags;
4068 if (ap->ops->error_handler) {
4069 if (in_wq) {
4070 spin_lock_irqsave(ap->lock, flags);
4072 /* EH might have kicked in while host lock is
4073 * released.
4075 qc = ata_qc_from_tag(ap, qc->tag);
4076 if (qc) {
4077 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4078 ata_irq_on(ap);
4079 ata_qc_complete(qc);
4080 } else
4081 ata_port_freeze(ap);
4084 spin_unlock_irqrestore(ap->lock, flags);
4085 } else {
4086 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4087 ata_qc_complete(qc);
4088 else
4089 ata_port_freeze(ap);
4091 } else {
4092 if (in_wq) {
4093 spin_lock_irqsave(ap->lock, flags);
4094 ata_irq_on(ap);
4095 ata_qc_complete(qc);
4096 spin_unlock_irqrestore(ap->lock, flags);
4097 } else
4098 ata_qc_complete(qc);
4101 ata_altstatus(ap); /* flush */
4105 * ata_hsm_move - move the HSM to the next state.
4106 * @ap: the target ata_port
4107 * @qc: qc on going
4108 * @status: current device status
4109 * @in_wq: 1 if called from workqueue, 0 otherwise
4111 * RETURNS:
4112 * 1 when poll next status needed, 0 otherwise.
4114 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4115 u8 status, int in_wq)
4117 unsigned long flags = 0;
4118 int poll_next;
4120 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4122 /* Make sure ata_qc_issue_prot() does not throw things
4123 * like DMA polling into the workqueue. Notice that
4124 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4126 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4128 fsm_start:
4129 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4130 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4132 switch (ap->hsm_task_state) {
4133 case HSM_ST_FIRST:
4134 /* Send first data block or PACKET CDB */
4136 /* If polling, we will stay in the work queue after
4137 * sending the data. Otherwise, interrupt handler
4138 * takes over after sending the data.
4140 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4142 /* check device status */
4143 if (unlikely((status & ATA_DRQ) == 0)) {
4144 /* handle BSY=0, DRQ=0 as error */
4145 if (likely(status & (ATA_ERR | ATA_DF)))
4146 /* device stops HSM for abort/error */
4147 qc->err_mask |= AC_ERR_DEV;
4148 else
4149 /* HSM violation. Let EH handle this */
4150 qc->err_mask |= AC_ERR_HSM;
4152 ap->hsm_task_state = HSM_ST_ERR;
4153 goto fsm_start;
4156 /* Device should not ask for data transfer (DRQ=1)
4157 * when it finds something wrong.
4158 * We ignore DRQ here and stop the HSM by
4159 * changing hsm_task_state to HSM_ST_ERR and
4160 * let the EH abort the command or reset the device.
4162 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4163 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4164 ap->id, status);
4165 qc->err_mask |= AC_ERR_HSM;
4166 ap->hsm_task_state = HSM_ST_ERR;
4167 goto fsm_start;
4170 /* Send the CDB (atapi) or the first data block (ata pio out).
4171 * During the state transition, interrupt handler shouldn't
4172 * be invoked before the data transfer is complete and
4173 * hsm_task_state is changed. Hence, the following locking.
4175 if (in_wq)
4176 spin_lock_irqsave(ap->lock, flags);
4178 if (qc->tf.protocol == ATA_PROT_PIO) {
4179 /* PIO data out protocol.
4180 * send first data block.
4183 /* ata_pio_sectors() might change the state
4184 * to HSM_ST_LAST. so, the state is changed here
4185 * before ata_pio_sectors().
4187 ap->hsm_task_state = HSM_ST;
4188 ata_pio_sectors(qc);
4189 ata_altstatus(ap); /* flush */
4190 } else
4191 /* send CDB */
4192 atapi_send_cdb(ap, qc);
4194 if (in_wq)
4195 spin_unlock_irqrestore(ap->lock, flags);
4197 /* if polling, ata_pio_task() handles the rest.
4198 * otherwise, interrupt handler takes over from here.
4200 break;
4202 case HSM_ST:
4203 /* complete command or read/write the data register */
4204 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4205 /* ATAPI PIO protocol */
4206 if ((status & ATA_DRQ) == 0) {
4207 /* No more data to transfer or device error.
4208 * Device error will be tagged in HSM_ST_LAST.
4210 ap->hsm_task_state = HSM_ST_LAST;
4211 goto fsm_start;
4214 /* Device should not ask for data transfer (DRQ=1)
4215 * when it finds something wrong.
4216 * We ignore DRQ here and stop the HSM by
4217 * changing hsm_task_state to HSM_ST_ERR and
4218 * let the EH abort the command or reset the device.
4220 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4221 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4222 ap->id, status);
4223 qc->err_mask |= AC_ERR_HSM;
4224 ap->hsm_task_state = HSM_ST_ERR;
4225 goto fsm_start;
4228 atapi_pio_bytes(qc);
4230 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4231 /* bad ireason reported by device */
4232 goto fsm_start;
4234 } else {
4235 /* ATA PIO protocol */
4236 if (unlikely((status & ATA_DRQ) == 0)) {
4237 /* handle BSY=0, DRQ=0 as error */
4238 if (likely(status & (ATA_ERR | ATA_DF)))
4239 /* device stops HSM for abort/error */
4240 qc->err_mask |= AC_ERR_DEV;
4241 else
4242 /* HSM violation. Let EH handle this */
4243 qc->err_mask |= AC_ERR_HSM;
4245 ap->hsm_task_state = HSM_ST_ERR;
4246 goto fsm_start;
4249 /* For PIO reads, some devices may ask for
4250 * data transfer (DRQ=1) along with ERR=1.
4251 * We respect DRQ here and transfer one
4252 * block of junk data before changing the
4253 * hsm_task_state to HSM_ST_ERR.
4255 * For PIO writes, ERR=1 DRQ=1 doesn't make
4256 * sense since the data block has been
4257 * transferred to the device.
4259 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4260 /* data might be corrupted */
4261 qc->err_mask |= AC_ERR_DEV;
4263 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4264 ata_pio_sectors(qc);
4265 ata_altstatus(ap);
4266 status = ata_wait_idle(ap);
4269 if (status & (ATA_BUSY | ATA_DRQ))
4270 qc->err_mask |= AC_ERR_HSM;
4272 /* ata_pio_sectors() might change the
4273 * state to HSM_ST_LAST. so, the state
4274 * is changed after ata_pio_sectors().
4276 ap->hsm_task_state = HSM_ST_ERR;
4277 goto fsm_start;
4280 ata_pio_sectors(qc);
4282 if (ap->hsm_task_state == HSM_ST_LAST &&
4283 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4284 /* all data read */
4285 ata_altstatus(ap);
4286 status = ata_wait_idle(ap);
4287 goto fsm_start;
4291 ata_altstatus(ap); /* flush */
4292 poll_next = 1;
4293 break;
4295 case HSM_ST_LAST:
4296 if (unlikely(!ata_ok(status))) {
4297 qc->err_mask |= __ac_err_mask(status);
4298 ap->hsm_task_state = HSM_ST_ERR;
4299 goto fsm_start;
4302 /* no more data to transfer */
4303 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4304 ap->id, qc->dev->devno, status);
4306 WARN_ON(qc->err_mask);
4308 ap->hsm_task_state = HSM_ST_IDLE;
4310 /* complete taskfile transaction */
4311 ata_hsm_qc_complete(qc, in_wq);
4313 poll_next = 0;
4314 break;
4316 case HSM_ST_ERR:
4317 /* make sure qc->err_mask is available to
4318 * know what's wrong and recover
4320 WARN_ON(qc->err_mask == 0);
4322 ap->hsm_task_state = HSM_ST_IDLE;
4324 /* complete taskfile transaction */
4325 ata_hsm_qc_complete(qc, in_wq);
4327 poll_next = 0;
4328 break;
4329 default:
4330 poll_next = 0;
4331 BUG();
4334 return poll_next;
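/*
 * Editor's note summarizing the state machine above: a command normally
 * walks HSM_ST_FIRST (send CDB or first data block) -> HSM_ST (data
 * transfer) -> HSM_ST_LAST (final status check and completion) ->
 * HSM_ST_IDLE; any failure sets qc->err_mask and routes through
 * HSM_ST_ERR before completion.
 */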
4337 static void ata_pio_task(void *_data)
4339 struct ata_queued_cmd *qc = _data;
4340 struct ata_port *ap = qc->ap;
4341 u8 status;
4342 int poll_next;
4344 fsm_start:
4345 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4348 * This is purely heuristic. This is a fast path.
4349 * Sometimes when we enter, BSY will be cleared in
4350 * a chk-status or two. If not, the drive is probably seeking
4351 * or something. Snooze for a couple msecs, then
4352 * chk-status again. If still busy, queue delayed work.
4354 status = ata_busy_wait(ap, ATA_BUSY, 5);
4355 if (status & ATA_BUSY) {
4356 msleep(2);
4357 status = ata_busy_wait(ap, ATA_BUSY, 10);
4358 if (status & ATA_BUSY) {
4359 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4360 return;
4364 /* move the HSM */
4365 poll_next = ata_hsm_move(ap, qc, status, 1);
4367 /* another command or interrupt handler
4368 * may be running at this point.
4370 if (poll_next)
4371 goto fsm_start;
4375 * ata_qc_new - Request an available ATA command, for queueing
4376 * @ap: Port associated with device @dev
4377 * @dev: Device from whom we request an available command structure
4379 * LOCKING:
4380 * None.
4383 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4385 struct ata_queued_cmd *qc = NULL;
4386 unsigned int i;
4388 /* no command while frozen */
4389 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4390 return NULL;
4392 /* the last tag is reserved for internal command. */
4393 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4394 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4395 qc = __ata_qc_from_tag(ap, i);
4396 break;
4399 if (qc)
4400 qc->tag = i;
4402 return qc;
4406 * ata_qc_new_init - Request an available ATA command, and initialize it
4407 * @dev: Device from whom we request an available command structure
4409 * LOCKING:
4410 * None.
4413 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4415 struct ata_port *ap = dev->ap;
4416 struct ata_queued_cmd *qc;
4418 qc = ata_qc_new(ap);
4419 if (qc) {
4420 qc->scsicmd = NULL;
4421 qc->ap = ap;
4422 qc->dev = dev;
4424 ata_qc_reinit(qc);
4427 return qc;
4431 * ata_qc_free - free unused ata_queued_cmd
4432 * @qc: Command to complete
4434 * Designed to free unused ata_queued_cmd object
4435 * in case something prevents using it.
4437 * LOCKING:
4438 * spin_lock_irqsave(host lock)
4440 void ata_qc_free(struct ata_queued_cmd *qc)
4442 struct ata_port *ap = qc->ap;
4443 unsigned int tag;
4445 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4447 qc->flags = 0;
4448 tag = qc->tag;
4449 if (likely(ata_tag_valid(tag))) {
4450 qc->tag = ATA_TAG_POISON;
4451 clear_bit(tag, &ap->qc_allocated);
4455 void __ata_qc_complete(struct ata_queued_cmd *qc)
4457 struct ata_port *ap = qc->ap;
4459 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4460 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4462 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4463 ata_sg_clean(qc);
4465 /* command should be marked inactive atomically with qc completion */
4466 if (qc->tf.protocol == ATA_PROT_NCQ)
4467 ap->sactive &= ~(1 << qc->tag);
4468 else
4469 ap->active_tag = ATA_TAG_POISON;
4471 /* atapi: mark qc as inactive to prevent the interrupt handler
4472 * from completing the command twice later, before the error handler
4473 * is called. (when rc != 0 and atapi request sense is needed)
4475 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4476 ap->qc_active &= ~(1 << qc->tag);
4478 /* call completion callback */
4479 qc->complete_fn(qc);
4483 * ata_qc_complete - Complete an active ATA command
4484 * @qc: Command to complete
4487 * Indicate to the mid and upper layers that an ATA
4488 * command has completed, with either an ok or not-ok status.
4490 * LOCKING:
4491 * spin_lock_irqsave(host lock)
4493 void ata_qc_complete(struct ata_queued_cmd *qc)
4495 struct ata_port *ap = qc->ap;
4497 /* XXX: New EH and old EH use different mechanisms to
4498 * synchronize EH with regular execution path.
4500 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4501 * Normal execution path is responsible for not accessing a
4502 * failed qc. libata core enforces the rule by returning NULL
4503 * from ata_qc_from_tag() for failed qcs.
4505 * Old EH depends on ata_qc_complete() nullifying completion
4506 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4507 * not synchronize with interrupt handler. Only PIO task is
4508 * taken care of.
4510 if (ap->ops->error_handler) {
4511 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4513 if (unlikely(qc->err_mask))
4514 qc->flags |= ATA_QCFLAG_FAILED;
4516 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4517 if (!ata_tag_internal(qc->tag)) {
4518 /* always fill result TF for failed qc */
4519 ap->ops->tf_read(ap, &qc->result_tf);
4520 ata_qc_schedule_eh(qc);
4521 return;
4525 /* read result TF if requested */
4526 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4527 ap->ops->tf_read(ap, &qc->result_tf);
4529 __ata_qc_complete(qc);
4530 } else {
4531 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4532 return;
4534 /* read result TF if failed or requested */
4535 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4536 ap->ops->tf_read(ap, &qc->result_tf);
4538 __ata_qc_complete(qc);
4543 * ata_qc_complete_multiple - Complete multiple qcs successfully
4544 * @ap: port in question
4545 * @qc_active: new qc_active mask
4546 * @finish_qc: LLDD callback invoked before completing a qc
4548 * Complete in-flight commands. This function is meant to be
4549 * called from the low-level driver's interrupt routine to complete
4550 * requests normally. ap->qc_active and @qc_active are compared
4551 * and commands are completed accordingly.
4553 * LOCKING:
4554 * spin_lock_irqsave(host lock)
4556 * RETURNS:
4557 * Number of completed commands on success, -errno otherwise.
4559 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4560 void (*finish_qc)(struct ata_queued_cmd *))
4562 int nr_done = 0;
4563 u32 done_mask;
4564 int i;
4566 done_mask = ap->qc_active ^ qc_active;
4568 if (unlikely(done_mask & qc_active)) {
4569 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4570 "(%08x->%08x)\n", ap->qc_active, qc_active);
4571 return -EINVAL;
4574 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4575 struct ata_queued_cmd *qc;
4577 if (!(done_mask & (1 << i)))
4578 continue;
4580 if ((qc = ata_qc_from_tag(ap, i))) {
4581 if (finish_qc)
4582 finish_qc(qc);
4583 ata_qc_complete(qc);
4584 nr_done++;
4588 return nr_done;
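/*
 * Usage sketch (illustrative only, not part of this driver): an NCQ-capable
 * LLDD's interrupt handler would typically read the set of still-active tags
 * from its hardware and hand that mask to ata_qc_complete_multiple().  The
 * port_mmio base and PORT_SACTIVE register below are hypothetical.
 *
 *	u32 still_active = readl(port_mmio + PORT_SACTIVE);
 *	int nr_done = ata_qc_complete_multiple(ap, still_active, NULL);
 *
 *	if (nr_done < 0)
 *		ata_port_freeze(ap);	(illegal qc_active transition)
 */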
4591 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4593 struct ata_port *ap = qc->ap;
4595 switch (qc->tf.protocol) {
4596 case ATA_PROT_NCQ:
4597 case ATA_PROT_DMA:
4598 case ATA_PROT_ATAPI_DMA:
4599 return 1;
4601 case ATA_PROT_ATAPI:
4602 case ATA_PROT_PIO:
4603 if (ap->flags & ATA_FLAG_PIO_DMA)
4604 return 1;
4606 /* fall through */
4608 default:
4609 return 0;
4612 /* never reached */
4616 * ata_qc_issue - issue taskfile to device
4617 * @qc: command to issue to device
4619 * Prepare an ATA command for submission to the device.
4620 * This includes mapping the data into a DMA-able
4621 * area, filling in the S/G table, and finally
4622 * writing the taskfile to hardware, starting the command.
4624 * LOCKING:
4625 * spin_lock_irqsave(host lock)
4627 void ata_qc_issue(struct ata_queued_cmd *qc)
4629 struct ata_port *ap = qc->ap;
4631 /* Make sure only one non-NCQ command is outstanding. The
4632 * check is skipped for old EH because it reuses active qc to
4633 * request ATAPI sense.
4635 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4637 if (qc->tf.protocol == ATA_PROT_NCQ) {
4638 WARN_ON(ap->sactive & (1 << qc->tag));
4639 ap->sactive |= 1 << qc->tag;
4640 } else {
4641 WARN_ON(ap->sactive);
4642 ap->active_tag = qc->tag;
4645 qc->flags |= ATA_QCFLAG_ACTIVE;
4646 ap->qc_active |= 1 << qc->tag;
4648 if (ata_should_dma_map(qc)) {
4649 if (qc->flags & ATA_QCFLAG_SG) {
4650 if (ata_sg_setup(qc))
4651 goto sg_err;
4652 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4653 if (ata_sg_setup_one(qc))
4654 goto sg_err;
4656 } else {
4657 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4660 ap->ops->qc_prep(qc);
4662 qc->err_mask |= ap->ops->qc_issue(qc);
4663 if (unlikely(qc->err_mask))
4664 goto err;
4665 return;
4667 sg_err:
4668 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4669 qc->err_mask |= AC_ERR_SYSTEM;
4670 err:
4671 ata_qc_complete(qc);
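/*
 * Usage sketch (illustrative only): the normal submission path allocates a
 * qc, fills in the taskfile and completion callback, and hands it to
 * ata_qc_issue() with the host lock held.  The command choice and the
 * my_complete_fn callback below are hypothetical.
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(dev);
 *
 *	if (qc) {
 *		qc->tf.command = ATA_CMD_STANDBYNOW1;
 *		qc->tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
 *		qc->tf.protocol = ATA_PROT_NODATA;
 *		qc->complete_fn = my_complete_fn;
 *		ata_qc_issue(qc);
 *	}
 */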
4675 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4676 * @qc: command to issue to device
4678 * Using various libata functions and hooks, this function
4679 * starts an ATA command. ATA commands are grouped into
4680 * classes called "protocols", and issuing each type of protocol
4681 * is slightly different.
4683 * May be used as the qc_issue() entry in ata_port_operations.
4685 * LOCKING:
4686 * spin_lock_irqsave(host lock)
4688 * RETURNS:
4689 * Zero on success, AC_ERR_* mask on failure
4692 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4694 struct ata_port *ap = qc->ap;
4696 /* Use polling pio if the LLD doesn't handle
4697 * interrupt driven pio and atapi CDB interrupt.
4699 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4700 switch (qc->tf.protocol) {
4701 case ATA_PROT_PIO:
4702 case ATA_PROT_ATAPI:
4703 case ATA_PROT_ATAPI_NODATA:
4704 qc->tf.flags |= ATA_TFLAG_POLLING;
4705 break;
4706 case ATA_PROT_ATAPI_DMA:
4707 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4708 /* see ata_dma_blacklisted() */
4709 BUG();
4710 break;
4711 default:
4712 break;
4716 /* select the device */
4717 ata_dev_select(ap, qc->dev->devno, 1, 0);
4719 /* start the command */
4720 switch (qc->tf.protocol) {
4721 case ATA_PROT_NODATA:
4722 if (qc->tf.flags & ATA_TFLAG_POLLING)
4723 ata_qc_set_polling(qc);
4725 ata_tf_to_host(ap, &qc->tf);
4726 ap->hsm_task_state = HSM_ST_LAST;
4728 if (qc->tf.flags & ATA_TFLAG_POLLING)
4729 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4731 break;
4733 case ATA_PROT_DMA:
4734 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4736 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4737 ap->ops->bmdma_setup(qc); /* set up bmdma */
4738 ap->ops->bmdma_start(qc); /* initiate bmdma */
4739 ap->hsm_task_state = HSM_ST_LAST;
4740 break;
4742 case ATA_PROT_PIO:
4743 if (qc->tf.flags & ATA_TFLAG_POLLING)
4744 ata_qc_set_polling(qc);
4746 ata_tf_to_host(ap, &qc->tf);
4748 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4749 /* PIO data out protocol */
4750 ap->hsm_task_state = HSM_ST_FIRST;
4751 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4753 /* always send first data block using
4754 * the ata_pio_task() codepath.
4756 } else {
4757 /* PIO data in protocol */
4758 ap->hsm_task_state = HSM_ST;
4760 if (qc->tf.flags & ATA_TFLAG_POLLING)
4761 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4763 /* if polling, ata_pio_task() handles the rest.
4764 * otherwise, interrupt handler takes over from here.
4768 break;
4770 case ATA_PROT_ATAPI:
4771 case ATA_PROT_ATAPI_NODATA:
4772 if (qc->tf.flags & ATA_TFLAG_POLLING)
4773 ata_qc_set_polling(qc);
4775 ata_tf_to_host(ap, &qc->tf);
4777 ap->hsm_task_state = HSM_ST_FIRST;
4779 /* send cdb by polling if no cdb interrupt */
4780 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4781 (qc->tf.flags & ATA_TFLAG_POLLING))
4782 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4783 break;
4785 case ATA_PROT_ATAPI_DMA:
4786 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4788 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4789 ap->ops->bmdma_setup(qc); /* set up bmdma */
4790 ap->hsm_task_state = HSM_ST_FIRST;
4792 /* send cdb by polling if no cdb interrupt */
4793 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4794 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4795 break;
4797 default:
4798 WARN_ON(1);
4799 return AC_ERR_SYSTEM;
4802 return 0;
4806 * ata_host_intr - Handle host interrupt for given (port, task)
4807 * @ap: Port on which interrupt arrived (possibly...)
4808 * @qc: Taskfile currently active in engine
4810 * Handle host interrupt for given queued command. Currently,
4811 * only DMA interrupts are handled. All other commands are
4812 * handled via polling with interrupts disabled (nIEN bit).
4814 * LOCKING:
4815 * spin_lock_irqsave(host lock)
4817 * RETURNS:
4818 * One if interrupt was handled, zero if not (shared irq).
4821 inline unsigned int ata_host_intr (struct ata_port *ap,
4822 struct ata_queued_cmd *qc)
4824 u8 status, host_stat = 0;
4826 VPRINTK("ata%u: protocol %d task_state %d\n",
4827 ap->id, qc->tf.protocol, ap->hsm_task_state);
4829 /* Check whether we are expecting interrupt in this state */
4830 switch (ap->hsm_task_state) {
4831 case HSM_ST_FIRST:
4832 /* Some pre-ATAPI-4 devices assert INTRQ
4833 * at this state when ready to receive CDB.
4836 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4837 * The flag was turned on only for atapi devices.
4838 * No need to check is_atapi_taskfile(&qc->tf) again.
4840 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4841 goto idle_irq;
4842 break;
4843 case HSM_ST_LAST:
4844 if (qc->tf.protocol == ATA_PROT_DMA ||
4845 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4846 /* check status of DMA engine */
4847 host_stat = ap->ops->bmdma_status(ap);
4848 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4850 /* if it's not our irq... */
4851 if (!(host_stat & ATA_DMA_INTR))
4852 goto idle_irq;
4854 /* before we do anything else, clear DMA-Start bit */
4855 ap->ops->bmdma_stop(qc);
4857 if (unlikely(host_stat & ATA_DMA_ERR)) {
4858 /* error when transferring data to/from memory */
4859 qc->err_mask |= AC_ERR_HOST_BUS;
4860 ap->hsm_task_state = HSM_ST_ERR;
4863 break;
4864 case HSM_ST:
4865 break;
4866 default:
4867 goto idle_irq;
4870 /* check altstatus */
4871 status = ata_altstatus(ap);
4872 if (status & ATA_BUSY)
4873 goto idle_irq;
4875 /* check main status, clearing INTRQ */
4876 status = ata_chk_status(ap);
4877 if (unlikely(status & ATA_BUSY))
4878 goto idle_irq;
4880 /* ack bmdma irq events */
4881 ap->ops->irq_clear(ap);
4883 ata_hsm_move(ap, qc, status, 0);
4884 return 1; /* irq handled */
4886 idle_irq:
4887 ap->stats.idle_irq++;
4889 #ifdef ATA_IRQ_TRAP
4890 if ((ap->stats.idle_irq % 1000) == 0) {
4891 ata_irq_ack(ap, 0); /* debug trap */
4892 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
4893 return 1;
4895 #endif
4896 return 0; /* irq not handled */
4900 * ata_interrupt - Default ATA host interrupt handler
4901 * @irq: irq line (unused)
4902 * @dev_instance: pointer to our ata_host information structure
4904 * Default interrupt handler for PCI IDE devices. Calls
4905 * ata_host_intr() for each port that is not disabled.
4907 * LOCKING:
4908 * Obtains host lock during operation.
4910 * RETURNS:
4911 * IRQ_NONE or IRQ_HANDLED.
4914 irqreturn_t ata_interrupt (int irq, void *dev_instance)
4916 struct ata_host *host = dev_instance;
4917 unsigned int i;
4918 unsigned int handled = 0;
4919 unsigned long flags;
4921 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4922 spin_lock_irqsave(&host->lock, flags);
4924 for (i = 0; i < host->n_ports; i++) {
4925 struct ata_port *ap;
4927 ap = host->ports[i];
4928 if (ap &&
4929 !(ap->flags & ATA_FLAG_DISABLED)) {
4930 struct ata_queued_cmd *qc;
4932 qc = ata_qc_from_tag(ap, ap->active_tag);
4933 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4934 (qc->flags & ATA_QCFLAG_ACTIVE))
4935 handled |= ata_host_intr(ap, qc);
4939 spin_unlock_irqrestore(&host->lock, flags);
4941 return IRQ_RETVAL(handled);
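/*
 * Usage sketch (illustrative only): a conventional SFF driver points its
 * ata_port_operations at these helpers.  The struct fragment below is
 * hypothetical and abridged.
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.port_start	= ata_port_start,
 *		.port_stop	= ata_port_stop,
 *	};
 */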
4945 * sata_scr_valid - test whether SCRs are accessible
4946 * @ap: ATA port to test SCR accessibility for
4948 * Test whether SCRs are accessible for @ap.
4950 * LOCKING:
4951 * None.
4953 * RETURNS:
4954 * 1 if SCRs are accessible, 0 otherwise.
4956 int sata_scr_valid(struct ata_port *ap)
4958 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
4962 * sata_scr_read - read SCR register of the specified port
4963 * @ap: ATA port to read SCR for
4964 * @reg: SCR to read
4965 * @val: Place to store read value
4967 * Read SCR register @reg of @ap into *@val. This function is
4968 * guaranteed to succeed if the cable type of the port is SATA
4969 * and the port implements ->scr_read.
4971 * LOCKING:
4972 * None.
4974 * RETURNS:
4975 * 0 on success, negative errno on failure.
4977 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
4979 if (sata_scr_valid(ap)) {
4980 *val = ap->ops->scr_read(ap, reg);
4981 return 0;
4983 return -EOPNOTSUPP;
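/*
 * Usage sketch (illustrative only): callers read SStatus and decode the low
 * DET nibble, 0x3 meaning an established device connection, exactly as
 * ata_port_online() below does.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		ata_port_printk(ap, KERN_INFO, "link up, SStatus %08x\n",
 *				sstatus);
 */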
4987 * sata_scr_write - write SCR register of the specified port
4988 * @ap: ATA port to write SCR for
4989 * @reg: SCR to write
4990 * @val: value to write
4992 * Write @val to SCR register @reg of @ap. This function is
4993 * guaranteed to succeed if the cable type of the port is SATA
4994 * and the port implements ->scr_read.
4996 * LOCKING:
4997 * None.
4999 * RETURNS:
5000 * 0 on success, negative errno on failure.
5002 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5004 if (sata_scr_valid(ap)) {
5005 ap->ops->scr_write(ap, reg, val);
5006 return 0;
5008 return -EOPNOTSUPP;
5012 * sata_scr_write_flush - write SCR register of the specified port and flush
5013 * @ap: ATA port to write SCR for
5014 * @reg: SCR to write
5015 * @val: value to write
5017 * This function is identical to sata_scr_write() except that this
5018 * function performs a flush after writing to the register.
5020 * LOCKING:
5021 * None.
5023 * RETURNS:
5024 * 0 on success, negative errno on failure.
5026 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5028 if (sata_scr_valid(ap)) {
5029 ap->ops->scr_write(ap, reg, val);
5030 ap->ops->scr_read(ap, reg);
5031 return 0;
5033 return -EOPNOTSUPP;
5037 * ata_port_online - test whether the given port is online
5038 * @ap: ATA port to test
5040 * Test whether @ap is online. Note that this function returns 0
5041 * if online status of @ap cannot be obtained, so
5042 * ata_port_online(ap) != !ata_port_offline(ap).
5044 * LOCKING:
5045 * None.
5047 * RETURNS:
5048 * 1 if the port online status is available and online.
5050 int ata_port_online(struct ata_port *ap)
5052 u32 sstatus;
5054 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5055 return 1;
5056 return 0;
5060 * ata_port_offline - test whether the given port is offline
5061 * @ap: ATA port to test
5063 * Test whether @ap is offline. Note that this function returns
5064 * 0 if offline status of @ap cannot be obtained, so
5065 * ata_port_online(ap) != !ata_port_offline(ap).
5067 * LOCKING:
5068 * None.
5070 * RETURNS:
5071 * 1 if the port offline status is available and offline.
5073 int ata_port_offline(struct ata_port *ap)
5075 u32 sstatus;
5077 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5078 return 1;
5079 return 0;
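/*
 * Usage sketch (illustrative only): a port whose SCRs cannot be read is
 * neither online nor offline, so callers test the exact condition they
 * need instead of negating the other helper.
 *
 *	if (ata_port_offline(ap))
 *		return -ENODEV;		(link is definitely down)
 *	(otherwise the link is up or its state is unknown; keep probing)
 */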
5082 int ata_flush_cache(struct ata_device *dev)
5084 unsigned int err_mask;
5085 u8 cmd;
5087 if (!ata_try_flush_cache(dev))
5088 return 0;
5090 if (ata_id_has_flush_ext(dev->id))
5091 cmd = ATA_CMD_FLUSH_EXT;
5092 else
5093 cmd = ATA_CMD_FLUSH;
5095 err_mask = ata_do_simple_cmd(dev, cmd);
5096 if (err_mask) {
5097 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5098 return -EIO;
5101 return 0;
5104 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5105 unsigned int action, unsigned int ehi_flags,
5106 int wait)
5108 unsigned long flags;
5109 int i, rc;
5111 for (i = 0; i < host->n_ports; i++) {
5112 struct ata_port *ap = host->ports[i];
5114 /* Previous resume operation might still be in
5115 * progress. Wait for PM_PENDING to clear.
5117 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5118 ata_port_wait_eh(ap);
5119 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5122 /* request PM ops to EH */
5123 spin_lock_irqsave(ap->lock, flags);
5125 ap->pm_mesg = mesg;
5126 if (wait) {
5127 rc = 0;
5128 ap->pm_result = &rc;
5131 ap->pflags |= ATA_PFLAG_PM_PENDING;
5132 ap->eh_info.action |= action;
5133 ap->eh_info.flags |= ehi_flags;
5135 ata_port_schedule_eh(ap);
5137 spin_unlock_irqrestore(ap->lock, flags);
5139 /* wait and check result */
5140 if (wait) {
5141 ata_port_wait_eh(ap);
5142 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5143 if (rc)
5144 return rc;
5148 return 0;
5152 * ata_host_suspend - suspend host
5153 * @host: host to suspend
5154 * @mesg: PM message
5156 * Suspend @host. Actual operation is performed by EH. This
5157 * function requests EH to perform PM operations and waits for EH
5158 * to finish.
5160 * LOCKING:
5161 * Kernel thread context (may sleep).
5163 * RETURNS:
5164 * 0 on success, -errno on failure.
5166 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5168 int i, j, rc;
5170 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5171 if (rc)
5172 goto fail;
5174 /* EH is quiescent now. Fail if we have any ready device.
5175 * This happens if hotplug occurs between completion of device
5176 * suspension and here.
5178 for (i = 0; i < host->n_ports; i++) {
5179 struct ata_port *ap = host->ports[i];
5181 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5182 struct ata_device *dev = &ap->device[j];
5184 if (ata_dev_ready(dev)) {
5185 ata_port_printk(ap, KERN_WARNING,
5186 "suspend failed, device %d "
5187 "still active\n", dev->devno);
5188 rc = -EBUSY;
5189 goto fail;
5194 host->dev->power.power_state = mesg;
5195 return 0;
5197 fail:
5198 ata_host_resume(host);
5199 return rc;
5203 * ata_host_resume - resume host
5204 * @host: host to resume
5206 * Resume @host. Actual operation is performed by EH. This
5207 * function requests EH to perform PM operations and returns.
5208 * Note that all resume operations are performed in parallel.
5210 * LOCKING:
5211 * Kernel thread context (may sleep).
5213 void ata_host_resume(struct ata_host *host)
5215 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5216 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5217 host->dev->power.power_state = PMSG_ON;
5221 * ata_port_start - Set port up for dma.
5222 * @ap: Port to initialize
5224 * Called just after data structures for each port are
5225 * initialized. Allocates space for the PRD table.
5227 * May be used as the port_start() entry in ata_port_operations.
5229 * LOCKING:
5230 * Inherited from caller.
5233 int ata_port_start (struct ata_port *ap)
5235 struct device *dev = ap->dev;
5236 int rc;
5238 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5239 if (!ap->prd)
5240 return -ENOMEM;
5242 rc = ata_pad_alloc(ap, dev);
5243 if (rc) {
5244 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5245 return rc;
5248 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5250 return 0;
5255 * ata_port_stop - Undo ata_port_start()
5256 * @ap: Port to shut down
5258 * Frees the PRD table.
5260 * May be used as the port_stop() entry in ata_port_operations.
5262 * LOCKING:
5263 * Inherited from caller.
5266 void ata_port_stop (struct ata_port *ap)
5268 struct device *dev = ap->dev;
5270 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
5271 ata_pad_free(ap, dev);
5274 void ata_host_stop (struct ata_host *host)
5276 if (host->mmio_base)
5277 iounmap(host->mmio_base);
5281 * ata_dev_init - Initialize an ata_device structure
5282 * @dev: Device structure to initialize
5284 * Initialize @dev in preparation for probing.
5286 * LOCKING:
5287 * Inherited from caller.
5289 void ata_dev_init(struct ata_device *dev)
5291 struct ata_port *ap = dev->ap;
5292 unsigned long flags;
5294 /* SATA spd limit is bound to the first device */
5295 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5297 /* High bits of dev->flags are used to record warm plug
5298 * requests which occur asynchronously. Synchronize using
5299 * host lock.
5301 spin_lock_irqsave(ap->lock, flags);
5302 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5303 spin_unlock_irqrestore(ap->lock, flags);
5305 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5306 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5307 dev->pio_mask = UINT_MAX;
5308 dev->mwdma_mask = UINT_MAX;
5309 dev->udma_mask = UINT_MAX;
5313 * ata_port_init - Initialize an ata_port structure
5314 * @ap: Structure to initialize
5315 * @host: Collection of hosts to which @ap belongs
5316 * @ent: Probe information provided by low-level driver
5317 * @port_no: Port number associated with this ata_port
5319 * Initialize a new ata_port structure.
5321 * LOCKING:
5322 * Inherited from caller.
5324 void ata_port_init(struct ata_port *ap, struct ata_host *host,
5325 const struct ata_probe_ent *ent, unsigned int port_no)
5327 unsigned int i;
5329 ap->lock = &host->lock;
5330 ap->flags = ATA_FLAG_DISABLED;
5331 ap->id = ata_unique_id++;
5332 ap->ctl = ATA_DEVCTL_OBS;
5333 ap->host = host;
5334 ap->dev = ent->dev;
5335 ap->port_no = port_no;
5336 if (port_no == 1 && ent->pinfo2) {
5337 ap->pio_mask = ent->pinfo2->pio_mask;
5338 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5339 ap->udma_mask = ent->pinfo2->udma_mask;
5340 ap->flags |= ent->pinfo2->flags;
5341 ap->ops = ent->pinfo2->port_ops;
5342 } else {
5343 ap->pio_mask = ent->pio_mask;
5344 ap->mwdma_mask = ent->mwdma_mask;
5345 ap->udma_mask = ent->udma_mask;
5346 ap->flags |= ent->port_flags;
5347 ap->ops = ent->port_ops;
5349 ap->hw_sata_spd_limit = UINT_MAX;
5350 ap->active_tag = ATA_TAG_POISON;
5351 ap->last_ctl = 0xFF;
5353 #if defined(ATA_VERBOSE_DEBUG)
5354 /* turn on all debugging levels */
5355 ap->msg_enable = 0x00FF;
5356 #elif defined(ATA_DEBUG)
5357 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5358 #else
5359 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5360 #endif
5362 INIT_WORK(&ap->port_task, NULL, NULL);
5363 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
5364 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
5365 INIT_LIST_HEAD(&ap->eh_done_q);
5366 init_waitqueue_head(&ap->eh_wait_q);
5368 /* set cable type */
5369 ap->cbl = ATA_CBL_NONE;
5370 if (ap->flags & ATA_FLAG_SATA)
5371 ap->cbl = ATA_CBL_SATA;
5373 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5374 struct ata_device *dev = &ap->device[i];
5375 dev->ap = ap;
5376 dev->devno = i;
5377 ata_dev_init(dev);
5380 #ifdef ATA_IRQ_TRAP
5381 ap->stats.unhandled_irq = 1;
5382 ap->stats.idle_irq = 1;
5383 #endif
5385 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5389 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5390 * @ap: ATA port to initialize SCSI host for
5391 * @shost: SCSI host associated with @ap
5393 * Initialize SCSI host @shost associated with ATA port @ap.
5395 * LOCKING:
5396 * Inherited from caller.
5398 static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5400 ap->scsi_host = shost;
5402 shost->unique_id = ap->id;
5403 shost->max_id = 16;
5404 shost->max_lun = 1;
5405 shost->max_channel = 1;
5406 shost->max_cmd_len = 12;
5410 * ata_port_add - Attach low-level ATA driver to system
5411 * @ent: Information provided by low-level driver
5412 * @host: Collection of ports to which we add
5413 * @port_no: Port number associated with this host
5415 * Attach low-level ATA driver to system.
5417 * LOCKING:
5418 * PCI/etc. bus probe sem.
5420 * RETURNS:
5421 * New ata_port on success, NULL on error.
5423 static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5424 struct ata_host *host,
5425 unsigned int port_no)
5427 struct Scsi_Host *shost;
5428 struct ata_port *ap;
5430 DPRINTK("ENTER\n");
5432 if (!ent->port_ops->error_handler &&
5433 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5434 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5435 port_no);
5436 return NULL;
5439 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5440 if (!shost)
5441 return NULL;
5443 shost->transportt = &ata_scsi_transport_template;
5445 ap = ata_shost_to_port(shost);
5447 ata_port_init(ap, host, ent, port_no);
5448 ata_port_init_shost(ap, shost);
5450 return ap;
5454 * ata_host_init - Initialize a host struct
5455 * @host: host to initialize
5456 * @dev: device host is attached to
5457 * @flags: host flags
5458 * @ops: port_ops
5460 * LOCKING:
5461 * PCI/etc. bus probe sem.
5465 void ata_host_init(struct ata_host *host, struct device *dev,
5466 unsigned long flags, const struct ata_port_operations *ops)
5468 spin_lock_init(&host->lock);
5469 host->dev = dev;
5470 host->flags = flags;
5471 host->ops = ops;
5475 * ata_device_add - Register hardware device with ATA and SCSI layers
5476 * @ent: Probe information describing hardware device to be registered
5478 * This function processes the information provided in the probe
5479 * information struct @ent, allocates the necessary ATA and SCSI
5480 * host information structures, initializes them, and registers
5481 * everything with requisite kernel subsystems.
5483 * This function requests irqs, probes the ATA bus, and probes
5484 * the SCSI bus.
5486 * LOCKING:
5487 * PCI/etc. bus probe sem.
5489 * RETURNS:
5490 * Number of ports registered. Zero on error (no ports registered).
5492 int ata_device_add(const struct ata_probe_ent *ent)
5494 unsigned int i;
5495 struct device *dev = ent->dev;
5496 struct ata_host *host;
5497 int rc;
5499 DPRINTK("ENTER\n");
5501 if (ent->irq == 0) {
5502 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5503 return 0;
5505 /* alloc a container for our list of ATA ports (buses) */
5506 host = kzalloc(sizeof(struct ata_host) +
5507 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5508 if (!host)
5509 return 0;
5511 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5512 host->n_ports = ent->n_ports;
5513 host->irq = ent->irq;
5514 host->irq2 = ent->irq2;
5515 host->mmio_base = ent->mmio_base;
5516 host->private_data = ent->private_data;
5518 /* register each port bound to this device */
5519 for (i = 0; i < host->n_ports; i++) {
5520 struct ata_port *ap;
5521 unsigned long xfer_mode_mask;
5522 int irq_line = ent->irq;
5524 ap = ata_port_add(ent, host, i);
5525 host->ports[i] = ap;
5526 if (!ap)
5527 goto err_out;
5529 /* dummy? */
5530 if (ent->dummy_port_mask & (1 << i)) {
5531 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5532 ap->ops = &ata_dummy_port_ops;
5533 continue;
5536 /* start port */
5537 rc = ap->ops->port_start(ap);
5538 if (rc) {
5539 host->ports[i] = NULL;
5540 scsi_host_put(ap->scsi_host);
5541 goto err_out;
5544 /* Report the secondary IRQ for second channel legacy */
5545 if (i == 1 && ent->irq2)
5546 irq_line = ent->irq2;
5548 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5549 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5550 (ap->pio_mask << ATA_SHIFT_PIO);
5552 /* print per-port info to dmesg */
5553 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
5554 "ctl 0x%lX bmdma 0x%lX irq %d\n",
5555 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5556 ata_mode_string(xfer_mode_mask),
5557 ap->ioaddr.cmd_addr,
5558 ap->ioaddr.ctl_addr,
5559 ap->ioaddr.bmdma_addr,
5560 irq_line);
5562 ata_chk_status(ap);
5563 host->ops->irq_clear(ap);
5564 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
5567 /* obtain irq, that may be shared between channels */
5568 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
5569 DRV_NAME, host);
5570 if (rc) {
5571 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5572 ent->irq, rc);
5573 goto err_out;
5576 /* do we have a second IRQ for the other channel, eg legacy mode */
5577 if (ent->irq2) {
5578 /* We will get weird core code crashes later if this is true
5579 so trap it now */
5580 BUG_ON(ent->irq == ent->irq2);
5582 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
5583 DRV_NAME, host);
5584 if (rc) {
5585 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5586 ent->irq2, rc);
5587 goto err_out_free_irq;
5591 /* perform each probe synchronously */
5592 DPRINTK("probe begin\n");
5593 for (i = 0; i < host->n_ports; i++) {
5594 struct ata_port *ap = host->ports[i];
5595 u32 scontrol;
5596 int rc;
5598 /* init sata_spd_limit to the current value */
5599 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5600 int spd = (scontrol >> 4) & 0xf;
5601 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5603 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5605 rc = scsi_add_host(ap->scsi_host, dev);
5606 if (rc) {
5607 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5608 /* FIXME: do something useful here */
5609 /* FIXME: handle unconditional calls to
5610 * scsi_scan_host and ata_host_remove, below,
5611 * at the very least
5615 if (ap->ops->error_handler) {
5616 struct ata_eh_info *ehi = &ap->eh_info;
5617 unsigned long flags;
5619 ata_port_probe(ap);
5621 /* kick EH for boot probing */
5622 spin_lock_irqsave(ap->lock, flags);
5624 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5625 ehi->action |= ATA_EH_SOFTRESET;
5626 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5628 ap->pflags |= ATA_PFLAG_LOADING;
5629 ata_port_schedule_eh(ap);
5631 spin_unlock_irqrestore(ap->lock, flags);
5633 /* wait for EH to finish */
5634 ata_port_wait_eh(ap);
5635 } else {
5636 DPRINTK("ata%u: bus probe begin\n", ap->id);
5637 rc = ata_bus_probe(ap);
5638 DPRINTK("ata%u: bus probe end\n", ap->id);
5640 if (rc) {
5641 /* FIXME: do something useful here?
5642 * Current libata behavior will
5643 * tear down everything when
5644 * the module is removed
5645 * or the h/w is unplugged.
5651 /* probes are done, now scan each port's disk(s) */
5652 DPRINTK("host probe begin\n");
5653 for (i = 0; i < host->n_ports; i++) {
5654 struct ata_port *ap = host->ports[i];
5656 ata_scsi_scan_host(ap);
5659 dev_set_drvdata(dev, host);
5661 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5662 return ent->n_ports; /* success */
5664 err_out_free_irq:
5665 free_irq(ent->irq, host);
5666 err_out:
5667 for (i = 0; i < host->n_ports; i++) {
5668 struct ata_port *ap = host->ports[i];
5669 if (ap) {
5670 ap->ops->port_stop(ap);
5671 scsi_host_put(ap->scsi_host);
5675 kfree(host);
5676 VPRINTK("EXIT, returning 0\n");
5677 return 0;
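/*
 * Usage sketch (illustrative only): a minimal LLDD probe fills an
 * ata_probe_ent and registers it.  foo_port_info, cmd_base, ctl_base and
 * irq below are hypothetical driver-specific values.
 *
 *	struct ata_probe_ent *ent = ata_probe_ent_alloc(dev, &foo_port_info);
 *
 *	if (!ent)
 *		return -ENOMEM;
 *	ent->n_ports = 1;
 *	ent->irq = irq;
 *	ent->irq_flags = IRQF_SHARED;
 *	ent->port[0].cmd_addr = cmd_base;
 *	ent->port[0].ctl_addr = ctl_base;
 *	ata_std_ports(&ent->port[0]);
 *
 *	if (!ata_device_add(ent)) {
 *		kfree(ent);
 *		return -ENODEV;
 *	}
 *	kfree(ent);
 *	return 0;
 */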
5681 * ata_port_detach - Detach ATA port in preparation for device removal
5682 * @ap: ATA port to be detached
5684 * Detach all ATA devices and the associated SCSI devices of @ap;
5685 * then, remove the associated SCSI host. @ap is guaranteed to
5686 * be quiescent on return from this function.
5688 * LOCKING:
5689 * Kernel thread context (may sleep).
5691 void ata_port_detach(struct ata_port *ap)
5693 unsigned long flags;
5694 int i;
5696 if (!ap->ops->error_handler)
5697 goto skip_eh;
5699 /* tell EH we're leaving & flush EH */
5700 spin_lock_irqsave(ap->lock, flags);
5701 ap->pflags |= ATA_PFLAG_UNLOADING;
5702 spin_unlock_irqrestore(ap->lock, flags);
5704 ata_port_wait_eh(ap);
5706 /* EH is now guaranteed to see UNLOADING, so no new device
5707 * will be attached. Disable all existing devices.
5709 spin_lock_irqsave(ap->lock, flags);
5711 for (i = 0; i < ATA_MAX_DEVICES; i++)
5712 ata_dev_disable(&ap->device[i]);
5714 spin_unlock_irqrestore(ap->lock, flags);
5716 /* Final freeze & EH. All in-flight commands are aborted. EH
5717 * will be skipped and retries will be terminated with bad
5718 * target.
5720 spin_lock_irqsave(ap->lock, flags);
5721 ata_port_freeze(ap); /* won't be thawed */
5722 spin_unlock_irqrestore(ap->lock, flags);
5724 ata_port_wait_eh(ap);
5726 /* Flush hotplug task. The sequence is similar to
5727 * ata_port_flush_task().
5729 flush_workqueue(ata_aux_wq);
5730 cancel_delayed_work(&ap->hotplug_task);
5731 flush_workqueue(ata_aux_wq);
5733 skip_eh:
5734 /* remove the associated SCSI host */
5735 scsi_remove_host(ap->scsi_host);
5739 * ata_host_remove - PCI layer callback for device removal
5740 * @host: ATA host set that was removed
5742 * Unregister all objects associated with this host set. Free those
5743 * objects.
5745 * LOCKING:
5746 * Inherited from calling layer (may sleep).
5749 void ata_host_remove(struct ata_host *host)
5751 unsigned int i;
5753 for (i = 0; i < host->n_ports; i++)
5754 ata_port_detach(host->ports[i]);
5756 free_irq(host->irq, host);
5757 if (host->irq2)
5758 free_irq(host->irq2, host);
5760 for (i = 0; i < host->n_ports; i++) {
5761 struct ata_port *ap = host->ports[i];
5763 ata_scsi_release(ap->scsi_host);
5765 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5766 struct ata_ioports *ioaddr = &ap->ioaddr;
5768 /* FIXME: Add -ac IDE pci mods to remove these special cases */
5769 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
5770 release_region(ATA_PRIMARY_CMD, 8);
5771 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
5772 release_region(ATA_SECONDARY_CMD, 8);
5775 scsi_host_put(ap->scsi_host);
5778 if (host->ops->host_stop)
5779 host->ops->host_stop(host);
5781 kfree(host);
5785 * ata_scsi_release - SCSI layer callback hook for host unload
5786 * @shost: libata host to be unloaded
5788 * Performs all duties necessary to shut down a libata port...
5789 * Kill port kthread, disable port, and release resources.
5791 * LOCKING:
5792 * Inherited from SCSI layer.
5794 * RETURNS:
5795 * One.
5798 int ata_scsi_release(struct Scsi_Host *shost)
5800 struct ata_port *ap = ata_shost_to_port(shost);
5802 DPRINTK("ENTER\n");
5804 ap->ops->port_disable(ap);
5805 ap->ops->port_stop(ap);
5807 DPRINTK("EXIT\n");
5808 return 1;
5811 struct ata_probe_ent *
5812 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5814 struct ata_probe_ent *probe_ent;
5816 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5817 if (!probe_ent) {
5818 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5819 kobject_name(&(dev->kobj)));
5820 return NULL;
5823 INIT_LIST_HEAD(&probe_ent->node);
5824 probe_ent->dev = dev;
5826 probe_ent->sht = port->sht;
5827 probe_ent->port_flags = port->flags;
5828 probe_ent->pio_mask = port->pio_mask;
5829 probe_ent->mwdma_mask = port->mwdma_mask;
5830 probe_ent->udma_mask = port->udma_mask;
5831 probe_ent->port_ops = port->port_ops;
5832 probe_ent->private_data = port->private_data;
5834 return probe_ent;
5838 * ata_std_ports - initialize ioaddr with standard port offsets.
5839 * @ioaddr: IO address structure to be initialized
5841 * Utility function which initializes data_addr, error_addr,
5842 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5843 * device_addr, status_addr, and command_addr to standard offsets
5844 * relative to cmd_addr.
5846 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5849 void ata_std_ports(struct ata_ioports *ioaddr)
5851 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5852 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5853 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5854 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5855 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5856 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5857 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5858 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5859 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5860 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
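/*
 * Usage sketch (illustrative only): the LLDD fills in the base addresses it
 * knows and lets ata_std_ports() derive the per-register offsets.  The
 * legacy base values below are only an example.
 *
 *	probe_ent->port[0].cmd_addr = 0x1f0;
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = 0x3f6;
 *	ata_std_ports(&probe_ent->port[0]);
 */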
5864 #ifdef CONFIG_PCI
5866 void ata_pci_host_stop (struct ata_host *host)
5868 struct pci_dev *pdev = to_pci_dev(host->dev);
5870 pci_iounmap(pdev, host->mmio_base);
5874 * ata_pci_remove_one - PCI layer callback for device removal
5875 * @pdev: PCI device that was removed
5877 * PCI layer indicates to libata via this hook that
5878 * a hot-unplug or module unload event has occurred.
5879 * Handle this by unregistering all objects associated
5880 * with this PCI device. Free those objects. Then finally
5881 * release PCI resources and disable device.
5883 * LOCKING:
5884 * Inherited from PCI layer (may sleep).
5887 void ata_pci_remove_one (struct pci_dev *pdev)
5889 struct device *dev = pci_dev_to_dev(pdev);
5890 struct ata_host *host = dev_get_drvdata(dev);
5892 ata_host_remove(host);
5894 pci_release_regions(pdev);
5895 pci_disable_device(pdev);
5896 dev_set_drvdata(dev, NULL);
5899 /* move to PCI subsystem */
5900 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5902 unsigned long tmp = 0;
5904 switch (bits->width) {
5905 case 1: {
5906 u8 tmp8 = 0;
5907 pci_read_config_byte(pdev, bits->reg, &tmp8);
5908 tmp = tmp8;
5909 break;
5911 case 2: {
5912 u16 tmp16 = 0;
5913 pci_read_config_word(pdev, bits->reg, &tmp16);
5914 tmp = tmp16;
5915 break;
5917 case 4: {
5918 u32 tmp32 = 0;
5919 pci_read_config_dword(pdev, bits->reg, &tmp32);
5920 tmp = tmp32;
5921 break;
5924 default:
5925 return -EINVAL;
5928 tmp &= bits->mask;
5930 return (tmp == bits->val) ? 1 : 0;
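/*
 * Usage sketch (illustrative only): LLDDs describe a port-enable bit in PCI
 * config space with struct pci_bits and test it before touching the port.
 * The config offset and mask below are hypothetical.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	(reg, width in bytes, mask, val)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[0]))
 *		return -ENOENT;			(port disabled by firmware)
 */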
5933 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5935 pci_save_state(pdev);
5937 if (mesg.event == PM_EVENT_SUSPEND) {
5938 pci_disable_device(pdev);
5939 pci_set_power_state(pdev, PCI_D3hot);
5943 void ata_pci_device_do_resume(struct pci_dev *pdev)
5945 pci_set_power_state(pdev, PCI_D0);
5946 pci_restore_state(pdev);
5947 pci_enable_device(pdev);
5948 pci_set_master(pdev);
5951 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5953 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5954 int rc = 0;
5956 rc = ata_host_suspend(host, mesg);
5957 if (rc)
5958 return rc;
5960 ata_pci_device_do_suspend(pdev, mesg);
5962 return 0;
5965 int ata_pci_device_resume(struct pci_dev *pdev)
5967 struct ata_host *host = dev_get_drvdata(&pdev->dev);
5969 ata_pci_device_do_resume(pdev);
5970 ata_host_resume(host);
5971 return 0;
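/*
 * Usage sketch (illustrative only): a PCI LLDD typically wires these helpers
 * straight into its pci_driver.  The foo_* names below are hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */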
5973 #endif /* CONFIG_PCI */
5976 static int __init ata_init(void)
5978 ata_probe_timeout *= HZ;
5979 ata_wq = create_workqueue("ata");
5980 if (!ata_wq)
5981 return -ENOMEM;
5983 ata_aux_wq = create_singlethread_workqueue("ata_aux");
5984 if (!ata_aux_wq) {
5985 destroy_workqueue(ata_wq);
5986 return -ENOMEM;
5989 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5990 return 0;
5993 static void __exit ata_exit(void)
5995 destroy_workqueue(ata_wq);
5996 destroy_workqueue(ata_aux_wq);
5999 subsys_initcall(ata_init);
6000 module_exit(ata_exit);
6002 static unsigned long ratelimit_time;
6003 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6005 int ata_ratelimit(void)
6007 int rc;
6008 unsigned long flags;
6010 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6012 if (time_after(jiffies, ratelimit_time)) {
6013 rc = 1;
6014 ratelimit_time = jiffies + (HZ/5);
6015 } else
6016 rc = 0;
6018 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6020 return rc;
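/*
 * Usage sketch (illustrative only): interrupt-context warnings are commonly
 * gated on ata_ratelimit() so a storm of identical events does not flood
 * the log.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
 */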
6024 * ata_wait_register - wait until register value changes
6025 * @reg: IO-mapped register
6026 * @mask: Mask to apply to read register value
6027 * @val: Wait condition
6028 * @interval_msec: polling interval in milliseconds
6029 * @timeout_msec: timeout in milliseconds
6031 * Waiting for some bits of a register to change is a common
6032 * operation for ATA controllers. This function reads a 32-bit LE
6033 * IO-mapped register @reg and tests for the following condition.
6035 * (*@reg & mask) != val
6037 * If the condition is met, it returns; otherwise, the process is
6038 * repeated after @interval_msec until timeout.
6040 * LOCKING:
6041 * Kernel thread context (may sleep)
6043 * RETURNS:
6044 * The final register value.
6046 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6047 unsigned long interval_msec,
6048 unsigned long timeout_msec)
6050 unsigned long timeout;
6051 u32 tmp;
6053 tmp = ioread32(reg);
6055 /* Calculate timeout _after_ the first read to make sure
6056 * preceding writes reach the controller before starting to
6057 * eat away the timeout.
6059 timeout = jiffies + (timeout_msec * HZ) / 1000;
6061 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6062 msleep(interval_msec);
6063 tmp = ioread32(reg);
6066 return tmp;
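/*
 * Usage sketch (illustrative only): polling for a controller busy bit to
 * clear.  The mmio base, FOO_CTL offset and FOO_CTL_BUSY bit are
 * hypothetical.
 *
 *	u32 tmp = ata_wait_register(mmio + FOO_CTL, FOO_CTL_BUSY, FOO_CTL_BUSY,
 *				    1, 500);
 *
 *	if (tmp & FOO_CTL_BUSY)
 *		return -EBUSY;		(timed out, bit is still set)
 */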
6070 * Dummy port_ops
6072 static void ata_dummy_noret(struct ata_port *ap) { }
6073 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6074 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6076 static u8 ata_dummy_check_status(struct ata_port *ap)
6078 return ATA_DRDY;
6081 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6083 return AC_ERR_SYSTEM;
6086 const struct ata_port_operations ata_dummy_port_ops = {
6087 .port_disable = ata_port_disable,
6088 .check_status = ata_dummy_check_status,
6089 .check_altstatus = ata_dummy_check_status,
6090 .dev_select = ata_noop_dev_select,
6091 .qc_prep = ata_noop_qc_prep,
6092 .qc_issue = ata_dummy_qc_issue,
6093 .freeze = ata_dummy_noret,
6094 .thaw = ata_dummy_noret,
6095 .error_handler = ata_dummy_noret,
6096 .post_internal_cmd = ata_dummy_qc_noret,
6097 .irq_clear = ata_dummy_noret,
6098 .port_start = ata_dummy_ret0,
6099 .port_stop = ata_dummy_noret,
6103 * libata is essentially a library of internal helper functions for
6104 * low-level ATA host controller drivers. As such, the API/ABI is
6105 * likely to change as new drivers are added and updated.
6106 * Do not depend on ABI/API stability.
6109 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6110 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6111 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6112 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6113 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6114 EXPORT_SYMBOL_GPL(ata_std_ports);
6115 EXPORT_SYMBOL_GPL(ata_host_init);
6116 EXPORT_SYMBOL_GPL(ata_device_add);
6117 EXPORT_SYMBOL_GPL(ata_port_detach);
6118 EXPORT_SYMBOL_GPL(ata_host_remove);
6119 EXPORT_SYMBOL_GPL(ata_sg_init);
6120 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6121 EXPORT_SYMBOL_GPL(ata_hsm_move);
6122 EXPORT_SYMBOL_GPL(ata_qc_complete);
6123 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6124 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6125 EXPORT_SYMBOL_GPL(ata_tf_load);
6126 EXPORT_SYMBOL_GPL(ata_tf_read);
6127 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6128 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6129 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6130 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6131 EXPORT_SYMBOL_GPL(ata_check_status);
6132 EXPORT_SYMBOL_GPL(ata_altstatus);
6133 EXPORT_SYMBOL_GPL(ata_exec_command);
6134 EXPORT_SYMBOL_GPL(ata_port_start);
6135 EXPORT_SYMBOL_GPL(ata_port_stop);
6136 EXPORT_SYMBOL_GPL(ata_host_stop);
6137 EXPORT_SYMBOL_GPL(ata_interrupt);
6138 EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
6139 EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
6140 EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
6141 EXPORT_SYMBOL_GPL(ata_qc_prep);
6142 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6143 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6144 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6145 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6146 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6147 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6148 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6149 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6150 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6151 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6152 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6153 EXPORT_SYMBOL_GPL(ata_port_probe);
6154 EXPORT_SYMBOL_GPL(sata_set_spd);
6155 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6156 EXPORT_SYMBOL_GPL(sata_phy_resume);
6157 EXPORT_SYMBOL_GPL(sata_phy_reset);
6158 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6159 EXPORT_SYMBOL_GPL(ata_bus_reset);
6160 EXPORT_SYMBOL_GPL(ata_std_prereset);
6161 EXPORT_SYMBOL_GPL(ata_std_softreset);
6162 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6163 EXPORT_SYMBOL_GPL(ata_std_postreset);
6164 EXPORT_SYMBOL_GPL(ata_dev_classify);
6165 EXPORT_SYMBOL_GPL(ata_dev_pair);
6166 EXPORT_SYMBOL_GPL(ata_port_disable);
6167 EXPORT_SYMBOL_GPL(ata_ratelimit);
6168 EXPORT_SYMBOL_GPL(ata_wait_register);
6169 EXPORT_SYMBOL_GPL(ata_busy_sleep);
6170 EXPORT_SYMBOL_GPL(ata_port_queue_task);
6171 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6172 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6173 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6174 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6175 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6176 EXPORT_SYMBOL_GPL(ata_scsi_release);
6177 EXPORT_SYMBOL_GPL(ata_host_intr);
6178 EXPORT_SYMBOL_GPL(sata_scr_valid);
6179 EXPORT_SYMBOL_GPL(sata_scr_read);
6180 EXPORT_SYMBOL_GPL(sata_scr_write);
6181 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6182 EXPORT_SYMBOL_GPL(ata_port_online);
6183 EXPORT_SYMBOL_GPL(ata_port_offline);
6184 EXPORT_SYMBOL_GPL(ata_host_suspend);
6185 EXPORT_SYMBOL_GPL(ata_host_resume);
6186 EXPORT_SYMBOL_GPL(ata_id_string);
6187 EXPORT_SYMBOL_GPL(ata_id_c_string);
6188 EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6189 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6191 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6192 EXPORT_SYMBOL_GPL(ata_timing_compute);
6193 EXPORT_SYMBOL_GPL(ata_timing_merge);
6195 #ifdef CONFIG_PCI
6196 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6197 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
6198 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6199 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6200 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6201 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6202 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6203 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6204 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6205 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6206 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6207 #endif /* CONFIG_PCI */
6209 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6210 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6212 EXPORT_SYMBOL_GPL(ata_eng_timeout);
6213 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6214 EXPORT_SYMBOL_GPL(ata_port_abort);
6215 EXPORT_SYMBOL_GPL(ata_port_freeze);
6216 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6217 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6218 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6219 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6220 EXPORT_SYMBOL_GPL(ata_do_eh);