libata sg chaining support fix
drivers/ata/libata-core.c
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
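/*
 * Illustrative sketch (not part of libata): how a hypothetical LLDD might
 * use ata_tf_to_fis() to build the 20-byte H2D Register FIS for a command
 * before handing it to controller-specific transmit logic.  The helper
 * name and the cmd_fis buffer are assumptions made for this example only.
 */
#if 0
static void example_build_h2d_fis(struct ata_queued_cmd *qc, u8 *cmd_fis)
{
	/* pmp 0: device attached directly to the port; is_cmd 1 sets bit 7 */
	ata_tf_to_fis(&qc->tf, 0, 1, cmd_fis);
	/* cmd_fis[0] is now 0x27 (FIS type), cmd_fis[2] the ATA command */
}
#endif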
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
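/*
 * Worked example (illustrative, not in the original source): for a DMA
 * write with LBA48 and FUA set, index = 16, fua = 4, lba48 = 2, write = 1,
 * so ata_rw_cmds[16 + 4 + 2 + 1] = ata_rw_cmds[23] = ATA_CMD_WRITE_FUA_EXT.
 * The zero entries in the table mark combinations with no corresponding
 * ATA command (e.g. FUA without LBA48), which ata_rwcmd_protocol()
 * rejects by returning -1.
 */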
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
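/*
 * Worked CHS example (illustrative): with dev->heads == 16 and
 * dev->sectors == 63, a taskfile holding cyl 2, head 3 and sect 4 decodes,
 * per the formula above, to block = (2 * 16 + 3) * 63 + 4 = 2209.
 */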
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
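/*
 * Illustrative sketch (assuming the standard ATA_SHIFT_*/ATA_MASK_*
 * definitions from <linux/ata.h>): packing pio_mask 0x1f (PIO0-4),
 * mwdma_mask 0x07 (MWDMA0-2) and udma_mask 0x3f (UDMA0-5) yields one
 * xfer_mask that ata_unpack_xfermask() splits back into the same masks.
 */
#if 0
static void example_xfermask_roundtrip(void)
{
	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	unsigned int pio, mwdma, udma;

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
}
#endif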
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
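/*
 * Illustrative mapping (derived from ata_xfer_tbl above): the xfer_mask
 * bit at position (ATA_SHIFT_UDMA + 5) corresponds to XFER_UDMA_5, so
 * ata_xfer_mask2mode(1 << (ATA_SHIFT_UDMA + 5)) returns XFER_UDMA_5,
 * ata_xfer_mode2mask(XFER_UDMA_5) returns that same single-bit mask, and
 * ata_xfer_mode2shift(XFER_UDMA_5) returns ATA_SHIFT_UDMA.
 */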
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}
/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures for
	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 shortly afterwards and described them as reserved
	 * for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk("ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported an incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * the device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
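/*
 * Illustrative sketch: extracting the product string from IDENTIFY data
 * the way ata_dev_configure() does further below.  ATA_ID_PROD and
 * ATA_ID_PROD_LEN come from <linux/ata.h>; the id buffer is assumed to
 * hold valid, already byte-swapped IDENTIFY data.
 */
#if 0
static void example_read_model(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	/* len must be odd: maximum string length (even) + 1 for '\0' */
	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
}
#endif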
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
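/*
 * Clarifying note (not in the original source): these helpers return a
 * sector *count*, not the last LBA.  READ/SET NATIVE MAX report the
 * highest addressable sector, so a taskfile holding max LBA 0x0FFFFFFF
 * yields 0x10000000 sectors; hence the ++sectors before returning.
 */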
/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */
void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}
/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
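/*
 * Worked example (illustrative): for a drive reporting word 53 bit 1 set,
 * word 64 == 0x0003 (PIO3-4 supported), word 63 == 0x0007 (MWDMA0-2) and
 * word 88 == 0x003f (UDMA0-5), this computes pio_mask = (0x03 << 3) | 0x7
 * = 0x1f, mwdma_mask = 0x07 and udma_mask = 0x3f before packing.
 */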
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user (low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n",
					       command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}
/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
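/*
 * Illustrative usage (not part of the original file): issuing a cache
 * flush as a simple no-data command.  ATA_CMD_FLUSH comes from
 * <linux/ata.h>; error handling is reduced to checking the returned
 * AC_ERR_* mask, which ata_do_simple_cmd() passes through directly.
 */
#if 0
static int example_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);

	return err_mask ? -EIO : 0;
}
#endif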
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}
/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL,
					     DMA_NONE, NULL, 0, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					       "failed to enable ATAPI AN "
					       "(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
				"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
				"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2150 * ata_cable_40wire - return 40 wire cable type
2151 * @ap: port
2153 * Helper method for drivers which want to hardwire 40 wire cable
2154 * detection.
2157 int ata_cable_40wire(struct ata_port *ap)
2159 return ATA_CBL_PATA40;
2163 * ata_cable_80wire - return 80 wire cable type
2164 * @ap: port
2166 * Helper method for drivers which want to hardwire 80 wire cable
2167 * detection.
2170 int ata_cable_80wire(struct ata_port *ap)
2172 return ATA_CBL_PATA80;
2176 * ata_cable_unknown - return unknown PATA cable.
2177 * @ap: port
2179 * Helper method for drivers which have no PATA cable detection.
2182 int ata_cable_unknown(struct ata_port *ap)
2184 return ATA_CBL_PATA_UNK;
2188 * ata_cable_sata - return SATA cable type
2189 * @ap: port
2191 * Helper method for drivers which have SATA cables
2194 int ata_cable_sata(struct ata_port *ap)
2196 return ATA_CBL_SATA;
2200 * ata_bus_probe - Reset and probe ATA bus
2201 * @ap: Bus to probe
2203 * Master ATA bus probing function. Initiates a hardware-dependent
2204 * bus reset, then attempts to identify any devices found on
2205 * the bus.
2207 * LOCKING:
2208 * PCI/etc. bus probe sem.
2210 * RETURNS:
2211 * Zero on success, negative errno otherwise.
2214 int ata_bus_probe(struct ata_port *ap)
2216 unsigned int classes[ATA_MAX_DEVICES];
2217 int tries[ATA_MAX_DEVICES];
2218 int rc;
2219 struct ata_device *dev;
2221 ata_port_probe(ap);
2223 ata_link_for_each_dev(dev, &ap->link)
2224 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2226 retry:
2227 /* reset and determine device classes */
2228 ap->ops->phy_reset(ap);
2230 ata_link_for_each_dev(dev, &ap->link) {
2231 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2232 dev->class != ATA_DEV_UNKNOWN)
2233 classes[dev->devno] = dev->class;
2234 else
2235 classes[dev->devno] = ATA_DEV_NONE;
2237 dev->class = ATA_DEV_UNKNOWN;
2240 ata_port_probe(ap);
2242 /* after the reset the device state is PIO 0 and the controller
2243 state is undefined. Record the mode */
2245 ata_link_for_each_dev(dev, &ap->link)
2246 dev->pio_mode = XFER_PIO_0;
2248 /* read IDENTIFY page and configure devices. We have to do the identify
2249 specific sequence bass-ackwards so that PDIAG- is released by
2250 the slave device */
2252 ata_link_for_each_dev(dev, &ap->link) {
2253 if (tries[dev->devno])
2254 dev->class = classes[dev->devno];
2256 if (!ata_dev_enabled(dev))
2257 continue;
2259 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2260 dev->id);
2261 if (rc)
2262 goto fail;
2265 /* Now ask for the cable type as PDIAG- should have been released */
2266 if (ap->ops->cable_detect)
2267 ap->cbl = ap->ops->cable_detect(ap);
2269 /* We may have SATA bridge glue hiding here irrespective of the
2270 reported cable types and sensed types */
2271 ata_link_for_each_dev(dev, &ap->link) {
2272 if (!ata_dev_enabled(dev))
2273 continue;
2274 /* SATA drives indicate we have a bridge. We don't know which
2275 end of the link the bridge is on, which is a problem */
2276 if (ata_id_is_sata(dev->id))
2277 ap->cbl = ATA_CBL_SATA;
2280 /* After the identify sequence we can now set up the devices. We do
2281 this in the normal order so that the user doesn't get confused */
2283 ata_link_for_each_dev(dev, &ap->link) {
2284 if (!ata_dev_enabled(dev))
2285 continue;
2287 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2288 rc = ata_dev_configure(dev);
2289 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2290 if (rc)
2291 goto fail;
2294 /* configure transfer mode */
2295 rc = ata_set_mode(&ap->link, &dev);
2296 if (rc)
2297 goto fail;
2299 ata_link_for_each_dev(dev, &ap->link)
2300 if (ata_dev_enabled(dev))
2301 return 0;
2303 /* no device present, disable port */
2304 ata_port_disable(ap);
2305 return -ENODEV;
2307 fail:
2308 tries[dev->devno]--;
2310 switch (rc) {
2311 case -EINVAL:
2312 /* eeek, something went very wrong, give up */
2313 tries[dev->devno] = 0;
2314 break;
2316 case -ENODEV:
2317 /* give it just one more chance */
2318 tries[dev->devno] = min(tries[dev->devno], 1);
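		/* fall through - -ENODEV shares the slow-down handling of -EIO */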
2319 case -EIO:
2320 if (tries[dev->devno] == 1) {
2321 /* This is the last chance, better to slow
2322 * down than lose it.
2324 sata_down_spd_limit(&ap->link);
2325 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2329 if (!tries[dev->devno])
2330 ata_dev_disable(dev);
2332 goto retry;
2336 * ata_port_probe - Mark port as enabled
2337 * @ap: Port for which we indicate enablement
2339 * Modify @ap data structure such that the system
2340 * thinks that the entire port is enabled.
2342 * LOCKING: host lock, or some other form of
2343 * serialization.
2346 void ata_port_probe(struct ata_port *ap)
2348 ap->flags &= ~ATA_FLAG_DISABLED;
2352 * sata_print_link_status - Print SATA link status
2353 * @link: SATA link to printk link status about
2355 * This function prints link speed and status of a SATA link.
2357 * LOCKING:
2358 * None.
2360 void sata_print_link_status(struct ata_link *link)
2362 u32 sstatus, scontrol, tmp;
2364 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2365 return;
2366 sata_scr_read(link, SCR_CONTROL, &scontrol);
2368 if (ata_link_online(link)) {
2369 tmp = (sstatus >> 4) & 0xf;
2370 ata_link_printk(link, KERN_INFO,
2371 "SATA link up %s (SStatus %X SControl %X)\n",
2372 sata_spd_string(tmp), sstatus, scontrol);
2373 } else {
2374 ata_link_printk(link, KERN_INFO,
2375 "SATA link down (SStatus %X SControl %X)\n",
2376 sstatus, scontrol);
2381 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2382 * @ap: SATA port associated with target SATA PHY.
2384 * This function issues commands to standard SATA Sxxx
2385 * PHY registers, to wake up the phy (and device), and
2386 * clear any reset condition.
2388 * LOCKING:
2389 * PCI/etc. bus probe sem.
2392 void __sata_phy_reset(struct ata_port *ap)
2394 struct ata_link *link = &ap->link;
2395 unsigned long timeout = jiffies + (HZ * 5);
2396 u32 sstatus;
2398 if (ap->flags & ATA_FLAG_SATA_RESET) {
2399 /* issue phy wake/reset */
2400 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2401 /* Couldn't find anything in SATA I/II specs, but
2402 * AHCI-1.1 10.4.2 says at least 1 ms. */
2403 mdelay(1);
2405 /* phy wake/clear reset */
2406 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2408 /* wait for phy to become ready, if necessary */
2409 do {
2410 msleep(200);
2411 sata_scr_read(link, SCR_STATUS, &sstatus);
2412 if ((sstatus & 0xf) != 1)
2413 break;
2414 } while (time_before(jiffies, timeout));
2416 /* print link status */
2417 sata_print_link_status(link);
2419 /* TODO: phy layer with polling, timeouts, etc. */
2420 if (!ata_link_offline(link))
2421 ata_port_probe(ap);
2422 else
2423 ata_port_disable(ap);
2425 if (ap->flags & ATA_FLAG_DISABLED)
2426 return;
2428 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2429 ata_port_disable(ap);
2430 return;
2433 ap->cbl = ATA_CBL_SATA;
2437 * sata_phy_reset - Reset SATA bus.
2438 * @ap: SATA port associated with target SATA PHY.
2440 * This function resets the SATA bus, and then probes
2441 * the bus for devices.
2443 * LOCKING:
2444 * PCI/etc. bus probe sem.
2447 void sata_phy_reset(struct ata_port *ap)
2449 __sata_phy_reset(ap);
2450 if (ap->flags & ATA_FLAG_DISABLED)
2451 return;
2452 ata_bus_reset(ap);
2456 * ata_dev_pair - return other device on cable
2457 * @adev: device
2459 * Obtain the other device on the same cable, or if none is
2460 * present NULL is returned
2463 struct ata_device *ata_dev_pair(struct ata_device *adev)
2465 struct ata_link *link = adev->link;
2466 struct ata_device *pair = &link->device[1 - adev->devno];
2467 if (!ata_dev_enabled(pair))
2468 return NULL;
2469 return pair;
2473 * ata_port_disable - Disable port.
2474 * @ap: Port to be disabled.
2476 * Modify @ap data structure such that the system
2477 * thinks that the entire port is disabled, and should
2478 * never attempt to probe or communicate with devices
2479 * on this port.
2481 * LOCKING: host lock, or some other form of
2482 * serialization.
2485 void ata_port_disable(struct ata_port *ap)
2487 ap->link.device[0].class = ATA_DEV_NONE;
2488 ap->link.device[1].class = ATA_DEV_NONE;
2489 ap->flags |= ATA_FLAG_DISABLED;
2493 * sata_down_spd_limit - adjust SATA spd limit downward
2494 * @link: Link to adjust SATA spd limit for
2496 * Adjust SATA spd limit of @link downward. Note that this
2497 * function only adjusts the limit. The change must be applied
2498 * using sata_set_spd().
2500 * LOCKING:
2501 * Inherited from caller.
2503 * RETURNS:
2504 * 0 on success, negative errno on failure
2506 int sata_down_spd_limit(struct ata_link *link)
2508 u32 sstatus, spd, mask;
2509 int rc, highbit;
2511 if (!sata_scr_valid(link))
2512 return -EOPNOTSUPP;
2514 /* If SCR can be read, use it to determine the current SPD.
2515 * If not, use cached value in link->sata_spd.
2517 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2518 if (rc == 0)
2519 spd = (sstatus >> 4) & 0xf;
2520 else
2521 spd = link->sata_spd;
2523 mask = link->sata_spd_limit;
2524 if (mask <= 1)
2525 return -EINVAL;
2527 /* unconditionally mask off the highest bit */
2528 highbit = fls(mask) - 1;
2529 mask &= ~(1 << highbit);
2531 /* Mask off all speeds higher than or equal to the current
2532 * one. Force 1.5Gbps if current SPD is not available.
2534 if (spd > 1)
2535 mask &= (1 << (spd - 1)) - 1;
2536 else
2537 mask &= 1;
2539 /* were we already at the bottom? */
2540 if (!mask)
2541 return -EINVAL;
2543 link->sata_spd_limit = mask;
2545 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2546 sata_spd_string(fls(mask)));
2548 return 0;
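/*
 * Illustrative worked example (editor's sketch, not driver code): with
 * sata_spd_limit == 0x3 (1.5 and 3.0 Gbps allowed) and the link running
 * at 3.0 Gbps (spd == 2), the code above first clears the highest bit,
 * giving mask == 0x1, and the "speeds below current" filter keeps it at
 * 0x1, so the new limit is 1.5 Gbps only.
 */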
2551 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2553 u32 spd, limit;
2555 if (link->sata_spd_limit == UINT_MAX)
2556 limit = 0;
2557 else
2558 limit = fls(link->sata_spd_limit);
2560 spd = (*scontrol >> 4) & 0xf;
2561 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2563 return spd != limit;
2567 * sata_set_spd_needed - is SATA spd configuration needed
2568 * @link: Link in question
2570 * Test whether the spd limit in SControl matches
2571 * @link->sata_spd_limit. This function is used to determine
2572 * whether hardreset is necessary to apply SATA spd
2573 * configuration.
2575 * LOCKING:
2576 * Inherited from caller.
2578 * RETURNS:
2579 * 1 if SATA spd configuration is needed, 0 otherwise.
2581 int sata_set_spd_needed(struct ata_link *link)
2583 u32 scontrol;
2585 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2586 return 0;
2588 return __sata_set_spd_needed(link, &scontrol);
2592 * sata_set_spd - set SATA spd according to spd limit
2593 * @link: Link to set SATA spd for
2595 * Set SATA spd of @link according to sata_spd_limit.
2597 * LOCKING:
2598 * Inherited from caller.
2600 * RETURNS:
2601 * 0 if spd doesn't need to be changed, 1 if spd has been
2602 * changed. Negative errno if SCR registers are inaccessible.
2604 int sata_set_spd(struct ata_link *link)
2606 u32 scontrol;
2607 int rc;
2609 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2610 return rc;
2612 if (!__sata_set_spd_needed(link, &scontrol))
2613 return 0;
2615 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2616 return rc;
2618 return 1;
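/*
 * Sketch (editor's note, not part of the driver): the SPD limit lives in
 * bits 7:4 of SControl.  For example, with sata_spd_limit == 0x1, fls()
 * yields 1 and __sata_set_spd_needed() rewrites that field to 1,
 * restricting the PHY to 1.5 Gbps; a following hardreset then applies
 * the new limit.
 */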
2622 * This mode timing computation functionality is ported over from
2623 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2626 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2627 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2628 * for UDMA6, which is currently supported only by Maxtor drives.
2630 * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
2633 static const struct ata_timing ata_timing[] = {
2635 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2636 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2637 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2638 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2640 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2641 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2642 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2643 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2644 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2646 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2648 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2649 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2650 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2652 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2653 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2654 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2656 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2657 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2658 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2659 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2661 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2662 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2663 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2665 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2667 { 0xFF }
2670 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2671 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2673 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2675 q->setup = EZ(t->setup * 1000, T);
2676 q->act8b = EZ(t->act8b * 1000, T);
2677 q->rec8b = EZ(t->rec8b * 1000, T);
2678 q->cyc8b = EZ(t->cyc8b * 1000, T);
2679 q->active = EZ(t->active * 1000, T);
2680 q->recover = EZ(t->recover * 1000, T);
2681 q->cycle = EZ(t->cycle * 1000, T);
2682 q->udma = EZ(t->udma * 1000, UT);
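/*
 * Worked example (editor's sketch): timings are held in nanoseconds and
 * quantized to bus clocks measured in picoseconds.  Assuming a driver
 * passes T == 30000 (a ~33 MHz PATA clock period in ps), a 70 ns setup
 * time becomes EZ(70 * 1000, 30000) == ENOUGH(70000, 30000) ==
 * (70000 - 1) / 30000 + 1 == 3 clocks - i.e. rounded up - while a zero
 * field stays zero instead of becoming one clock.
 */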
2685 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2686 struct ata_timing *m, unsigned int what)
2688 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2689 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2690 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2691 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2692 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2693 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2694 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2695 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2698 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2700 const struct ata_timing *t;
2702 for (t = ata_timing; t->mode != speed; t++)
2703 if (t->mode == 0xFF)
2704 return NULL;
2705 return t;
2708 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2709 struct ata_timing *t, int T, int UT)
2711 const struct ata_timing *s;
2712 struct ata_timing p;
2715 * Find the mode.
2718 if (!(s = ata_timing_find_mode(speed)))
2719 return -EINVAL;
2721 memcpy(t, s, sizeof(*s));
2724 * If the drive is an EIDE drive, it can tell us it needs extended
2725 * PIO/MW_DMA cycle timing.
2728 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2729 memset(&p, 0, sizeof(p));
2730 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2731 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2732 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2733 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2734 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2736 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2740 * Convert the timing to bus clock counts.
2743 ata_timing_quantize(t, t, T, UT);
2746 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2747 * S.M.A.R.T. and some other commands. We have to ensure that the
2748 * DMA cycle timing is no faster than the fastest PIO timing.
2751 if (speed > XFER_PIO_6) {
2752 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2753 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2757 * Lengthen active & recovery time so that cycle time is correct.
2760 if (t->act8b + t->rec8b < t->cyc8b) {
2761 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2762 t->rec8b = t->cyc8b - t->act8b;
2765 if (t->active + t->recover < t->cycle) {
2766 t->active += (t->cycle - (t->active + t->recover)) / 2;
2767 t->recover = t->cycle - t->active;
2770 /* In a few cases quantisation may produce enough errors to
2771 leave t->cycle too low for the sum of active and recovery;
2772 if so we must correct this */
2773 if (t->active + t->recover > t->cycle)
2774 t->cycle = t->active + t->recover;
2776 return 0;
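/*
 * Minimal usage sketch (editor's illustration, not part of this file):
 * how a controller driver might derive register-ready clock counts for
 * a device's PIO mode.  The clock period values are assumptions made
 * for the sketch, not values taken from any particular driver.
 */
#if 0
static void example_program_pio(struct ata_device *adev)
{
	struct ata_timing t;
	int T = 30000;	/* assumed bus clock period in ps (~33 MHz) */
	int UT = 30000;	/* assumed UDMA clock period in ps */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT) == 0) {
		/* t.setup/t.active/t.recover now hold clock counts and
		 * could be packed into the controller's timing registers */
	}
}
#endif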
2780 * ata_down_xfermask_limit - adjust dev xfer masks downward
2781 * @dev: Device to adjust xfer masks
2782 * @sel: ATA_DNXFER_* selector
2784 * Adjust xfer masks of @dev downward. Note that this function
2785 * does not apply the change. Invoking ata_set_mode() afterwards
2786 * will apply the limit.
2788 * LOCKING:
2789 * Inherited from caller.
2791 * RETURNS:
2792 * 0 on success, negative errno on failure
2794 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2796 char buf[32];
2797 unsigned int orig_mask, xfer_mask;
2798 unsigned int pio_mask, mwdma_mask, udma_mask;
2799 int quiet, highbit;
2801 quiet = !!(sel & ATA_DNXFER_QUIET);
2802 sel &= ~ATA_DNXFER_QUIET;
2804 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2805 dev->mwdma_mask,
2806 dev->udma_mask);
2807 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2809 switch (sel) {
2810 case ATA_DNXFER_PIO:
2811 highbit = fls(pio_mask) - 1;
2812 pio_mask &= ~(1 << highbit);
2813 break;
2815 case ATA_DNXFER_DMA:
2816 if (udma_mask) {
2817 highbit = fls(udma_mask) - 1;
2818 udma_mask &= ~(1 << highbit);
2819 if (!udma_mask)
2820 return -ENOENT;
2821 } else if (mwdma_mask) {
2822 highbit = fls(mwdma_mask) - 1;
2823 mwdma_mask &= ~(1 << highbit);
2824 if (!mwdma_mask)
2825 return -ENOENT;
2827 break;
2829 case ATA_DNXFER_40C:
2830 udma_mask &= ATA_UDMA_MASK_40C;
2831 break;
2833 case ATA_DNXFER_FORCE_PIO0:
2834 pio_mask &= 1;
2835 case ATA_DNXFER_FORCE_PIO:
2836 mwdma_mask = 0;
2837 udma_mask = 0;
2838 break;
2840 default:
2841 BUG();
2844 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2846 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2847 return -ENOENT;
2849 if (!quiet) {
2850 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2851 snprintf(buf, sizeof(buf), "%s:%s",
2852 ata_mode_string(xfer_mask),
2853 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2854 else
2855 snprintf(buf, sizeof(buf), "%s",
2856 ata_mode_string(xfer_mask));
2858 ata_dev_printk(dev, KERN_WARNING,
2859 "limiting speed to %s\n", buf);
2862 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2863 &dev->udma_mask);
2865 return 0;
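/*
 * Worked example (editor's sketch): a device with pio_mask == 0x1f
 * (PIO0-4) passed ATA_DNXFER_PIO loses its highest bit, leaving 0x0f
 * (PIO0-3).  When even PIO0 would be masked off, or nothing changes,
 * the function returns -ENOENT so the caller knows no further
 * slow-down is possible.
 */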
2868 static int ata_dev_set_mode(struct ata_device *dev)
2870 struct ata_eh_context *ehc = &dev->link->eh_context;
2871 unsigned int err_mask;
2872 int rc;
2874 dev->flags &= ~ATA_DFLAG_PIO;
2875 if (dev->xfer_shift == ATA_SHIFT_PIO)
2876 dev->flags |= ATA_DFLAG_PIO;
2878 err_mask = ata_dev_set_xfermode(dev);
2879 /* Old CFA may refuse this command, which is just fine */
2880 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2881 err_mask &= ~AC_ERR_DEV;
2882 /* Some very old devices and some bad newer ones fail any kind of
2883 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2884 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2885 dev->pio_mode <= XFER_PIO_2)
2886 err_mask &= ~AC_ERR_DEV;
2887 if (err_mask) {
2888 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2889 "(err_mask=0x%x)\n", err_mask);
2890 return -EIO;
2893 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2894 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
2895 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2896 if (rc)
2897 return rc;
2899 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2900 dev->xfer_shift, (int)dev->xfer_mode);
2902 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2903 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2904 return 0;
2908 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2909 * @link: link on which timings will be programmed
2910 * @r_failed_dev: out parameter for failed device
2912 * Standard implementation of the function used to tune and set
2913 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2914 * ata_dev_set_mode() fails, pointer to the failing device is
2915 * returned in @r_failed_dev.
2917 * LOCKING:
2918 * PCI/etc. bus probe sem.
2920 * RETURNS:
2921 * 0 on success, negative errno otherwise
2924 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2926 struct ata_port *ap = link->ap;
2927 struct ata_device *dev;
2928 int rc = 0, used_dma = 0, found = 0;
2930 /* step 1: calculate xfer_mask */
2931 ata_link_for_each_dev(dev, link) {
2932 unsigned int pio_mask, dma_mask;
2933 unsigned int mode_mask;
2935 if (!ata_dev_enabled(dev))
2936 continue;
2938 mode_mask = ATA_DMA_MASK_ATA;
2939 if (dev->class == ATA_DEV_ATAPI)
2940 mode_mask = ATA_DMA_MASK_ATAPI;
2941 else if (ata_id_is_cfa(dev->id))
2942 mode_mask = ATA_DMA_MASK_CFA;
2944 ata_dev_xfermask(dev);
2946 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2947 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2949 if (libata_dma_mask & mode_mask)
2950 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2951 else
2952 dma_mask = 0;
2954 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2955 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2957 found = 1;
2958 if (dev->dma_mode)
2959 used_dma = 1;
2961 if (!found)
2962 goto out;
2964 /* step 2: always set host PIO timings */
2965 ata_link_for_each_dev(dev, link) {
2966 if (!ata_dev_enabled(dev))
2967 continue;
2969 if (!dev->pio_mode) {
2970 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2971 rc = -EINVAL;
2972 goto out;
2975 dev->xfer_mode = dev->pio_mode;
2976 dev->xfer_shift = ATA_SHIFT_PIO;
2977 if (ap->ops->set_piomode)
2978 ap->ops->set_piomode(ap, dev);
2981 /* step 3: set host DMA timings */
2982 ata_link_for_each_dev(dev, link) {
2983 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2984 continue;
2986 dev->xfer_mode = dev->dma_mode;
2987 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2988 if (ap->ops->set_dmamode)
2989 ap->ops->set_dmamode(ap, dev);
2992 /* step 4: update devices' xfer mode */
2993 ata_link_for_each_dev(dev, link) {
2994 /* don't update suspended devices' xfer mode */
2995 if (!ata_dev_enabled(dev))
2996 continue;
2998 rc = ata_dev_set_mode(dev);
2999 if (rc)
3000 goto out;
3003 /* Record simplex status. If we selected DMA then the other
3004 * host channels are not permitted to do so.
3006 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3007 ap->host->simplex_claimed = ap;
3009 out:
3010 if (rc)
3011 *r_failed_dev = dev;
3012 return rc;
3016 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3017 * @link: link on which timings will be programmed
3018 * @r_failed_dev: out parameter for failed device
3020 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3021 * ata_set_mode() fails, pointer to the failing device is
3022 * returned in @r_failed_dev.
3024 * LOCKING:
3025 * PCI/etc. bus probe sem.
3027 * RETURNS:
3028 * 0 on success, negative errno otherwise
3030 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3032 struct ata_port *ap = link->ap;
3034 /* has private set_mode? */
3035 if (ap->ops->set_mode)
3036 return ap->ops->set_mode(link, r_failed_dev);
3037 return ata_do_set_mode(link, r_failed_dev);
3041 * ata_tf_to_host - issue ATA taskfile to host controller
3042 * @ap: port to which command is being issued
3043 * @tf: ATA taskfile register set
3045 * Issues ATA taskfile register set to ATA host controller,
3046 * with proper synchronization with interrupt handler and
3047 * other threads.
3049 * LOCKING:
3050 * spin_lock_irqsave(host lock)
3053 static inline void ata_tf_to_host(struct ata_port *ap,
3054 const struct ata_taskfile *tf)
3056 ap->ops->tf_load(ap, tf);
3057 ap->ops->exec_command(ap, tf);
3061 * ata_busy_sleep - sleep until BSY clears, or timeout
3062 * @ap: port containing status register to be polled
3063 * @tmout_pat: impatience timeout
3064 * @tmout: overall timeout
3066 * Sleep until ATA Status register bit BSY clears,
3067 * or a timeout occurs.
3069 * LOCKING:
3070 * Kernel thread context (may sleep).
3072 * RETURNS:
3073 * 0 on success, -errno otherwise.
3075 int ata_busy_sleep(struct ata_port *ap,
3076 unsigned long tmout_pat, unsigned long tmout)
3078 unsigned long timer_start, timeout;
3079 u8 status;
3081 status = ata_busy_wait(ap, ATA_BUSY, 300);
3082 timer_start = jiffies;
3083 timeout = timer_start + tmout_pat;
3084 while (status != 0xff && (status & ATA_BUSY) &&
3085 time_before(jiffies, timeout)) {
3086 msleep(50);
3087 status = ata_busy_wait(ap, ATA_BUSY, 3);
3090 if (status != 0xff && (status & ATA_BUSY))
3091 ata_port_printk(ap, KERN_WARNING,
3092 "port is slow to respond, please be patient "
3093 "(Status 0x%x)\n", status);
3095 timeout = timer_start + tmout;
3096 while (status != 0xff && (status & ATA_BUSY) &&
3097 time_before(jiffies, timeout)) {
3098 msleep(50);
3099 status = ata_chk_status(ap);
3102 if (status == 0xff)
3103 return -ENODEV;
3105 if (status & ATA_BUSY) {
3106 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3107 "(%lu secs, Status 0x%x)\n",
3108 tmout / HZ, status);
3109 return -EBUSY;
3112 return 0;
3116 * ata_wait_ready - sleep until BSY clears, or timeout
3117 * @ap: port containing status register to be polled
3118 * @deadline: deadline jiffies for the operation
3120 * Sleep until ATA Status register bit BSY clears, or timeout
3121 * occurs.
3123 * LOCKING:
3124 * Kernel thread context (may sleep).
3126 * RETURNS:
3127 * 0 on success, -errno otherwise.
3129 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3131 unsigned long start = jiffies;
3132 int warned = 0;
3134 while (1) {
3135 u8 status = ata_chk_status(ap);
3136 unsigned long now = jiffies;
3138 if (!(status & ATA_BUSY))
3139 return 0;
3140 if (!ata_link_online(&ap->link) && status == 0xff)
3141 return -ENODEV;
3142 if (time_after(now, deadline))
3143 return -EBUSY;
3145 if (!warned && time_after(now, start + 5 * HZ) &&
3146 (deadline - now > 3 * HZ)) {
3147 ata_port_printk(ap, KERN_WARNING,
3148 "port is slow to respond, please be patient "
3149 "(Status 0x%x)\n", status);
3150 warned = 1;
3153 msleep(50);
3157 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3158 unsigned long deadline)
3160 struct ata_ioports *ioaddr = &ap->ioaddr;
3161 unsigned int dev0 = devmask & (1 << 0);
3162 unsigned int dev1 = devmask & (1 << 1);
3163 int rc, ret = 0;
3165 /* if device 0 was found in ata_devchk, wait for its
3166 * BSY bit to clear
3168 if (dev0) {
3169 rc = ata_wait_ready(ap, deadline);
3170 if (rc) {
3171 if (rc != -ENODEV)
3172 return rc;
3173 ret = rc;
3177 /* if device 1 was found in ata_devchk, wait for register
3178 * access briefly, then wait for BSY to clear.
3180 if (dev1) {
3181 int i;
3183 ap->ops->dev_select(ap, 1);
3185 /* Wait for register access. Some ATAPI devices fail
3186 * to set nsect/lbal after reset, so don't waste too
3187 * much time on it. We're gonna wait for !BSY anyway.
3189 for (i = 0; i < 2; i++) {
3190 u8 nsect, lbal;
3192 nsect = ioread8(ioaddr->nsect_addr);
3193 lbal = ioread8(ioaddr->lbal_addr);
3194 if ((nsect == 1) && (lbal == 1))
3195 break;
3196 msleep(50); /* give drive a breather */
3199 rc = ata_wait_ready(ap, deadline);
3200 if (rc) {
3201 if (rc != -ENODEV)
3202 return rc;
3203 ret = rc;
3207 /* is all this really necessary? */
3208 ap->ops->dev_select(ap, 0);
3209 if (dev1)
3210 ap->ops->dev_select(ap, 1);
3211 if (dev0)
3212 ap->ops->dev_select(ap, 0);
3214 return ret;
3217 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3218 unsigned long deadline)
3220 struct ata_ioports *ioaddr = &ap->ioaddr;
3221 struct ata_device *dev;
3222 int i = 0;
3224 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3226 /* software reset. causes dev0 to be selected */
3227 iowrite8(ap->ctl, ioaddr->ctl_addr);
3228 udelay(20); /* FIXME: flush */
3229 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3230 udelay(20); /* FIXME: flush */
3231 iowrite8(ap->ctl, ioaddr->ctl_addr);
3233 /* If we issued an SRST then an ATA drive (not ATAPI)
3234 * may have changed configuration and be in PIO0 timing. If
3235 * we did a hard reset (or are coming from power on) this is
3236 * true for ATA or ATAPI. Until we've set a suitable controller
3237 * mode we should not touch the bus as we may be talking too fast.
3240 ata_link_for_each_dev(dev, &ap->link)
3241 dev->pio_mode = XFER_PIO_0;
3243 /* If the controller has a pio mode setup function then use
3244 it to set the chipset to rights. Don't touch the DMA setup
3245 as that will be dealt with when revalidating */
3246 if (ap->ops->set_piomode) {
3247 ata_link_for_each_dev(dev, &ap->link)
3248 if (devmask & (1 << i++))
3249 ap->ops->set_piomode(ap, dev);
3252 /* spec mandates ">= 2ms" before checking status.
3253 * We wait 150ms, because that was the magic delay used for
3254 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3255 * between when the ATA command register is written, and then
3256 * status is checked. Because waiting for "a while" before
3257 * checking status is fine, post SRST, we perform this magic
3258 * delay here as well.
3260 * The old drivers/ide code uses the 2 ms rule and then waits for ready.
3262 msleep(150);
3264 /* Before we perform post-reset processing we want to see if
3265 * the bus shows 0xFF because the odd clown forgets the D7
3266 * pulldown resistor.
3268 if (ata_check_status(ap) == 0xFF)
3269 return -ENODEV;
3271 return ata_bus_post_reset(ap, devmask, deadline);
3275 * ata_bus_reset - reset host port and associated ATA channel
3276 * @ap: port to reset
3278 * This is typically the first time we actually start issuing
3279 * commands to the ATA channel. We wait for BSY to clear, then
3280 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3281 * result. Determine what devices, if any, are on the channel
3282 * by looking at the device 0/1 error register. Look at the signature
3283 * stored in each device's taskfile registers, to determine if
3284 * the device is ATA or ATAPI.
3286 * LOCKING:
3287 * PCI/etc. bus probe sem.
3288 * Obtains host lock.
3290 * SIDE EFFECTS:
3291 * Sets ATA_FLAG_DISABLED if bus reset fails.
3294 void ata_bus_reset(struct ata_port *ap)
3296 struct ata_device *device = ap->link.device;
3297 struct ata_ioports *ioaddr = &ap->ioaddr;
3298 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3299 u8 err;
3300 unsigned int dev0, dev1 = 0, devmask = 0;
3301 int rc;
3303 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3305 /* determine if device 0/1 are present */
3306 if (ap->flags & ATA_FLAG_SATA_RESET)
3307 dev0 = 1;
3308 else {
3309 dev0 = ata_devchk(ap, 0);
3310 if (slave_possible)
3311 dev1 = ata_devchk(ap, 1);
3314 if (dev0)
3315 devmask |= (1 << 0);
3316 if (dev1)
3317 devmask |= (1 << 1);
3319 /* select device 0 again */
3320 ap->ops->dev_select(ap, 0);
3322 /* issue bus reset */
3323 if (ap->flags & ATA_FLAG_SRST) {
3324 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3325 if (rc && rc != -ENODEV)
3326 goto err_out;
3330 * determine by signature whether we have ATA or ATAPI devices
3332 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3333 if ((slave_possible) && (err != 0x81))
3334 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3336 /* is double-select really necessary? */
3337 if (device[1].class != ATA_DEV_NONE)
3338 ap->ops->dev_select(ap, 1);
3339 if (device[0].class != ATA_DEV_NONE)
3340 ap->ops->dev_select(ap, 0);
3342 /* if no devices were detected, disable this port */
3343 if ((device[0].class == ATA_DEV_NONE) &&
3344 (device[1].class == ATA_DEV_NONE))
3345 goto err_out;
3347 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3348 /* set up device control for ATA_FLAG_SATA_RESET */
3349 iowrite8(ap->ctl, ioaddr->ctl_addr);
3352 DPRINTK("EXIT\n");
3353 return;
3355 err_out:
3356 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3357 ata_port_disable(ap);
3359 DPRINTK("EXIT\n");
3363 * sata_link_debounce - debounce SATA phy status
3364 * @link: ATA link to debounce SATA phy status for
3365 * @params: timing parameters { interval, duration, timeout } in msec
3366 * @deadline: deadline jiffies for the operation
3368 * Make sure the SStatus of @link reaches a stable state, determined
3369 * by it holding the same value, with DET not 1, for @duration while
3370 * polled every @interval, before @timeout. @timeout constrains the
3371 * beginning of the stable state. Because DET gets stuck at 1 on
3372 * some controllers after hot unplugging, this function waits until
3373 * timeout and then returns 0 if DET is stable at 1.
3375 * @timeout is further limited by @deadline. The sooner of the
3376 * two is used.
3378 * LOCKING:
3379 * Kernel thread context (may sleep)
3381 * RETURNS:
3382 * 0 on success, -errno on failure.
3384 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3385 unsigned long deadline)
3387 unsigned long interval_msec = params[0];
3388 unsigned long duration = msecs_to_jiffies(params[1]);
3389 unsigned long last_jiffies, t;
3390 u32 last, cur;
3391 int rc;
3393 t = jiffies + msecs_to_jiffies(params[2]);
3394 if (time_before(t, deadline))
3395 deadline = t;
3397 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3398 return rc;
3399 cur &= 0xf;
3401 last = cur;
3402 last_jiffies = jiffies;
3404 while (1) {
3405 msleep(interval_msec);
3406 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3407 return rc;
3408 cur &= 0xf;
3410 /* DET stable? */
3411 if (cur == last) {
3412 if (cur == 1 && time_before(jiffies, deadline))
3413 continue;
3414 if (time_after(jiffies, last_jiffies + duration))
3415 return 0;
3416 continue;
3419 /* unstable, start over */
3420 last = cur;
3421 last_jiffies = jiffies;
3423 /* Check deadline. If debouncing failed, return
3424 * -EPIPE to tell upper layer to lower link speed.
3426 if (time_after(jiffies, deadline))
3427 return -EPIPE;
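/*
 * Sketch (editor's illustration; the numbers are hypothetical): with
 * params == { 10, 100, 2000 }, the loop samples DET every 10 ms and
 * returns 0 once the value has held steady (and is not 1) for 100 ms;
 * a value stuck at 1 is tolerated until the 2 s timeout and then also
 * reported as 0, while a link still bouncing past @deadline fails
 * with -EPIPE.
 */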
3432 * sata_link_resume - resume SATA link
3433 * @link: ATA link to resume SATA
3434 * @params: timing parameters { interval, duration, timeout } in msec
3435 * @deadline: deadline jiffies for the operation
3437 * Resume SATA phy @link and debounce it.
3439 * LOCKING:
3440 * Kernel thread context (may sleep)
3442 * RETURNS:
3443 * 0 on success, -errno on failure.
3445 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3446 unsigned long deadline)
3448 u32 scontrol;
3449 int rc;
3451 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3452 return rc;
3454 scontrol = (scontrol & 0x0f0) | 0x300;
3456 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3457 return rc;
3459 /* Some PHYs react badly if SStatus is pounded immediately
3460 * after resuming. Delay 200ms before debouncing.
3462 msleep(200);
3464 return sata_link_debounce(link, params, deadline);
3468 * ata_std_prereset - prepare for reset
3469 * @link: ATA link to be reset
3470 * @deadline: deadline jiffies for the operation
3472 * @link is about to be reset. Initialize it. Failure from
3473 * prereset makes libata abort the whole reset sequence and give up
3474 * on that port, so prereset should be best-effort. It does its
3475 * best to prepare for reset sequence but if things go wrong, it
3476 * should just whine, not fail.
3478 * LOCKING:
3479 * Kernel thread context (may sleep)
3481 * RETURNS:
3482 * 0 on success, -errno otherwise.
3484 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3486 struct ata_port *ap = link->ap;
3487 struct ata_eh_context *ehc = &link->eh_context;
3488 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3489 int rc;
3491 /* handle link resume */
3492 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3493 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3494 ehc->i.action |= ATA_EH_HARDRESET;
3496 /* Some PMPs don't work with only SRST, force hardreset if PMP
3497 * is supported.
3499 if (ap->flags & ATA_FLAG_PMP)
3500 ehc->i.action |= ATA_EH_HARDRESET;
3502 /* if we're about to do hardreset, nothing more to do */
3503 if (ehc->i.action & ATA_EH_HARDRESET)
3504 return 0;
3506 /* if SATA, resume link */
3507 if (ap->flags & ATA_FLAG_SATA) {
3508 rc = sata_link_resume(link, timing, deadline);
3509 /* whine about phy resume failure but proceed */
3510 if (rc && rc != -EOPNOTSUPP)
3511 ata_link_printk(link, KERN_WARNING, "failed to resume "
3512 "link for reset (errno=%d)\n", rc);
3515 /* Wait for !BSY if the controller can wait for the first D2H
3516 * Reg FIS and we don't know that no device is attached.
3518 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3519 rc = ata_wait_ready(ap, deadline);
3520 if (rc && rc != -ENODEV) {
3521 ata_link_printk(link, KERN_WARNING, "device not ready "
3522 "(errno=%d), forcing hardreset\n", rc);
3523 ehc->i.action |= ATA_EH_HARDRESET;
3527 return 0;
3531 * ata_std_softreset - reset host port via ATA SRST
3532 * @link: ATA link to reset
3533 * @classes: resulting classes of attached devices
3534 * @deadline: deadline jiffies for the operation
3536 * Reset host port using ATA SRST.
3538 * LOCKING:
3539 * Kernel thread context (may sleep)
3541 * RETURNS:
3542 * 0 on success, -errno otherwise.
3544 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3545 unsigned long deadline)
3547 struct ata_port *ap = link->ap;
3548 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3549 unsigned int devmask = 0;
3550 int rc;
3551 u8 err;
3553 DPRINTK("ENTER\n");
3555 if (ata_link_offline(link)) {
3556 classes[0] = ATA_DEV_NONE;
3557 goto out;
3560 /* determine if device 0/1 are present */
3561 if (ata_devchk(ap, 0))
3562 devmask |= (1 << 0);
3563 if (slave_possible && ata_devchk(ap, 1))
3564 devmask |= (1 << 1);
3566 /* select device 0 again */
3567 ap->ops->dev_select(ap, 0);
3569 /* issue bus reset */
3570 DPRINTK("about to softreset, devmask=%x\n", devmask);
3571 rc = ata_bus_softreset(ap, devmask, deadline);
3572 /* if link is occupied, -ENODEV too is an error */
3573 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3574 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3575 return rc;
3578 /* determine by signature whether we have ATA or ATAPI devices */
3579 classes[0] = ata_dev_try_classify(&link->device[0],
3580 devmask & (1 << 0), &err);
3581 if (slave_possible && err != 0x81)
3582 classes[1] = ata_dev_try_classify(&link->device[1],
3583 devmask & (1 << 1), &err);
3585 out:
3586 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3587 return 0;
3591 * sata_link_hardreset - reset link via SATA phy reset
3592 * @link: link to reset
3593 * @timing: timing parameters { interval, duration, timeout } in msec
3594 * @deadline: deadline jiffies for the operation
3596 * SATA phy-reset @link using DET bits of SControl register.
3598 * LOCKING:
3599 * Kernel thread context (may sleep)
3601 * RETURNS:
3602 * 0 on success, -errno otherwise.
3604 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3605 unsigned long deadline)
3607 u32 scontrol;
3608 int rc;
3610 DPRINTK("ENTER\n");
3612 if (sata_set_spd_needed(link)) {
3613 /* SATA spec says nothing about how to reconfigure
3614 * spd. To be on the safe side, turn off phy during
3615 * reconfiguration. This works for at least ICH7 AHCI
3616 * and Sil3124.
3618 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3619 goto out;
3621 scontrol = (scontrol & 0x0f0) | 0x304;
3623 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3624 goto out;
3626 sata_set_spd(link);
3629 /* issue phy wake/reset */
3630 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3631 goto out;
3633 scontrol = (scontrol & 0x0f0) | 0x301;
3635 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3636 goto out;
3638 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3639 * 10.4.2 says at least 1 ms.
3641 msleep(1);
3643 /* bring link back */
3644 rc = sata_link_resume(link, timing, deadline);
3645 out:
3646 DPRINTK("EXIT, rc=%d\n", rc);
3647 return rc;
3651 * sata_std_hardreset - reset host port via SATA phy reset
3652 * @link: link to reset
3653 * @class: resulting class of attached device
3654 * @deadline: deadline jiffies for the operation
3656 * SATA phy-reset host port using DET bits of SControl register,
3657 * wait for !BSY and classify the attached device.
3659 * LOCKING:
3660 * Kernel thread context (may sleep)
3662 * RETURNS:
3663 * 0 on success, -errno otherwise.
3665 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3666 unsigned long deadline)
3668 struct ata_port *ap = link->ap;
3669 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3670 int rc;
3672 DPRINTK("ENTER\n");
3674 /* do hardreset */
3675 rc = sata_link_hardreset(link, timing, deadline);
3676 if (rc) {
3677 ata_link_printk(link, KERN_ERR,
3678 "COMRESET failed (errno=%d)\n", rc);
3679 return rc;
3682 /* TODO: phy layer with polling, timeouts, etc. */
3683 if (ata_link_offline(link)) {
3684 *class = ATA_DEV_NONE;
3685 DPRINTK("EXIT, link offline\n");
3686 return 0;
3689 /* wait a while before checking status, see SRST for more info */
3690 msleep(150);
3692 /* If PMP is supported, we have to do follow-up SRST. Note
3693 * that some PMPs don't send D2H Reg FIS after hardreset at
3694 * all if the first port is empty. Wait for it just for a
3695 * second and request follow-up SRST.
3697 if (ap->flags & ATA_FLAG_PMP) {
3698 ata_wait_ready(ap, jiffies + HZ);
3699 return -EAGAIN;
3702 rc = ata_wait_ready(ap, deadline);
3703 /* link occupied, -ENODEV too is an error */
3704 if (rc) {
3705 ata_link_printk(link, KERN_ERR,
3706 "COMRESET failed (errno=%d)\n", rc);
3707 return rc;
3710 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3712 *class = ata_dev_try_classify(link->device, 1, NULL);
3714 DPRINTK("EXIT, class=%u\n", *class);
3715 return 0;
3719 * ata_std_postreset - standard postreset callback
3720 * @link: the target ata_link
3721 * @classes: classes of attached devices
3723 * This function is invoked after a successful reset. Note that
3724 * the device might have been reset more than once using
3725 * different reset methods before postreset is invoked.
3727 * LOCKING:
3728 * Kernel thread context (may sleep)
3730 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3732 struct ata_port *ap = link->ap;
3733 u32 serror;
3735 DPRINTK("ENTER\n");
3737 /* print link status */
3738 sata_print_link_status(link);
3740 /* clear SError */
3741 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3742 sata_scr_write(link, SCR_ERROR, serror);
3744 /* is double-select really necessary? */
3745 if (classes[0] != ATA_DEV_NONE)
3746 ap->ops->dev_select(ap, 1);
3747 if (classes[1] != ATA_DEV_NONE)
3748 ap->ops->dev_select(ap, 0);
3750 /* bail out if no device is present */
3751 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3752 DPRINTK("EXIT, no device\n");
3753 return;
3756 /* set up device control */
3757 if (ap->ioaddr.ctl_addr)
3758 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3760 DPRINTK("EXIT\n");
3764 * ata_dev_same_device - Determine whether new ID matches configured device
3765 * @dev: device to compare against
3766 * @new_class: class of the new device
3767 * @new_id: IDENTIFY page of the new device
3769 * Compare @new_class and @new_id against @dev and determine
3770 * whether @dev is the device indicated by @new_class and
3771 * @new_id.
3773 * LOCKING:
3774 * None.
3776 * RETURNS:
3777 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3779 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3780 const u16 *new_id)
3782 const u16 *old_id = dev->id;
3783 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3784 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3786 if (dev->class != new_class) {
3787 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3788 dev->class, new_class);
3789 return 0;
3792 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3793 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3794 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3795 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3797 if (strcmp(model[0], model[1])) {
3798 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3799 "'%s' != '%s'\n", model[0], model[1]);
3800 return 0;
3803 if (strcmp(serial[0], serial[1])) {
3804 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3805 "'%s' != '%s'\n", serial[0], serial[1]);
3806 return 0;
3809 return 1;
3813 * ata_dev_reread_id - Re-read IDENTIFY data
3814 * @dev: target ATA device
3815 * @readid_flags: read ID flags
3817 * Re-read IDENTIFY page and make sure @dev is still attached to
3818 * the port.
3820 * LOCKING:
3821 * Kernel thread context (may sleep)
3823 * RETURNS:
3824 * 0 on success, negative errno otherwise
3826 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3828 unsigned int class = dev->class;
3829 u16 *id = (void *)dev->link->ap->sector_buf;
3830 int rc;
3832 /* read ID data */
3833 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3834 if (rc)
3835 return rc;
3837 /* is the device still there? */
3838 if (!ata_dev_same_device(dev, class, id))
3839 return -ENODEV;
3841 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3842 return 0;
3846 * ata_dev_revalidate - Revalidate ATA device
3847 * @dev: device to revalidate
3848 * @new_class: new class code
3849 * @readid_flags: read ID flags
3851 * Re-read IDENTIFY page, make sure @dev is still attached to the
3852 * port and reconfigure it according to the new IDENTIFY page.
3854 * LOCKING:
3855 * Kernel thread context (may sleep)
3857 * RETURNS:
3858 * 0 on success, negative errno otherwise
3860 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3861 unsigned int readid_flags)
3863 u64 n_sectors = dev->n_sectors;
3864 int rc;
3866 if (!ata_dev_enabled(dev))
3867 return -ENODEV;
3869 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3870 if (ata_class_enabled(new_class) &&
3871 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3872 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3873 dev->class, new_class);
3874 rc = -ENODEV;
3875 goto fail;
3878 /* re-read ID */
3879 rc = ata_dev_reread_id(dev, readid_flags);
3880 if (rc)
3881 goto fail;
3883 /* configure device according to the new ID */
3884 rc = ata_dev_configure(dev);
3885 if (rc)
3886 goto fail;
3888 /* verify n_sectors hasn't changed */
3889 if (dev->class == ATA_DEV_ATA && n_sectors &&
3890 dev->n_sectors != n_sectors) {
3891 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3892 "%llu != %llu\n",
3893 (unsigned long long)n_sectors,
3894 (unsigned long long)dev->n_sectors);
3896 /* restore original n_sectors */
3897 dev->n_sectors = n_sectors;
3899 rc = -ENODEV;
3900 goto fail;
3903 return 0;
3905 fail:
3906 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3907 return rc;
3910 struct ata_blacklist_entry {
3911 const char *model_num;
3912 const char *model_rev;
3913 unsigned long horkage;
3916 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3917 /* Devices with DMA related problems under Linux */
3918 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3919 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3920 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3921 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3922 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3923 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3924 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3925 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3926 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3927 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3928 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3929 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3930 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3931 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3932 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3933 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3934 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3935 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3936 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3937 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3938 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3939 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3940 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3941 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3942 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3943 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3944 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3945 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3946 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3947 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3948 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
3949 { "IOMEGA ZIP 250 ATAPI Floppy",
3950 NULL, ATA_HORKAGE_NODMA },
3951 /* Odd clown on sil3726/4726 PMPs */
3952 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3953 ATA_HORKAGE_SKIP_PM },
3955 /* Weird ATAPI devices */
3956 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3958 /* Devices we expect to fail diagnostics */
3960 /* Devices where NCQ should be avoided */
3961 /* NCQ is slow */
3962 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3963 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3964 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3965 /* NCQ is broken */
3966 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3967 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3968 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
3969 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
3970 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3972 /* Blacklist entries taken from Silicon Image 3124/3132
3973 Windows driver .inf file - also several Linux problem reports */
3974 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3975 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3976 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3977 /* Drives which do spurious command completion */
3978 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
3979 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3980 { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, },
3981 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3982 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3983 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
3984 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3985 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3986 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3987 { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, },
3988 { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ, },
3989 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
3990 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
3991 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
3993 /* devices which puke on READ_NATIVE_MAX */
3994 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3995 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3996 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3997 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3999 /* Devices which report 1 sector over size HPA */
4000 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4001 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4003 /* End Marker */
4007 int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4009 const char *p;
4010 int len;
4013 * check for trailing wildcard: *\0
4015 p = strchr(patt, wildchar);
4016 if (p && ((*(p + 1)) == 0))
4017 len = p - patt;
4018 else {
4019 len = strlen(name);
4020 if (!len) {
4021 if (!*patt)
4022 return 0;
4023 return -1;
4027 return strncmp(patt, name, len);
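/*
 * Illustrative matches (editor's sketch):
 *   strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0 - the
 *     trailing wildcard limits the comparison to the "Maxtor " prefix;
 *   strn_pattern_cmp("ST380817AS", "ST380817AS", '*') == 0 - with no
 *     wildcard the whole name must match.
 */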
4030 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4032 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4033 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4034 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4036 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4037 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4039 while (ad->model_num) {
4040 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4041 if (ad->model_rev == NULL)
4042 return ad->horkage;
4043 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4044 return ad->horkage;
4046 ad++;
4048 return 0;
4051 static int ata_dma_blacklisted(const struct ata_device *dev)
4053 /* We don't support polling DMA.  Blacklist DMA for those ATAPI
4054 * devices with CDB-intr (and use PIO instead) if the LLDD handles
4055 * interrupts only in the HSM_ST_LAST state.
4057 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4058 (dev->flags & ATA_DFLAG_CDB_INTR))
4059 return 1;
4060 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4064 * ata_dev_xfermask - Compute supported xfermask of the given device
4065 * @dev: Device to compute xfermask for
4067 * Compute supported xfermask of @dev and store it in
4068 * dev->*_mask. This function is responsible for applying all
4069 * known limits including host controller limits, device
4070 * blacklist, etc...
4072 * LOCKING:
4073 * None.
4075 static void ata_dev_xfermask(struct ata_device *dev)
4077 struct ata_link *link = dev->link;
4078 struct ata_port *ap = link->ap;
4079 struct ata_host *host = ap->host;
4080 unsigned long xfer_mask;
4082 /* controller modes available */
4083 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4084 ap->mwdma_mask, ap->udma_mask);
4086 /* drive modes available */
4087 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4088 dev->mwdma_mask, dev->udma_mask);
4089 xfer_mask &= ata_id_xfermask(dev->id);
4092 * CFA Advanced TrueIDE timings are not allowed on a shared
4093 * cable
4095 if (ata_dev_pair(dev)) {
4096 /* No PIO5 or PIO6 */
4097 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4098 /* No MWDMA3 or MWDMA 4 */
4099 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4102 if (ata_dma_blacklisted(dev)) {
4103 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4104 ata_dev_printk(dev, KERN_WARNING,
4105 "device is on DMA blacklist, disabling DMA\n");
4108 if ((host->flags & ATA_HOST_SIMPLEX) &&
4109 host->simplex_claimed && host->simplex_claimed != ap) {
4110 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4111 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4112 "other device, disabling DMA\n");
4115 if (ap->flags & ATA_FLAG_NO_IORDY)
4116 xfer_mask &= ata_pio_mask_no_iordy(dev);
4118 if (ap->ops->mode_filter)
4119 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4121 /* Apply cable rule here. Don't apply it early because when
4122 * we handle hot plug the cable type can itself change.
4123 * Check this last so that we know if the transfer rate was
4124 * solely limited by the cable.
4125 * Unknown or 80 wire cables reported host side are checked
4126 * drive side as well. Cases where we know a 40-wire cable
4127 * is used safely for 80 are not checked here.
4129 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4130 /* UDMA/44 or higher would be available */
4131 if ((ap->cbl == ATA_CBL_PATA40) ||
4132 (ata_drive_40wire(dev->id) &&
4133 (ap->cbl == ATA_CBL_PATA_UNK ||
4134 ap->cbl == ATA_CBL_PATA80))) {
4135 ata_dev_printk(dev, KERN_WARNING,
4136 "limited to UDMA/33 due to 40-wire cable\n");
4137 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4140 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4141 &dev->mwdma_mask, &dev->udma_mask);
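/*
 * Note on the cable rule above (editor's sketch): 0xF8 selects bits 3-7
 * of the UDMA mask, i.e. UDMA/44 (UDMA3) and faster - exactly the modes
 * that require an 80-wire cable - so clearing those bits caps a 40-wire
 * setup at UDMA/33.
 */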
4145 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4146 * @dev: Device to which command will be sent
4148 * Issue SET FEATURES - XFER MODE command to device @dev
4149 * on port @ap.
4151 * LOCKING:
4152 * PCI/etc. bus probe sem.
4154 * RETURNS:
4155 * 0 on success, AC_ERR_* mask otherwise.
4158 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4160 struct ata_taskfile tf;
4161 unsigned int err_mask;
4163 /* set up set-features taskfile */
4164 DPRINTK("set features - xfer mode\n");
4166 /* Some controllers and ATAPI devices show flaky interrupt
4167 * behavior after setting xfer mode. Use polling instead.
4169 ata_tf_init(dev, &tf);
4170 tf.command = ATA_CMD_SET_FEATURES;
4171 tf.feature = SETFEATURES_XFER;
4172 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4173 tf.protocol = ATA_PROT_NODATA;
4174 tf.nsect = dev->xfer_mode;
4176 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4178 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4179 return err_mask;
4183 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4184 * @dev: Device to which command will be sent
4185 * @enable: Whether to enable or disable the feature
4187 * Issue SET FEATURES - SATA FEATURES command to device @dev
4188 * on the port it is attached to, with the sector count set to
4189 * indicate the Asynchronous Notification feature.
4191 * LOCKING:
4192 * PCI/etc. bus probe sem.
4194 * RETURNS:
4195 * 0 on success, AC_ERR_* mask otherwise.
4197 static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4199 struct ata_taskfile tf;
4200 unsigned int err_mask;
4202 /* set up set-features taskfile */
4203 DPRINTK("set features - SATA features\n");
4205 ata_tf_init(dev, &tf);
4206 tf.command = ATA_CMD_SET_FEATURES;
4207 tf.feature = enable;
4208 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4209 tf.protocol = ATA_PROT_NODATA;
4210 tf.nsect = SATA_AN;
4212 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4214 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4215 return err_mask;
4219 * ata_dev_init_params - Issue INIT DEV PARAMS command
4220 * @dev: Device to which command will be sent
4221 * @heads: Number of heads (taskfile parameter)
4222 * @sectors: Number of sectors (taskfile parameter)
4224 * LOCKING:
4225 * Kernel thread context (may sleep)
4227 * RETURNS:
4228 * 0 on success, AC_ERR_* mask otherwise.
4230 static unsigned int ata_dev_init_params(struct ata_device *dev,
4231 u16 heads, u16 sectors)
4233 struct ata_taskfile tf;
4234 unsigned int err_mask;
4236 /* Number of sectors per track 1-255. Number of heads 1-16 */
4237 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4238 return AC_ERR_INVALID;
4240 /* set up init dev params taskfile */
4241 DPRINTK("init dev params \n");
4243 ata_tf_init(dev, &tf);
4244 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4245 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4246 tf.protocol = ATA_PROT_NODATA;
4247 tf.nsect = sectors;
4248 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4250 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4251 /* A clean abort indicates an original or just-out-of-spec drive,
4252 and we should continue, since we issue the setup based on the
4253 drive's reported working geometry */
4254 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4255 err_mask = 0;
4257 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4258 return err_mask;
4262 * ata_sg_clean - Unmap DMA memory associated with command
4263 * @qc: Command containing DMA memory to be released
4265 * Unmap all mapped DMA memory associated with this command.
4267 * LOCKING:
4268 * spin_lock_irqsave(host lock)
4270 void ata_sg_clean(struct ata_queued_cmd *qc)
4272 struct ata_port *ap = qc->ap;
4273 struct scatterlist *sg = qc->__sg;
4274 int dir = qc->dma_dir;
4275 void *pad_buf = NULL;
4277 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4278 WARN_ON(sg == NULL);
4280 if (qc->flags & ATA_QCFLAG_SINGLE)
4281 WARN_ON(qc->n_elem > 1);
4283 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4285 /* if we padded the buffer out to 32-bit bound, and data
4286 * xfer direction is from-device, we must copy from the
4287 * pad buffer back into the supplied buffer
4289 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4290 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4292 if (qc->flags & ATA_QCFLAG_SG) {
4293 if (qc->n_elem)
4294 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4295 /* restore last sg */
4296 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4297 if (pad_buf) {
4298 struct scatterlist *psg = &qc->pad_sgent;
4299 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4300 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4301 kunmap_atomic(addr, KM_IRQ0);
4303 } else {
4304 if (qc->n_elem)
4305 dma_unmap_single(ap->dev,
4306 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4307 dir);
4308 /* restore sg */
4309 sg->length += qc->pad_len;
4310 if (pad_buf)
4311 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4312 pad_buf, qc->pad_len);
4315 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4316 qc->__sg = NULL;
4320 * ata_fill_sg - Fill PCI IDE PRD table
4321 * @qc: Metadata associated with taskfile to be transferred
4323 * Fill PCI IDE PRD (scatter-gather) table with segments
4324 * associated with the current disk command.
4326 * LOCKING:
4327 * spin_lock_irqsave(host lock)
4330 static void ata_fill_sg(struct ata_queued_cmd *qc)
4332 struct ata_port *ap = qc->ap;
4333 struct scatterlist *sg;
4334 unsigned int idx;
4336 WARN_ON(qc->__sg == NULL);
4337 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4339 idx = 0;
4340 ata_for_each_sg(sg, qc) {
4341 u32 addr, offset;
4342 u32 sg_len, len;
4344 /* determine if physical DMA addr spans 64K boundary.
4345 * Note h/w doesn't support 64-bit, so we unconditionally
4346 * truncate dma_addr_t to u32.
4348 addr = (u32) sg_dma_address(sg);
4349 sg_len = sg_dma_len(sg);
4351 while (sg_len) {
4352 offset = addr & 0xffff;
4353 len = sg_len;
4354 if ((offset + sg_len) > 0x10000)
4355 len = 0x10000 - offset;
4357 ap->prd[idx].addr = cpu_to_le32(addr);
4358 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4359 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4361 idx++;
4362 sg_len -= len;
4363 addr += len;
4367 if (idx)
4368 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
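/*
 * Worked example (illustrative): a 0x3000-byte segment at bus address
 * 0x0000f000 crosses a 64K boundary, so the loop above emits two PRD
 * entries, (0x0000f000, 0x1000) up to the boundary and (0x00010000,
 * 0x2000) for the remainder; only the final entry receives ATA_PRD_EOT.
 */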
4372 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4373 * @qc: Metadata associated with taskfile to be transferred
4375 * Fill PCI IDE PRD (scatter-gather) table with segments
4376 * associated with the current disk command. Perform the fill
4377 * so that we avoid writing any 64K-length records for
4378 * controllers that don't follow the spec.
4380 * LOCKING:
4381 * spin_lock_irqsave(host lock)
4384 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4386 struct ata_port *ap = qc->ap;
4387 struct scatterlist *sg;
4388 unsigned int idx;
4390 WARN_ON(qc->__sg == NULL);
4391 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4393 idx = 0;
4394 ata_for_each_sg(sg, qc) {
4395 u32 addr, offset;
4396 u32 sg_len, len, blen;
4398 /* determine if physical DMA addr spans 64K boundary.
4399 * Note h/w doesn't support 64-bit, so we unconditionally
4400 * truncate dma_addr_t to u32.
4402 addr = (u32) sg_dma_address(sg);
4403 sg_len = sg_dma_len(sg);
4405 while (sg_len) {
4406 offset = addr & 0xffff;
4407 len = sg_len;
4408 if ((offset + sg_len) > 0x10000)
4409 len = 0x10000 - offset;
4411 blen = len & 0xffff;
4412 ap->prd[idx].addr = cpu_to_le32(addr);
4413 if (blen == 0) {
4414 /* Some PATA chipsets like the CS5530 can't
4415 cope with 0x0000 meaning 64K as the spec says */
4416 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4417 blen = 0x8000;
4418 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4420 ap->prd[idx].flags_len = cpu_to_le32(blen);
4421 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4423 idx++;
4424 sg_len -= len;
4425 addr += len;
4429 if (idx)
4430 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
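/*
 * Worked example (illustrative): a full 64K segment yields len & 0xffff
 * == 0, which the spec defines as meaning 64K but chips like the CS5530
 * misparse; the code above therefore emits two 32K entries, (addr,
 * 0x8000) and (addr + 0x8000, 0x8000), instead of one zero-length record.
 */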
4434 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4435 * @qc: Metadata associated with taskfile to check
4437 * Allow low-level driver to filter ATA PACKET commands, returning
4438 * a status indicating whether or not it is OK to use DMA for the
4439 * supplied PACKET command.
4441 * LOCKING:
4442 * spin_lock_irqsave(host lock)
4444 * RETURNS: 0 when ATAPI DMA can be used
4445 * nonzero otherwise
4447 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4449 struct ata_port *ap = qc->ap;
4451 /* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
4452 * Quite a few ATAPI devices choke on such DMA requests.
4454 if (unlikely(qc->nbytes & 15))
4455 return 1;
4457 if (ap->ops->check_atapi_dma)
4458 return ap->ops->check_atapi_dma(qc);
4460 return 0;
4464 * ata_std_qc_defer - Check whether a qc needs to be deferred
4465 * @qc: ATA command in question
4467 * Non-NCQ commands cannot run with any other command, NCQ or
4468 * not. As the upper layer only knows the queue depth, we are
4469 * responsible for maintaining exclusion. This function checks
4470 * whether a new command @qc can be issued.
4472 * LOCKING:
4473 * spin_lock_irqsave(host lock)
4475 * RETURNS:
4476 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4478 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4480 struct ata_link *link = qc->dev->link;
4482 if (qc->tf.protocol == ATA_PROT_NCQ) {
4483 if (!ata_tag_valid(link->active_tag))
4484 return 0;
4485 } else {
4486 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4487 return 0;
4490 return ATA_DEFER_LINK;
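/*
 * Hedged sketch, not from this file: an NCQ-capable LLDD typically just
 * points its port operations at this helper. The ops name below is
 * hypothetical and all other hooks are elided.
 */
static const struct ata_port_operations example_ncq_ops = {
	.qc_defer	= ata_std_qc_defer,
};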
4494 * ata_qc_prep - Prepare taskfile for submission
4495 * @qc: Metadata associated with taskfile to be prepared
4497 * Prepare ATA taskfile for submission.
4499 * LOCKING:
4500 * spin_lock_irqsave(host lock)
4502 void ata_qc_prep(struct ata_queued_cmd *qc)
4504 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4505 return;
4507 ata_fill_sg(qc);
4511 * ata_dumb_qc_prep - Prepare taskfile for submission
4512 * @qc: Metadata associated with taskfile to be prepared
4514 * Prepare ATA taskfile for submission.
4516 * LOCKING:
4517 * spin_lock_irqsave(host lock)
4519 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4521 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4522 return;
4524 ata_fill_sg_dumb(qc);
4527 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4530 * ata_sg_init_one - Associate command with memory buffer
4531 * @qc: Command to be associated
4532 * @buf: Memory buffer
4533 * @buflen: Length of memory buffer, in bytes.
4535 * Initialize the data-related elements of queued_cmd @qc
4536 * to point to a single memory buffer, @buf of byte length @buflen.
4538 * LOCKING:
4539 * spin_lock_irqsave(host lock)
4542 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4544 qc->flags |= ATA_QCFLAG_SINGLE;
4546 qc->__sg = &qc->sgent;
4547 qc->n_elem = 1;
4548 qc->orig_n_elem = 1;
4549 qc->buf_virt = buf;
4550 qc->nbytes = buflen;
4551 qc->cursg = qc->__sg;
4553 sg_init_one(&qc->sgent, buf, buflen);
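/*
 * Usage sketch (illustrative): a single-buffer command is associated with
 * its data before issue roughly as below; the helper name is hypothetical.
 */
static inline void example_setup_single_buf(struct ata_queued_cmd *qc,
					    void *buf, unsigned int buflen)
{
	ata_sg_init_one(qc, buf, buflen);	/* builds the one-entry sg table */
	qc->dma_dir = DMA_FROM_DEVICE;		/* device-to-memory transfer */
}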
4557 * ata_sg_init - Associate command with scatter-gather table.
4558 * @qc: Command to be associated
4559 * @sg: Scatter-gather table.
4560 * @n_elem: Number of elements in s/g table.
4562 * Initialize the data-related elements of queued_cmd @qc
4563 * to point to a scatter-gather table @sg, containing @n_elem
4564 * elements.
4566 * LOCKING:
4567 * spin_lock_irqsave(host lock)
4570 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4571 unsigned int n_elem)
4573 qc->flags |= ATA_QCFLAG_SG;
4574 qc->__sg = sg;
4575 qc->n_elem = n_elem;
4576 qc->orig_n_elem = n_elem;
4577 qc->cursg = qc->__sg;
4581 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4582 * @qc: Command with memory buffer to be mapped.
4584 * DMA-map the memory buffer associated with queued_cmd @qc.
4586 * LOCKING:
4587 * spin_lock_irqsave(host lock)
4589 * RETURNS:
4590 * Zero on success, negative on error.
4593 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4595 struct ata_port *ap = qc->ap;
4596 int dir = qc->dma_dir;
4597 struct scatterlist *sg = qc->__sg;
4598 dma_addr_t dma_address;
4599 int trim_sg = 0;
4601 /* we must lengthen transfers to end on a 32-bit boundary */
4602 qc->pad_len = sg->length & 3;
4603 if (qc->pad_len) {
4604 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4605 struct scatterlist *psg = &qc->pad_sgent;
4607 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4609 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4611 if (qc->tf.flags & ATA_TFLAG_WRITE)
4612 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4613 qc->pad_len);
4615 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4616 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4617 /* trim sg */
4618 sg->length -= qc->pad_len;
4619 if (sg->length == 0)
4620 trim_sg = 1;
4622 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4623 sg->length, qc->pad_len);
4626 if (trim_sg) {
4627 qc->n_elem--;
4628 goto skip_map;
4631 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4632 sg->length, dir);
4633 if (dma_mapping_error(dma_address)) {
4634 /* restore sg */
4635 sg->length += qc->pad_len;
4636 return -1;
4639 sg_dma_address(sg) = dma_address;
4640 sg_dma_len(sg) = sg->length;
4642 skip_map:
4643 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4644 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4646 return 0;
4650 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4651 * @qc: Command with scatter-gather table to be mapped.
4653 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4655 * LOCKING:
4656 * spin_lock_irqsave(host lock)
4658 * RETURNS:
4659 * Zero on success, negative on error.
4663 static int ata_sg_setup(struct ata_queued_cmd *qc)
4665 struct ata_port *ap = qc->ap;
4666 struct scatterlist *sg = qc->__sg;
4667 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4668 int n_elem, pre_n_elem, dir, trim_sg = 0;
4670 VPRINTK("ENTER, ata%u\n", ap->print_id);
4671 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4673 /* we must lengthen transfers to end on a 32-bit boundary */
4674 qc->pad_len = lsg->length & 3;
4675 if (qc->pad_len) {
4676 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4677 struct scatterlist *psg = &qc->pad_sgent;
4678 unsigned int offset;
4680 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4682 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4685 * psg->page/offset are used to copy to-be-written
4686 * data in this function or read data in ata_sg_clean.
4688 offset = lsg->offset + lsg->length - qc->pad_len;
4689 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4690 psg->offset = offset_in_page(offset);
4692 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4693 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4694 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4695 kunmap_atomic(addr, KM_IRQ0);
4698 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4699 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4700 /* trim last sg */
4701 lsg->length -= qc->pad_len;
4702 if (lsg->length == 0)
4703 trim_sg = 1;
4705 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4706 qc->n_elem - 1, lsg->length, qc->pad_len);
4709 pre_n_elem = qc->n_elem;
4710 if (trim_sg && pre_n_elem)
4711 pre_n_elem--;
4713 if (!pre_n_elem) {
4714 n_elem = 0;
4715 goto skip_map;
4718 dir = qc->dma_dir;
4719 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4720 if (n_elem < 1) {
4721 /* restore last sg */
4722 lsg->length += qc->pad_len;
4723 return -1;
4726 DPRINTK("%d sg elements mapped\n", n_elem);
4728 skip_map:
4729 qc->n_elem = n_elem;
4731 return 0;
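/*
 * Worked example (illustrative): for a 510-byte ATAPI transfer, pad_len =
 * 510 & 3 = 2, so the last sg entry is shortened by two bytes and the
 * per-tag pad buffer is appended as an extra element, keeping the DMA
 * transfer 32-bit aligned; for reads, ata_sg_clean() later copies those
 * trailing bytes back into the caller's buffer.
 */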
4735 * swap_buf_le16 - swap halves of 16-bit words in place
4736 * @buf: Buffer to swap
4737 * @buf_words: Number of 16-bit words in buffer.
4739 * Swap halves of 16-bit words if needed to convert from
4740 * little-endian byte order to native cpu byte order, or
4741 * vice-versa.
4743 * LOCKING:
4744 * Inherited from caller.
4746 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4748 #ifdef __BIG_ENDIAN
4749 unsigned int i;
4751 for (i = 0; i < buf_words; i++)
4752 buf[i] = le16_to_cpu(buf[i]);
4753 #endif /* __BIG_ENDIAN */
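/*
 * Usage sketch (illustrative): IDENTIFY DEVICE data arrives as 256
 * little-endian 16-bit words, so callers convert it to CPU order with
 * swap_buf_le16(dev->id, ATA_ID_WORDS); on little-endian machines the
 * function compiles away to nothing.
 */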
4757 * ata_data_xfer - Transfer data by PIO
4758 * @adev: device to target
4759 * @buf: data buffer
4760 * @buflen: buffer length
4761 * @write_data: read/write
4763 * Transfer data from/to the device data register by PIO.
4765 * LOCKING:
4766 * Inherited from caller.
4768 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4769 unsigned int buflen, int write_data)
4771 struct ata_port *ap = adev->link->ap;
4772 unsigned int words = buflen >> 1;
4774 /* Transfer multiple of 2 bytes */
4775 if (write_data)
4776 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4777 else
4778 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4780 /* Transfer trailing 1 byte, if any. */
4781 if (unlikely(buflen & 0x01)) {
4782 u16 align_buf[1] = { 0 };
4783 unsigned char *trailing_buf = buf + buflen - 1;
4785 if (write_data) {
4786 memcpy(align_buf, trailing_buf, 1);
4787 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4788 } else {
4789 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4790 memcpy(trailing_buf, align_buf, 1);
4796 * ata_data_xfer_noirq - Transfer data by PIO
4797 * @adev: device to target
4798 * @buf: data buffer
4799 * @buflen: buffer length
4800 * @write_data: read/write
4802 * Transfer data from/to the device data register by PIO. Do the
4803 * transfer with interrupts disabled.
4805 * LOCKING:
4806 * Inherited from caller.
4808 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4809 unsigned int buflen, int write_data)
4811 unsigned long flags;
4812 local_irq_save(flags);
4813 ata_data_xfer(adev, buf, buflen, write_data);
4814 local_irq_restore(flags);
4819 * ata_pio_sector - Transfer a sector of data.
4820 * @qc: Command on going
4822 * Transfer qc->sect_size bytes of data from/to the ATA device.
4824 * LOCKING:
4825 * Inherited from caller.
4828 static void ata_pio_sector(struct ata_queued_cmd *qc)
4830 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4831 struct ata_port *ap = qc->ap;
4832 struct page *page;
4833 unsigned int offset;
4834 unsigned char *buf;
4836 if (qc->curbytes == qc->nbytes - qc->sect_size)
4837 ap->hsm_task_state = HSM_ST_LAST;
4839 page = qc->cursg->page;
4840 offset = qc->cursg->offset + qc->cursg_ofs;
4842 /* get the current page and offset */
4843 page = nth_page(page, (offset >> PAGE_SHIFT));
4844 offset %= PAGE_SIZE;
4846 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4848 if (PageHighMem(page)) {
4849 unsigned long flags;
4851 /* FIXME: use a bounce buffer */
4852 local_irq_save(flags);
4853 buf = kmap_atomic(page, KM_IRQ0);
4855 /* do the actual data transfer */
4856 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4858 kunmap_atomic(buf, KM_IRQ0);
4859 local_irq_restore(flags);
4860 } else {
4861 buf = page_address(page);
4862 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4865 qc->curbytes += qc->sect_size;
4866 qc->cursg_ofs += qc->sect_size;
4868 if (qc->cursg_ofs == qc->cursg->length) {
4869 qc->cursg = sg_next(qc->cursg);
4870 qc->cursg_ofs = 0;
4875 * ata_pio_sectors - Transfer one or many sectors.
4876 * @qc: Command on going
4878 * Transfer one or many sectors of data from/to the
4879 * ATA device for the DRQ request.
4881 * LOCKING:
4882 * Inherited from caller.
4885 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4887 if (is_multi_taskfile(&qc->tf)) {
4888 /* READ/WRITE MULTIPLE */
4889 unsigned int nsect;
4891 WARN_ON(qc->dev->multi_count == 0);
4893 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4894 qc->dev->multi_count);
4895 while (nsect--)
4896 ata_pio_sector(qc);
4897 } else
4898 ata_pio_sector(qc);
4900 ata_altstatus(qc->ap); /* flush */
4904 * atapi_send_cdb - Write CDB bytes to hardware
4905 * @ap: Port to which ATAPI device is attached.
4906 * @qc: Taskfile currently active
4908 * When device has indicated its readiness to accept
4909 * a CDB, this function is called. Send the CDB.
4911 * LOCKING:
4912 * caller.
4915 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4917 /* send SCSI cdb */
4918 DPRINTK("send cdb\n");
4919 WARN_ON(qc->dev->cdb_len < 12);
4921 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4922 ata_altstatus(ap); /* flush */
4924 switch (qc->tf.protocol) {
4925 case ATA_PROT_ATAPI:
4926 ap->hsm_task_state = HSM_ST;
4927 break;
4928 case ATA_PROT_ATAPI_NODATA:
4929 ap->hsm_task_state = HSM_ST_LAST;
4930 break;
4931 case ATA_PROT_ATAPI_DMA:
4932 ap->hsm_task_state = HSM_ST_LAST;
4933 /* initiate bmdma */
4934 ap->ops->bmdma_start(qc);
4935 break;
4940 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4941 * @qc: Command on going
4942 * @bytes: number of bytes
4944 * Transfer data from/to the ATAPI device.
4946 * LOCKING:
4947 * Inherited from caller.
4951 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4953 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4954 struct scatterlist *sg = qc->__sg;
4955 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4956 struct ata_port *ap = qc->ap;
4957 struct page *page;
4958 unsigned char *buf;
4959 unsigned int offset, count;
4960 int no_more_sg = 0;
4962 if (qc->curbytes + bytes >= qc->nbytes)
4963 ap->hsm_task_state = HSM_ST_LAST;
4965 next_sg:
4966 if (unlikely(no_more_sg)) {
4968 * The end of qc->sg is reached and the device expects
4969 * more data to transfer. In order not to overrun qc->sg
4970 * and still satisfy the length specified in the byte count register,
4971 * - for the read case, discard trailing data from the device
4972 * - for the write case, pad with zero data to the device
4974 u16 pad_buf[1] = { 0 };
4975 unsigned int words = bytes >> 1;
4976 unsigned int i;
4978 if (words) /* warning if bytes > 1 */
4979 ata_dev_printk(qc->dev, KERN_WARNING,
4980 "%u bytes trailing data\n", bytes);
4982 for (i = 0; i < words; i++)
4983 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4985 ap->hsm_task_state = HSM_ST_LAST;
4986 return;
4989 sg = qc->cursg;
4991 page = sg->page;
4992 offset = sg->offset + qc->cursg_ofs;
4994 /* get the current page and offset */
4995 page = nth_page(page, (offset >> PAGE_SHIFT));
4996 offset %= PAGE_SIZE;
4998 /* don't overrun current sg */
4999 count = min(sg->length - qc->cursg_ofs, bytes);
5001 /* don't cross page boundaries */
5002 count = min(count, (unsigned int)PAGE_SIZE - offset);
5004 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5006 if (PageHighMem(page)) {
5007 unsigned long flags;
5009 /* FIXME: use bounce buffer */
5010 local_irq_save(flags);
5011 buf = kmap_atomic(page, KM_IRQ0);
5013 /* do the actual data transfer */
5014 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5016 kunmap_atomic(buf, KM_IRQ0);
5017 local_irq_restore(flags);
5018 } else {
5019 buf = page_address(page);
5020 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
5023 bytes -= count;
5024 qc->curbytes += count;
5025 qc->cursg_ofs += count;
5027 if (qc->cursg_ofs == sg->length) {
5028 if (qc->cursg == lsg)
5029 no_more_sg = 1;
5031 qc->cursg = sg_next(qc->cursg);
5032 qc->cursg_ofs = 0;
5035 if (bytes)
5036 goto next_sg;
5040 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5041 * @qc: Command on going
5043 * Transfer data from/to the ATAPI device.
5045 * LOCKING:
5046 * Inherited from caller.
5049 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5051 struct ata_port *ap = qc->ap;
5052 struct ata_device *dev = qc->dev;
5053 unsigned int ireason, bc_lo, bc_hi, bytes;
5054 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5056 /* Abuse qc->result_tf for temp storage of intermediate TF
5057 * here to save some kernel stack usage.
5058 * For normal completion, qc->result_tf is not relevant. For
5059 * error, qc->result_tf is later overwritten by ata_qc_complete().
5060 * So, the correctness of qc->result_tf is not affected.
5062 ap->ops->tf_read(ap, &qc->result_tf);
5063 ireason = qc->result_tf.nsect;
5064 bc_lo = qc->result_tf.lbam;
5065 bc_hi = qc->result_tf.lbah;
5066 bytes = (bc_hi << 8) | bc_lo;
5068 /* shall be cleared to zero, indicating xfer of data */
5069 if (ireason & (1 << 0))
5070 goto err_out;
5072 /* make sure transfer direction matches expected */
5073 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5074 if (do_write != i_write)
5075 goto err_out;
5077 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5079 __atapi_pio_bytes(qc, bytes);
5080 ata_altstatus(ap); /* flush */
5082 return;
5084 err_out:
5085 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5086 qc->err_mask |= AC_ERR_HSM;
5087 ap->hsm_task_state = HSM_ST_ERR;
5091 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5092 * @ap: the target ata_port
5093 * @qc: qc on going
5095 * RETURNS:
5096 * 1 if ok in workqueue, 0 otherwise.
5099 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5101 if (qc->tf.flags & ATA_TFLAG_POLLING)
5102 return 1;
5104 if (ap->hsm_task_state == HSM_ST_FIRST) {
5105 if (qc->tf.protocol == ATA_PROT_PIO &&
5106 (qc->tf.flags & ATA_TFLAG_WRITE))
5107 return 1;
5109 if (is_atapi_taskfile(&qc->tf) &&
5110 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5111 return 1;
5114 return 0;
5118 * ata_hsm_qc_complete - finish a qc running on standard HSM
5119 * @qc: Command to complete
5120 * @in_wq: 1 if called from workqueue, 0 otherwise
5122 * Finish @qc which is running on standard HSM.
5124 * LOCKING:
5125 * If @in_wq is zero, spin_lock_irqsave(host lock).
5126 * Otherwise, none on entry and grabs host lock.
5128 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5130 struct ata_port *ap = qc->ap;
5131 unsigned long flags;
5133 if (ap->ops->error_handler) {
5134 if (in_wq) {
5135 spin_lock_irqsave(ap->lock, flags);
5137 /* EH might have kicked in while host lock is
5138 * released.
5140 qc = ata_qc_from_tag(ap, qc->tag);
5141 if (qc) {
5142 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
5143 ap->ops->irq_on(ap);
5144 ata_qc_complete(qc);
5145 } else
5146 ata_port_freeze(ap);
5149 spin_unlock_irqrestore(ap->lock, flags);
5150 } else {
5151 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5152 ata_qc_complete(qc);
5153 else
5154 ata_port_freeze(ap);
5156 } else {
5157 if (in_wq) {
5158 spin_lock_irqsave(ap->lock, flags);
5159 ap->ops->irq_on(ap);
5160 ata_qc_complete(qc);
5161 spin_unlock_irqrestore(ap->lock, flags);
5162 } else
5163 ata_qc_complete(qc);
5168 * ata_hsm_move - move the HSM to the next state.
5169 * @ap: the target ata_port
5170 * @qc: qc on going
5171 * @status: current device status
5172 * @in_wq: 1 if called from workqueue, 0 otherwise
5174 * RETURNS:
5175 * 1 when poll next status needed, 0 otherwise.
5177 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5178 u8 status, int in_wq)
5180 unsigned long flags = 0;
5181 int poll_next;
5183 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5185 /* Make sure ata_qc_issue_prot() does not throw things
5186 * like DMA polling into the workqueue. Notice that
5187 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5189 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5191 fsm_start:
5192 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5193 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5195 switch (ap->hsm_task_state) {
5196 case HSM_ST_FIRST:
5197 /* Send first data block or PACKET CDB */
5199 /* If polling, we will stay in the work queue after
5200 * sending the data. Otherwise, interrupt handler
5201 * takes over after sending the data.
5203 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5205 /* check device status */
5206 if (unlikely((status & ATA_DRQ) == 0)) {
5207 /* handle BSY=0, DRQ=0 as error */
5208 if (likely(status & (ATA_ERR | ATA_DF)))
5209 /* device stops HSM for abort/error */
5210 qc->err_mask |= AC_ERR_DEV;
5211 else
5212 /* HSM violation. Let EH handle this */
5213 qc->err_mask |= AC_ERR_HSM;
5215 ap->hsm_task_state = HSM_ST_ERR;
5216 goto fsm_start;
5219 /* Device should not ask for data transfer (DRQ=1)
5220 * when it finds something wrong.
5221 * We ignore DRQ here and stop the HSM by
5222 * changing hsm_task_state to HSM_ST_ERR and
5223 * let the EH abort the command or reset the device.
5225 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5226 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
5227 "error, dev_stat 0x%X\n", status);
5228 qc->err_mask |= AC_ERR_HSM;
5229 ap->hsm_task_state = HSM_ST_ERR;
5230 goto fsm_start;
5233 /* Send the CDB (atapi) or the first data block (ata pio out).
5234 * During the state transition, interrupt handler shouldn't
5235 * be invoked before the data transfer is complete and
5236 * hsm_task_state is changed. Hence, the following locking.
5238 if (in_wq)
5239 spin_lock_irqsave(ap->lock, flags);
5241 if (qc->tf.protocol == ATA_PROT_PIO) {
5242 /* PIO data out protocol.
5243 * send first data block.
5246 /* ata_pio_sectors() might change the state
5247 * to HSM_ST_LAST. so, the state is changed here
5248 * before ata_pio_sectors().
5250 ap->hsm_task_state = HSM_ST;
5251 ata_pio_sectors(qc);
5252 } else
5253 /* send CDB */
5254 atapi_send_cdb(ap, qc);
5256 if (in_wq)
5257 spin_unlock_irqrestore(ap->lock, flags);
5259 /* if polling, ata_pio_task() handles the rest.
5260 * otherwise, interrupt handler takes over from here.
5262 break;
5264 case HSM_ST:
5265 /* complete command or read/write the data register */
5266 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5267 /* ATAPI PIO protocol */
5268 if ((status & ATA_DRQ) == 0) {
5269 /* No more data to transfer or device error.
5270 * Device error will be tagged in HSM_ST_LAST.
5272 ap->hsm_task_state = HSM_ST_LAST;
5273 goto fsm_start;
5276 /* Device should not ask for data transfer (DRQ=1)
5277 * when it finds something wrong.
5278 * We ignore DRQ here and stop the HSM by
5279 * changing hsm_task_state to HSM_ST_ERR and
5280 * let the EH abort the command or reset the device.
5282 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5283 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5284 "device error, dev_stat 0x%X\n",
5285 status);
5286 qc->err_mask |= AC_ERR_HSM;
5287 ap->hsm_task_state = HSM_ST_ERR;
5288 goto fsm_start;
5291 atapi_pio_bytes(qc);
5293 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5294 /* bad ireason reported by device */
5295 goto fsm_start;
5297 } else {
5298 /* ATA PIO protocol */
5299 if (unlikely((status & ATA_DRQ) == 0)) {
5300 /* handle BSY=0, DRQ=0 as error */
5301 if (likely(status & (ATA_ERR | ATA_DF)))
5302 /* device stops HSM for abort/error */
5303 qc->err_mask |= AC_ERR_DEV;
5304 else
5305 /* HSM violation. Let EH handle this.
5306 * Phantom devices also trigger this
5307 * condition, so mark the NODEV hint.
5309 qc->err_mask |= AC_ERR_HSM |
5310 AC_ERR_NODEV_HINT;
5312 ap->hsm_task_state = HSM_ST_ERR;
5313 goto fsm_start;
5316 /* For PIO reads, some devices may ask for
5317 * data transfer (DRQ=1) along with ERR=1.
5318 * We respect DRQ here and transfer one
5319 * block of junk data before changing the
5320 * hsm_task_state to HSM_ST_ERR.
5322 * For PIO writes, ERR=1 DRQ=1 doesn't make
5323 * sense since the data block has been
5324 * transferred to the device.
5326 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5327 /* data might be corrupted */
5328 qc->err_mask |= AC_ERR_DEV;
5330 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5331 ata_pio_sectors(qc);
5332 status = ata_wait_idle(ap);
5335 if (status & (ATA_BUSY | ATA_DRQ))
5336 qc->err_mask |= AC_ERR_HSM;
5338 /* ata_pio_sectors() might change the
5339 * state to HSM_ST_LAST. so, the state
5340 * is changed after ata_pio_sectors().
5342 ap->hsm_task_state = HSM_ST_ERR;
5343 goto fsm_start;
5346 ata_pio_sectors(qc);
5348 if (ap->hsm_task_state == HSM_ST_LAST &&
5349 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5350 /* all data read */
5351 status = ata_wait_idle(ap);
5352 goto fsm_start;
5356 poll_next = 1;
5357 break;
5359 case HSM_ST_LAST:
5360 if (unlikely(!ata_ok(status))) {
5361 qc->err_mask |= __ac_err_mask(status);
5362 ap->hsm_task_state = HSM_ST_ERR;
5363 goto fsm_start;
5366 /* no more data to transfer */
5367 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5368 ap->print_id, qc->dev->devno, status);
5370 WARN_ON(qc->err_mask);
5372 ap->hsm_task_state = HSM_ST_IDLE;
5374 /* complete taskfile transaction */
5375 ata_hsm_qc_complete(qc, in_wq);
5377 poll_next = 0;
5378 break;
5380 case HSM_ST_ERR:
5381 /* make sure qc->err_mask is available to
5382 * know what's wrong and recover
5384 WARN_ON(qc->err_mask == 0);
5386 ap->hsm_task_state = HSM_ST_IDLE;
5388 /* complete taskfile transaction */
5389 ata_hsm_qc_complete(qc, in_wq);
5391 poll_next = 0;
5392 break;
5393 default:
5394 poll_next = 0;
5395 BUG();
5398 return poll_next;
5401 static void ata_pio_task(struct work_struct *work)
5403 struct ata_port *ap =
5404 container_of(work, struct ata_port, port_task.work);
5405 struct ata_queued_cmd *qc = ap->port_task_data;
5406 u8 status;
5407 int poll_next;
5409 fsm_start:
5410 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5413 * This is purely heuristic. This is a fast path.
5414 * Sometimes when we enter, BSY will be cleared in
5415 * a chk-status or two. If not, the drive is probably seeking
5416 * or something. Snooze for a couple msecs, then
5417 * chk-status again. If still busy, queue delayed work.
5419 status = ata_busy_wait(ap, ATA_BUSY, 5);
5420 if (status & ATA_BUSY) {
5421 msleep(2);
5422 status = ata_busy_wait(ap, ATA_BUSY, 10);
5423 if (status & ATA_BUSY) {
5424 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5425 return;
5429 /* move the HSM */
5430 poll_next = ata_hsm_move(ap, qc, status, 1);
5432 /* another command or interrupt handler
5433 * may be running at this point.
5435 if (poll_next)
5436 goto fsm_start;
5440 * ata_qc_new - Request an available ATA command, for queueing
5441 * @ap: Port to request an available command structure from
5444 * LOCKING:
5445 * None.
5448 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5450 struct ata_queued_cmd *qc = NULL;
5451 unsigned int i;
5453 /* no command while frozen */
5454 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5455 return NULL;
5457 /* the last tag is reserved for internal command. */
5458 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5459 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5460 qc = __ata_qc_from_tag(ap, i);
5461 break;
5464 if (qc)
5465 qc->tag = i;
5467 return qc;
5471 * ata_qc_new_init - Request an available ATA command, and initialize it
5472 * @dev: Device from whom we request an available command structure
5474 * LOCKING:
5475 * None.
5478 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5480 struct ata_port *ap = dev->link->ap;
5481 struct ata_queued_cmd *qc;
5483 qc = ata_qc_new(ap);
5484 if (qc) {
5485 qc->scsicmd = NULL;
5486 qc->ap = ap;
5487 qc->dev = dev;
5489 ata_qc_reinit(qc);
5492 return qc;
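/*
 * Hedged sketch, not from this file: a libata-internal caller would drive
 * a freshly allocated qc roughly as below (taskfile setup and error
 * handling elided; the function name is hypothetical).
 */
static inline void example_issue_read(struct ata_device *dev, void *buf,
				      unsigned int buflen,
				      void (*done)(struct ata_queued_cmd *))
{
	struct ata_queued_cmd *qc = ata_qc_new_init(dev);

	if (!qc)
		return;			/* port frozen or no free tag */

	ata_sg_init_one(qc, buf, buflen);
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->complete_fn = done;		/* invoked from __ata_qc_complete() */
	/* qc->tf would be filled in with the actual command here */
	ata_qc_issue(qc);		/* caller must hold the host lock */
}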
5496 * ata_qc_free - free unused ata_queued_cmd
5497 * @qc: Command to complete
5499 * Designed to free unused ata_queued_cmd object
5500 * in case something prevents using it.
5502 * LOCKING:
5503 * spin_lock_irqsave(host lock)
5505 void ata_qc_free(struct ata_queued_cmd *qc)
5507 struct ata_port *ap = qc->ap;
5508 unsigned int tag;
5510 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5512 qc->flags = 0;
5513 tag = qc->tag;
5514 if (likely(ata_tag_valid(tag))) {
5515 qc->tag = ATA_TAG_POISON;
5516 clear_bit(tag, &ap->qc_allocated);
5520 void __ata_qc_complete(struct ata_queued_cmd *qc)
5522 struct ata_port *ap = qc->ap;
5523 struct ata_link *link = qc->dev->link;
5525 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5526 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5528 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5529 ata_sg_clean(qc);
5531 /* command should be marked inactive atomically with qc completion */
5532 if (qc->tf.protocol == ATA_PROT_NCQ) {
5533 link->sactive &= ~(1 << qc->tag);
5534 if (!link->sactive)
5535 ap->nr_active_links--;
5536 } else {
5537 link->active_tag = ATA_TAG_POISON;
5538 ap->nr_active_links--;
5541 /* clear exclusive status */
5542 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5543 ap->excl_link == link))
5544 ap->excl_link = NULL;
5546 /* atapi: mark qc as inactive to prevent the interrupt handler
5547 * from completing the command twice later, before the error handler
5548 * is called. (when rc != 0 and atapi request sense is needed)
5550 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5551 ap->qc_active &= ~(1 << qc->tag);
5553 /* call completion callback */
5554 qc->complete_fn(qc);
5557 static void fill_result_tf(struct ata_queued_cmd *qc)
5559 struct ata_port *ap = qc->ap;
5561 qc->result_tf.flags = qc->tf.flags;
5562 ap->ops->tf_read(ap, &qc->result_tf);
5566 * ata_qc_complete - Complete an active ATA command
5567 * @qc: Command to complete
5570 * Indicate to the mid and upper layers that an ATA
5571 * command has completed, with either an ok or not-ok status.
5573 * LOCKING:
5574 * spin_lock_irqsave(host lock)
5576 void ata_qc_complete(struct ata_queued_cmd *qc)
5578 struct ata_port *ap = qc->ap;
5580 /* XXX: New EH and old EH use different mechanisms to
5581 * synchronize EH with regular execution path.
5583 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5584 * Normal execution path is responsible for not accessing a
5585 * failed qc. libata core enforces the rule by returning NULL
5586 * from ata_qc_from_tag() for failed qcs.
5588 * Old EH depends on ata_qc_complete() nullifying completion
5589 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5590 * not synchronize with interrupt handler. Only PIO task is
5591 * taken care of.
5593 if (ap->ops->error_handler) {
5594 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5596 if (unlikely(qc->err_mask))
5597 qc->flags |= ATA_QCFLAG_FAILED;
5599 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5600 if (!ata_tag_internal(qc->tag)) {
5601 /* always fill result TF for failed qc */
5602 fill_result_tf(qc);
5603 ata_qc_schedule_eh(qc);
5604 return;
5608 /* read result TF if requested */
5609 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5610 fill_result_tf(qc);
5612 __ata_qc_complete(qc);
5613 } else {
5614 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5615 return;
5617 /* read result TF if failed or requested */
5618 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5619 fill_result_tf(qc);
5621 __ata_qc_complete(qc);
5626 * ata_qc_complete_multiple - Complete multiple qcs successfully
5627 * @ap: port in question
5628 * @qc_active: new qc_active mask
5629 * @finish_qc: LLDD callback invoked before completing a qc
5631 * Complete in-flight commands. This function is meant to be
5632 * called from a low-level driver's interrupt routine to complete
5633 * requests normally. ap->qc_active and @qc_active are compared
5634 * and commands are completed accordingly.
5636 * LOCKING:
5637 * spin_lock_irqsave(host lock)
5639 * RETURNS:
5640 * Number of completed commands on success, -errno otherwise.
5642 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5643 void (*finish_qc)(struct ata_queued_cmd *))
5645 int nr_done = 0;
5646 u32 done_mask;
5647 int i;
5649 done_mask = ap->qc_active ^ qc_active;
5651 if (unlikely(done_mask & qc_active)) {
5652 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5653 "(%08x->%08x)\n", ap->qc_active, qc_active);
5654 return -EINVAL;
5657 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5658 struct ata_queued_cmd *qc;
5660 if (!(done_mask & (1 << i)))
5661 continue;
5663 if ((qc = ata_qc_from_tag(ap, i))) {
5664 if (finish_qc)
5665 finish_qc(qc);
5666 ata_qc_complete(qc);
5667 nr_done++;
5671 return nr_done;
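/*
 * Hedged sketch, not from this file: an NCQ LLDD's interrupt handler
 * usually reads the controller's active-tag mask and lets this helper
 * complete whatever commands dropped out of it. The register offset is
 * hypothetical.
 */
static inline void example_ncq_irq(struct ata_port *ap, void __iomem *port_mmio)
{
	u32 qc_active = readl(port_mmio + 0x34);	/* controller's SActive copy */

	ata_qc_complete_multiple(ap, qc_active, NULL);
}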
5674 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5676 struct ata_port *ap = qc->ap;
5678 switch (qc->tf.protocol) {
5679 case ATA_PROT_NCQ:
5680 case ATA_PROT_DMA:
5681 case ATA_PROT_ATAPI_DMA:
5682 return 1;
5684 case ATA_PROT_ATAPI:
5685 case ATA_PROT_PIO:
5686 if (ap->flags & ATA_FLAG_PIO_DMA)
5687 return 1;
5689 /* fall through */
5691 default:
5692 return 0;
5695 /* never reached */
5699 * ata_qc_issue - issue taskfile to device
5700 * @qc: command to issue to device
5702 * Prepare an ATA command for submission to the device.
5703 * This includes mapping the data into a DMA-able
5704 * area, filling in the S/G table, and finally
5705 * writing the taskfile to hardware, starting the command.
5707 * LOCKING:
5708 * spin_lock_irqsave(host lock)
5710 void ata_qc_issue(struct ata_queued_cmd *qc)
5712 struct ata_port *ap = qc->ap;
5713 struct ata_link *link = qc->dev->link;
5715 /* Make sure only one non-NCQ command is outstanding. The
5716 * check is skipped for old EH because it reuses active qc to
5717 * request ATAPI sense.
5719 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5721 if (qc->tf.protocol == ATA_PROT_NCQ) {
5722 WARN_ON(link->sactive & (1 << qc->tag));
5724 if (!link->sactive)
5725 ap->nr_active_links++;
5726 link->sactive |= 1 << qc->tag;
5727 } else {
5728 WARN_ON(link->sactive);
5730 ap->nr_active_links++;
5731 link->active_tag = qc->tag;
5734 qc->flags |= ATA_QCFLAG_ACTIVE;
5735 ap->qc_active |= 1 << qc->tag;
5737 if (ata_should_dma_map(qc)) {
5738 if (qc->flags & ATA_QCFLAG_SG) {
5739 if (ata_sg_setup(qc))
5740 goto sg_err;
5741 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5742 if (ata_sg_setup_one(qc))
5743 goto sg_err;
5745 } else {
5746 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5749 ap->ops->qc_prep(qc);
5751 qc->err_mask |= ap->ops->qc_issue(qc);
5752 if (unlikely(qc->err_mask))
5753 goto err;
5754 return;
5756 sg_err:
5757 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5758 qc->err_mask |= AC_ERR_SYSTEM;
5759 err:
5760 ata_qc_complete(qc);
5764 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5765 * @qc: command to issue to device
5767 * Using various libata functions and hooks, this function
5768 * starts an ATA command. ATA commands are grouped into
5769 * classes called "protocols", and issuing each type of protocol
5770 * is slightly different.
5772 * May be used as the qc_issue() entry in ata_port_operations.
5774 * LOCKING:
5775 * spin_lock_irqsave(host lock)
5777 * RETURNS:
5778 * Zero on success, AC_ERR_* mask on failure
5781 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5783 struct ata_port *ap = qc->ap;
5785 /* Use polling pio if the LLD doesn't handle
5786 * interrupt driven pio and atapi CDB interrupt.
5788 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5789 switch (qc->tf.protocol) {
5790 case ATA_PROT_PIO:
5791 case ATA_PROT_NODATA:
5792 case ATA_PROT_ATAPI:
5793 case ATA_PROT_ATAPI_NODATA:
5794 qc->tf.flags |= ATA_TFLAG_POLLING;
5795 break;
5796 case ATA_PROT_ATAPI_DMA:
5797 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5798 /* see ata_dma_blacklisted() */
5799 BUG();
5800 break;
5801 default:
5802 break;
5806 /* select the device */
5807 ata_dev_select(ap, qc->dev->devno, 1, 0);
5809 /* start the command */
5810 switch (qc->tf.protocol) {
5811 case ATA_PROT_NODATA:
5812 if (qc->tf.flags & ATA_TFLAG_POLLING)
5813 ata_qc_set_polling(qc);
5815 ata_tf_to_host(ap, &qc->tf);
5816 ap->hsm_task_state = HSM_ST_LAST;
5818 if (qc->tf.flags & ATA_TFLAG_POLLING)
5819 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5821 break;
5823 case ATA_PROT_DMA:
5824 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5826 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5827 ap->ops->bmdma_setup(qc); /* set up bmdma */
5828 ap->ops->bmdma_start(qc); /* initiate bmdma */
5829 ap->hsm_task_state = HSM_ST_LAST;
5830 break;
5832 case ATA_PROT_PIO:
5833 if (qc->tf.flags & ATA_TFLAG_POLLING)
5834 ata_qc_set_polling(qc);
5836 ata_tf_to_host(ap, &qc->tf);
5838 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5839 /* PIO data out protocol */
5840 ap->hsm_task_state = HSM_ST_FIRST;
5841 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5843 /* always send first data block using
5844 * the ata_pio_task() codepath.
5846 } else {
5847 /* PIO data in protocol */
5848 ap->hsm_task_state = HSM_ST;
5850 if (qc->tf.flags & ATA_TFLAG_POLLING)
5851 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5853 /* if polling, ata_pio_task() handles the rest.
5854 * otherwise, interrupt handler takes over from here.
5858 break;
5860 case ATA_PROT_ATAPI:
5861 case ATA_PROT_ATAPI_NODATA:
5862 if (qc->tf.flags & ATA_TFLAG_POLLING)
5863 ata_qc_set_polling(qc);
5865 ata_tf_to_host(ap, &qc->tf);
5867 ap->hsm_task_state = HSM_ST_FIRST;
5869 /* send cdb by polling if no cdb interrupt */
5870 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5871 (qc->tf.flags & ATA_TFLAG_POLLING))
5872 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5873 break;
5875 case ATA_PROT_ATAPI_DMA:
5876 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5878 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5879 ap->ops->bmdma_setup(qc); /* set up bmdma */
5880 ap->hsm_task_state = HSM_ST_FIRST;
5882 /* send cdb by polling if no cdb interrupt */
5883 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5884 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5885 break;
5887 default:
5888 WARN_ON(1);
5889 return AC_ERR_SYSTEM;
5892 return 0;
5896 * ata_host_intr - Handle host interrupt for given (port, task)
5897 * @ap: Port on which interrupt arrived (possibly...)
5898 * @qc: Taskfile currently active in engine
5900 * Handle host interrupt for given queued command. Currently,
5901 * only DMA interrupts are handled. All other commands are
5902 * handled via polling with interrupts disabled (nIEN bit).
5904 * LOCKING:
5905 * spin_lock_irqsave(host lock)
5907 * RETURNS:
5908 * One if interrupt was handled, zero if not (shared irq).
5911 inline unsigned int ata_host_intr (struct ata_port *ap,
5912 struct ata_queued_cmd *qc)
5914 struct ata_eh_info *ehi = &ap->link.eh_info;
5915 u8 status, host_stat = 0;
5917 VPRINTK("ata%u: protocol %d task_state %d\n",
5918 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5920 /* Check whether we are expecting interrupt in this state */
5921 switch (ap->hsm_task_state) {
5922 case HSM_ST_FIRST:
5923 /* Some pre-ATAPI-4 devices assert INTRQ
5924 * at this state when ready to receive CDB.
5927 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5928 * The flag is set only for ATAPI devices, so there is
5929 * no need to check is_atapi_taskfile(&qc->tf) again.
5931 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5932 goto idle_irq;
5933 break;
5934 case HSM_ST_LAST:
5935 if (qc->tf.protocol == ATA_PROT_DMA ||
5936 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5937 /* check status of DMA engine */
5938 host_stat = ap->ops->bmdma_status(ap);
5939 VPRINTK("ata%u: host_stat 0x%X\n",
5940 ap->print_id, host_stat);
5942 /* if it's not our irq... */
5943 if (!(host_stat & ATA_DMA_INTR))
5944 goto idle_irq;
5946 /* before we do anything else, clear DMA-Start bit */
5947 ap->ops->bmdma_stop(qc);
5949 if (unlikely(host_stat & ATA_DMA_ERR)) {
5950 /* error when transferring data to/from memory */
5951 qc->err_mask |= AC_ERR_HOST_BUS;
5952 ap->hsm_task_state = HSM_ST_ERR;
5955 break;
5956 case HSM_ST:
5957 break;
5958 default:
5959 goto idle_irq;
5962 /* check altstatus */
5963 status = ata_altstatus(ap);
5964 if (status & ATA_BUSY)
5965 goto idle_irq;
5967 /* check main status, clearing INTRQ */
5968 status = ata_chk_status(ap);
5969 if (unlikely(status & ATA_BUSY))
5970 goto idle_irq;
5972 /* ack bmdma irq events */
5973 ap->ops->irq_clear(ap);
5975 ata_hsm_move(ap, qc, status, 0);
5977 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5978 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5979 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5981 return 1; /* irq handled */
5983 idle_irq:
5984 ap->stats.idle_irq++;
5986 #ifdef ATA_IRQ_TRAP
5987 if ((ap->stats.idle_irq % 1000) == 0) {
5988 ata_chk_status(ap);
5989 ap->ops->irq_clear(ap);
5990 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5991 return 1;
5993 #endif
5994 return 0; /* irq not handled */
5998 * ata_interrupt - Default ATA host interrupt handler
5999 * @irq: irq line (unused)
6000 * @dev_instance: pointer to our ata_host information structure
6002 * Default interrupt handler for PCI IDE devices. Calls
6003 * ata_host_intr() for each port that is not disabled.
6005 * LOCKING:
6006 * Obtains host lock during operation.
6008 * RETURNS:
6009 * IRQ_NONE or IRQ_HANDLED.
6012 irqreturn_t ata_interrupt (int irq, void *dev_instance)
6014 struct ata_host *host = dev_instance;
6015 unsigned int i;
6016 unsigned int handled = 0;
6017 unsigned long flags;
6019 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6020 spin_lock_irqsave(&host->lock, flags);
6022 for (i = 0; i < host->n_ports; i++) {
6023 struct ata_port *ap;
6025 ap = host->ports[i];
6026 if (ap &&
6027 !(ap->flags & ATA_FLAG_DISABLED)) {
6028 struct ata_queued_cmd *qc;
6030 qc = ata_qc_from_tag(ap, ap->link.active_tag);
6031 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6032 (qc->flags & ATA_QCFLAG_ACTIVE))
6033 handled |= ata_host_intr(ap, qc);
6037 spin_unlock_irqrestore(&host->lock, flags);
6039 return IRQ_RETVAL(handled);
6043 * sata_scr_valid - test whether SCRs are accessible
6044 * @link: ATA link to test SCR accessibility for
6046 * Test whether SCRs are accessible for @link.
6048 * LOCKING:
6049 * None.
6051 * RETURNS:
6052 * 1 if SCRs are accessible, 0 otherwise.
6054 int sata_scr_valid(struct ata_link *link)
6056 struct ata_port *ap = link->ap;
6058 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6062 * sata_scr_read - read SCR register of the specified port
6063 * @link: ATA link to read SCR for
6064 * @reg: SCR to read
6065 * @val: Place to store read value
6067 * Read SCR register @reg of @link into *@val. This function is
6068 * guaranteed to succeed if @link is ap->link, the cable type of
6069 * the port is SATA and the port implements ->scr_read.
6071 * LOCKING:
6072 * None if @link is ap->link. Kernel thread context otherwise.
6074 * RETURNS:
6075 * 0 on success, negative errno on failure.
6077 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6079 if (ata_is_host_link(link)) {
6080 struct ata_port *ap = link->ap;
6082 if (sata_scr_valid(link))
6083 return ap->ops->scr_read(ap, reg, val);
6084 return -EOPNOTSUPP;
6087 return sata_pmp_scr_read(link, reg, val);
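/*
 * Usage sketch (illustrative): decoding the DET field of SStatus to test
 * for an established link, much as ata_link_online() below does.
 */
static inline int example_link_up(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return 0;		/* SCRs not accessible */
	return (sstatus & 0xf) == 0x3;	/* DET: device present, PHY online */
}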
6091 * sata_scr_write - write SCR register of the specified port
6092 * @link: ATA link to write SCR for
6093 * @reg: SCR to write
6094 * @val: value to write
6096 * Write @val to SCR register @reg of @link. This function is
6097 * guaranteed to succeed if @link is ap->link, the cable type of
6098 * the port is SATA and the port implements ->scr_write.
6100 * LOCKING:
6101 * None if @link is ap->link. Kernel thread context otherwise.
6103 * RETURNS:
6104 * 0 on success, negative errno on failure.
6106 int sata_scr_write(struct ata_link *link, int reg, u32 val)
6108 if (ata_is_host_link(link)) {
6109 struct ata_port *ap = link->ap;
6111 if (sata_scr_valid(link))
6112 return ap->ops->scr_write(ap, reg, val);
6113 return -EOPNOTSUPP;
6116 return sata_pmp_scr_write(link, reg, val);
6120 * sata_scr_write_flush - write SCR register of the specified port and flush
6121 * @link: ATA link to write SCR for
6122 * @reg: SCR to write
6123 * @val: value to write
6125 * This function is identical to sata_scr_write() except that this
6126 * function performs a flush after writing to the register.
6128 * LOCKING:
6129 * None if @link is ap->link. Kernel thread context otherwise.
6131 * RETURNS:
6132 * 0 on success, negative errno on failure.
6134 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6136 if (ata_is_host_link(link)) {
6137 struct ata_port *ap = link->ap;
6138 int rc;
6140 if (sata_scr_valid(link)) {
6141 rc = ap->ops->scr_write(ap, reg, val);
6142 if (rc == 0)
6143 rc = ap->ops->scr_read(ap, reg, &val);
6144 return rc;
6146 return -EOPNOTSUPP;
6149 return sata_pmp_scr_write(link, reg, val);
6153 * ata_link_online - test whether the given link is online
6154 * @link: ATA link to test
6156 * Test whether @link is online. Note that this function returns
6157 * 0 if online status of @link cannot be obtained, so
6158 * ata_link_online(link) != !ata_link_offline(link).
6160 * LOCKING:
6161 * None.
6163 * RETURNS:
6164 * 1 if the port online status is available and online.
6166 int ata_link_online(struct ata_link *link)
6168 u32 sstatus;
6170 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6171 (sstatus & 0xf) == 0x3)
6172 return 1;
6173 return 0;
6177 * ata_link_offline - test whether the given link is offline
6178 * @link: ATA link to test
6180 * Test whether @link is offline. Note that this function
6181 * returns 0 if offline status of @link cannot be obtained, so
6182 * ata_link_online(link) != !ata_link_offline(link).
6184 * LOCKING:
6185 * None.
6187 * RETURNS:
6188 * 1 if the port offline status is available and offline.
6190 int ata_link_offline(struct ata_link *link)
6192 u32 sstatus;
6194 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6195 (sstatus & 0xf) != 0x3)
6196 return 1;
6197 return 0;
6200 int ata_flush_cache(struct ata_device *dev)
6202 unsigned int err_mask;
6203 u8 cmd;
6205 if (!ata_try_flush_cache(dev))
6206 return 0;
6208 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6209 cmd = ATA_CMD_FLUSH_EXT;
6210 else
6211 cmd = ATA_CMD_FLUSH;
6213 /* This is wrong. On a failed flush we get back the LBA of the lost
6214 sector and we should (assuming it wasn't aborted as unknown) issue
6215 a further flush command to continue the writeback until it
6216 does not error */
6217 err_mask = ata_do_simple_cmd(dev, cmd);
6218 if (err_mask) {
6219 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6220 return -EIO;
6223 return 0;
6226 #ifdef CONFIG_PM
6227 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6228 unsigned int action, unsigned int ehi_flags,
6229 int wait)
6231 unsigned long flags;
6232 int i, rc;
6234 for (i = 0; i < host->n_ports; i++) {
6235 struct ata_port *ap = host->ports[i];
6236 struct ata_link *link;
6238 /* Previous resume operation might still be in
6239 * progress. Wait for PM_PENDING to clear.
6241 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6242 ata_port_wait_eh(ap);
6243 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6246 /* request PM ops to EH */
6247 spin_lock_irqsave(ap->lock, flags);
6249 ap->pm_mesg = mesg;
6250 if (wait) {
6251 rc = 0;
6252 ap->pm_result = &rc;
6255 ap->pflags |= ATA_PFLAG_PM_PENDING;
6256 __ata_port_for_each_link(link, ap) {
6257 link->eh_info.action |= action;
6258 link->eh_info.flags |= ehi_flags;
6261 ata_port_schedule_eh(ap);
6263 spin_unlock_irqrestore(ap->lock, flags);
6265 /* wait and check result */
6266 if (wait) {
6267 ata_port_wait_eh(ap);
6268 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6269 if (rc)
6270 return rc;
6274 return 0;
6278 * ata_host_suspend - suspend host
6279 * @host: host to suspend
6280 * @mesg: PM message
6282 * Suspend @host. Actual operation is performed by EH. This
6283 * function requests EH to perform PM operations and waits for EH
6284 * to finish.
6286 * LOCKING:
6287 * Kernel thread context (may sleep).
6289 * RETURNS:
6290 * 0 on success, -errno on failure.
6292 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6294 int rc;
6296 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6297 if (rc == 0)
6298 host->dev->power.power_state = mesg;
6299 return rc;
6303 * ata_host_resume - resume host
6304 * @host: host to resume
6306 * Resume @host. Actual operation is performed by EH. This
6307 * function requests EH to perform PM operations and returns.
6308 * Note that all resume operations are performed in parallel.
6310 * LOCKING:
6311 * Kernel thread context (may sleep).
6313 void ata_host_resume(struct ata_host *host)
6315 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6316 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6317 host->dev->power.power_state = PMSG_ON;
6319 #endif
6322 * ata_port_start - Set port up for dma.
6323 * @ap: Port to initialize
6325 * Called just after data structures for each port are
6326 * initialized. Allocates space for PRD table.
6328 * May be used as the port_start() entry in ata_port_operations.
6330 * LOCKING:
6331 * Inherited from caller.
6333 int ata_port_start(struct ata_port *ap)
6335 struct device *dev = ap->dev;
6336 int rc;
6338 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6339 GFP_KERNEL);
6340 if (!ap->prd)
6341 return -ENOMEM;
6343 rc = ata_pad_alloc(ap, dev);
6344 if (rc)
6345 return rc;
6347 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6348 (unsigned long long)ap->prd_dma);
6349 return 0;
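/*
 * Hedged sketch, not from this file: an LLDD that needs private per-port
 * state usually wraps this helper rather than reimplementing it; the
 * private allocation below is hypothetical.
 */
static inline int example_port_start(struct ata_port *ap)
{
	int rc = ata_port_start(ap);	/* PRD table + pad buffer */

	if (rc)
		return rc;
	ap->private_data = devm_kzalloc(ap->dev, 64, GFP_KERNEL);
	return ap->private_data ? 0 : -ENOMEM;
}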
6353 * ata_dev_init - Initialize an ata_device structure
6354 * @dev: Device structure to initialize
6356 * Initialize @dev in preparation for probing.
6358 * LOCKING:
6359 * Inherited from caller.
6361 void ata_dev_init(struct ata_device *dev)
6363 struct ata_link *link = dev->link;
6364 struct ata_port *ap = link->ap;
6365 unsigned long flags;
6367 /* SATA spd limit is bound to the first device */
6368 link->sata_spd_limit = link->hw_sata_spd_limit;
6369 link->sata_spd = 0;
6371 /* High bits of dev->flags are used to record warm plug
6372 * requests which occur asynchronously. Synchronize using
6373 * host lock.
6375 spin_lock_irqsave(ap->lock, flags);
6376 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6377 dev->horkage = 0;
6378 spin_unlock_irqrestore(ap->lock, flags);
6380 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6381 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6382 dev->pio_mask = UINT_MAX;
6383 dev->mwdma_mask = UINT_MAX;
6384 dev->udma_mask = UINT_MAX;
6388 * ata_link_init - Initialize an ata_link structure
6389 * @ap: ATA port link is attached to
6390 * @link: Link structure to initialize
6391 * @pmp: Port multiplier port number
6393 * Initialize @link.
6395 * LOCKING:
6396 * Kernel thread context (may sleep)
6398 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6400 int i;
6402 /* clear everything except for devices */
6403 memset(link, 0, offsetof(struct ata_link, device[0]));
6405 link->ap = ap;
6406 link->pmp = pmp;
6407 link->active_tag = ATA_TAG_POISON;
6408 link->hw_sata_spd_limit = UINT_MAX;
6410 /* can't use iterator, ap isn't initialized yet */
6411 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6412 struct ata_device *dev = &link->device[i];
6414 dev->link = link;
6415 dev->devno = dev - link->device;
6416 ata_dev_init(dev);
6421 * sata_link_init_spd - Initialize link->sata_spd_limit
6422 * @link: Link to configure sata_spd_limit for
6424 * Initialize @link->[hw_]sata_spd_limit to the currently
6425 * configured value.
6427 * LOCKING:
6428 * Kernel thread context (may sleep).
6430 * RETURNS:
6431 * 0 on success, -errno on failure.
6433 int sata_link_init_spd(struct ata_link *link)
6435 u32 scontrol, spd;
6436 int rc;
6438 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6439 if (rc)
6440 return rc;
6442 spd = (scontrol >> 4) & 0xf;
6443 if (spd)
6444 link->hw_sata_spd_limit &= (1 << spd) - 1;
6446 link->sata_spd_limit = link->hw_sata_spd_limit;
6448 return 0;
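/*
 * Editor's note on the masking above: SControl bits 7:4 carry the
 * configured speed limit.  For example, scontrol == 0x0010 gives
 * spd == 1 (limit to 1.5 Gbps), so hw_sata_spd_limit is ANDed with
 * (1 << 1) - 1 == 0x1, keeping only the gen-1 bit; spd == 2 would
 * keep 0x3 (gen 1 and gen 2).  spd == 0 means "no limit" and leaves
 * the mask untouched.
 */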
6452 * ata_port_alloc - allocate and initialize basic ATA port resources
6453 * @host: ATA host this allocated port belongs to
6455 * Allocate and initialize basic ATA port resources.
6457 * RETURNS:
6458 * Allocated ATA port on success, NULL on failure.
6460 * LOCKING:
6461 * Inherited from calling layer (may sleep).
6463 struct ata_port *ata_port_alloc(struct ata_host *host)
6465 struct ata_port *ap;
6467 DPRINTK("ENTER\n");
6469 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6470 if (!ap)
6471 return NULL;
6473 ap->pflags |= ATA_PFLAG_INITIALIZING;
6474 ap->lock = &host->lock;
6475 ap->flags = ATA_FLAG_DISABLED;
6476 ap->print_id = -1;
6477 ap->ctl = ATA_DEVCTL_OBS;
6478 ap->host = host;
6479 ap->dev = host->dev;
6480 ap->last_ctl = 0xFF;
6482 #if defined(ATA_VERBOSE_DEBUG)
6483 /* turn on all debugging levels */
6484 ap->msg_enable = 0x00FF;
6485 #elif defined(ATA_DEBUG)
6486 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6487 #else
6488 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6489 #endif
6491 INIT_DELAYED_WORK(&ap->port_task, NULL);
6492 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6493 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6494 INIT_LIST_HEAD(&ap->eh_done_q);
6495 init_waitqueue_head(&ap->eh_wait_q);
6496 init_timer_deferrable(&ap->fastdrain_timer);
6497 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6498 ap->fastdrain_timer.data = (unsigned long)ap;
6500 ap->cbl = ATA_CBL_NONE;
6502 ata_link_init(ap, &ap->link, 0);
6504 #ifdef ATA_IRQ_TRAP
6505 ap->stats.unhandled_irq = 1;
6506 ap->stats.idle_irq = 1;
6507 #endif
6508 return ap;
6511 static void ata_host_release(struct device *gendev, void *res)
6513 struct ata_host *host = dev_get_drvdata(gendev);
6514 int i;
6516 for (i = 0; i < host->n_ports; i++) {
6517 struct ata_port *ap = host->ports[i];
6519 if (!ap)
6520 continue;
6522 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6523 ap->ops->port_stop(ap);
6526 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6527 host->ops->host_stop(host);
6529 for (i = 0; i < host->n_ports; i++) {
6530 struct ata_port *ap = host->ports[i];
6532 if (!ap)
6533 continue;
6535 if (ap->scsi_host)
6536 scsi_host_put(ap->scsi_host);
6538 kfree(ap->pmp_link);
6539 kfree(ap);
6540 host->ports[i] = NULL;
6543 dev_set_drvdata(gendev, NULL);
6547 * ata_host_alloc - allocate and init basic ATA host resources
6548 * @dev: generic device this host is associated with
6549 * @max_ports: maximum number of ATA ports associated with this host
6551 * Allocate and initialize basic ATA host resources. An LLD calls
6552 * this function to allocate a host, fully initializes it, and
6553 * then attaches it using ata_host_register().
6555 * @max_ports ports are allocated and host->n_ports is
6556 * initialized to @max_ports. The caller is allowed to decrease
6557 * host->n_ports before calling ata_host_register(). The unused
6558 * ports will be automatically freed on registration.
6560 * RETURNS:
6561 * Allocated ATA host on success, NULL on failure.
6563 * LOCKING:
6564 * Inherited from calling layer (may sleep).
6566 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6568 struct ata_host *host;
6569 size_t sz;
6570 int i;
6572 DPRINTK("ENTER\n");
6574 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6575 return NULL;
6577 /* alloc a container for our list of ATA ports (buses) */
6578 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6580 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6581 if (!host)
6582 goto err_out;
6584 devres_add(dev, host);
6585 dev_set_drvdata(dev, host);
6587 spin_lock_init(&host->lock);
6588 host->dev = dev;
6589 host->n_ports = max_ports;
6591 /* allocate ports bound to this host */
6592 for (i = 0; i < max_ports; i++) {
6593 struct ata_port *ap;
6595 ap = ata_port_alloc(host);
6596 if (!ap)
6597 goto err_out;
6599 ap->port_no = i;
6600 host->ports[i] = ap;
6603 devres_remove_group(dev, NULL);
6604 return host;
6606 err_out:
6607 devres_release_group(dev, NULL);
6608 return NULL;
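/*
 * Editor's sketch of the "caller is allowed to decrease host->n_ports"
 * rule documented above.  The hardware maximum of 4 and nr_found are
 * invented for the example; ata_host_register() kfrees the unused
 * tail ports.
 */
static struct ata_host *my_alloc_found_ports(struct device *dev, int nr_found)
{
	struct ata_host *host;

	host = ata_host_alloc(dev, 4);		/* hardware maximum */
	if (!host)
		return NULL;

	if (nr_found < host->n_ports)
		host->n_ports = nr_found;	/* tail freed on register */

	return host;
}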
6612 * ata_host_alloc_pinfo - alloc host and init with port_info array
6613 * @dev: generic device this host is associated with
6614 * @ppi: array of ATA port_info to initialize host with
6615 * @n_ports: number of ATA ports attached to this host
6617 * Allocate ATA host and initialize with info from @ppi. If the
6618 * array is NULL-terminated, @ppi may contain fewer entries than
6619 * @n_ports. The last entry will be used for the remaining ports.
6621 * RETURNS:
6622 * Allocated ATA host on success, NULL on failure.
6624 * LOCKING:
6625 * Inherited from calling layer (may sleep).
6627 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6628 const struct ata_port_info * const * ppi,
6629 int n_ports)
6631 const struct ata_port_info *pi;
6632 struct ata_host *host;
6633 int i, j;
6635 host = ata_host_alloc(dev, n_ports);
6636 if (!host)
6637 return NULL;
6639 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6640 struct ata_port *ap = host->ports[i];
6642 if (ppi[j])
6643 pi = ppi[j++];
6645 ap->pio_mask = pi->pio_mask;
6646 ap->mwdma_mask = pi->mwdma_mask;
6647 ap->udma_mask = pi->udma_mask;
6648 ap->flags |= pi->flags;
6649 ap->link.flags |= pi->link_flags;
6650 ap->ops = pi->port_ops;
6652 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6653 host->ops = pi->port_ops;
6654 if (!host->private_data && pi->private_data)
6655 host->private_data = pi->private_data;
6658 return host;
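/*
 * Editor's sketch of the @ppi convention above: a two-port controller
 * where one NULL-terminated entry covers both ports.  my_port_info,
 * my_port_ops and the mode masks are hypothetical; only fields this
 * function actually copies are set.
 */
extern const struct ata_port_operations my_port_ops;

static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= 0x1f,			/* pio0-4 */
	.udma_mask	= ATA_UDMA6,		/* udma0-6 */
	.port_ops	= &my_port_ops,
};

static struct ata_host *my_alloc_host(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}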
6662 * ata_host_start - start and freeze ports of an ATA host
6663 * @host: ATA host to start ports for
6665 * Start and then freeze ports of @host. Started status is
6666 * recorded in host->flags, so this function can be called
6667 * multiple times. Ports are guaranteed to get started only
6668 * once. If host->ops isn't initialized yet, it's set to the
6669 * first non-dummy port ops.
6671 * LOCKING:
6672 * Inherited from calling layer (may sleep).
6674 * RETURNS:
6675 * 0 if all ports are started successfully, -errno otherwise.
6677 int ata_host_start(struct ata_host *host)
6679 int i, rc;
6681 if (host->flags & ATA_HOST_STARTED)
6682 return 0;
6684 for (i = 0; i < host->n_ports; i++) {
6685 struct ata_port *ap = host->ports[i];
6687 if (!host->ops && !ata_port_is_dummy(ap))
6688 host->ops = ap->ops;
6690 if (ap->ops->port_start) {
6691 rc = ap->ops->port_start(ap);
6692 if (rc) {
6693 ata_port_printk(ap, KERN_ERR, "failed to "
6694 "start port (errno=%d)\n", rc);
6695 goto err_out;
6699 ata_eh_freeze_port(ap);
6702 host->flags |= ATA_HOST_STARTED;
6703 return 0;
6705 err_out:
6706 while (--i >= 0) {
6707 struct ata_port *ap = host->ports[i];
6709 if (ap->ops->port_stop)
6710 ap->ops->port_stop(ap);
6712 return rc;
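/*
 * Editor's sketch: the open-coded equivalent of ata_host_activate()
 * (defined further below), for an LLD that needs extra setup between
 * starting and registering.  my_interrupt and my_sht are hypothetical
 * and assumed to be defined elsewhere in the driver.
 */
static irqreturn_t my_interrupt(int irq, void *dev_instance);
static struct scsi_host_template my_sht;

static int my_activate(struct ata_host *host, int irq)
{
	int rc;

	rc = ata_host_start(host);	/* starts and freezes ports once */
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, my_interrupt, IRQF_SHARED,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	/* ... driver-specific setup would go here ... */

	return ata_host_register(host, &my_sht);
}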
6716 * ata_host_init - Initialize a host struct
6717 * @host: host to initialize
6718 * @dev: device host is attached to
6719 * @flags: host flags
6720 * @ops: port_ops
6722 * LOCKING:
6723 * PCI/etc. bus probe sem.
6726 /* KILLME - the only user left is ipr */
6727 void ata_host_init(struct ata_host *host, struct device *dev,
6728 unsigned long flags, const struct ata_port_operations *ops)
6730 spin_lock_init(&host->lock);
6731 host->dev = dev;
6732 host->flags = flags;
6733 host->ops = ops;
6737 * ata_host_register - register initialized ATA host
6738 * @host: ATA host to register
6739 * @sht: template for SCSI host
6741 * Register initialized ATA host. @host is allocated using
6742 * ata_host_alloc() and fully initialized by the LLD. This function
6743 * starts ports, registers @host with ATA and SCSI layers and
6744 * probes registered devices.
6746 * LOCKING:
6747 * Inherited from calling layer (may sleep).
6749 * RETURNS:
6750 * 0 on success, -errno otherwise.
6752 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6754 int i, rc;
6756 /* host must have been started */
6757 if (!(host->flags & ATA_HOST_STARTED)) {
6758 dev_printk(KERN_ERR, host->dev,
6759 "BUG: trying to register unstarted host\n");
6760 WARN_ON(1);
6761 return -EINVAL;
6764 /* Blow away unused ports. This happens when the LLD can't
6765 * determine the exact number of ports to allocate at
6766 * allocation time.
6768 for (i = host->n_ports; host->ports[i]; i++)
6769 kfree(host->ports[i]);
6771 /* give ports names and add SCSI hosts */
6772 for (i = 0; i < host->n_ports; i++)
6773 host->ports[i]->print_id = ata_print_id++;
6775 rc = ata_scsi_add_hosts(host, sht);
6776 if (rc)
6777 return rc;
6779 /* associate with ACPI nodes */
6780 ata_acpi_associate(host);
6782 /* set cable, sata_spd_limit and report */
6783 for (i = 0; i < host->n_ports; i++) {
6784 struct ata_port *ap = host->ports[i];
6785 unsigned long xfer_mask;
6787 /* set SATA cable type if still unset */
6788 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6789 ap->cbl = ATA_CBL_SATA;
6791 /* init sata_spd_limit to the current value */
6792 sata_link_init_spd(&ap->link);
6794 /* print per-port info to dmesg */
6795 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6796 ap->udma_mask);
6798 if (!ata_port_is_dummy(ap)) {
6799 ata_port_printk(ap, KERN_INFO,
6800 "%cATA max %s %s\n",
6801 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6802 ata_mode_string(xfer_mask),
6803 ap->link.eh_info.desc);
6804 ata_ehi_clear_desc(&ap->link.eh_info);
6805 } else
6806 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6809 /* perform each probe synchronously */
6810 DPRINTK("probe begin\n");
6811 for (i = 0; i < host->n_ports; i++) {
6812 struct ata_port *ap = host->ports[i];
6813 int rc;
6815 /* probe */
6816 if (ap->ops->error_handler) {
6817 struct ata_eh_info *ehi = &ap->link.eh_info;
6818 unsigned long flags;
6820 ata_port_probe(ap);
6822 /* kick EH for boot probing */
6823 spin_lock_irqsave(ap->lock, flags);
6825 ehi->probe_mask =
6826 (1 << ata_link_max_devices(&ap->link)) - 1;
6827 ehi->action |= ATA_EH_SOFTRESET;
6828 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6830 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6831 ap->pflags |= ATA_PFLAG_LOADING;
6832 ata_port_schedule_eh(ap);
6834 spin_unlock_irqrestore(ap->lock, flags);
6836 /* wait for EH to finish */
6837 ata_port_wait_eh(ap);
6838 } else {
6839 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6840 rc = ata_bus_probe(ap);
6841 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6843 if (rc) {
6844 /* FIXME: do something useful here?
6845 * Current libata behavior will
6846 * tear down everything when
6847 * the module is removed
6848 * or the h/w is unplugged.
6854 /* probes are done, now scan each port's disk(s) */
6855 DPRINTK("host probe begin\n");
6856 for (i = 0; i < host->n_ports; i++) {
6857 struct ata_port *ap = host->ports[i];
6859 ata_scsi_scan_host(ap, 1);
6862 return 0;
6866 * ata_host_activate - start host, request IRQ and register it
6867 * @host: target ATA host
6868 * @irq: IRQ to request
6869 * @irq_handler: irq_handler used when requesting IRQ
6870 * @irq_flags: irq_flags used when requesting IRQ
6871 * @sht: scsi_host_template to use when registering the host
6873 * After allocating an ATA host and initializing it, most libata
6874 * LLDs perform three steps to activate the host - start host,
6875 * request IRQ and register it. This helper takes the necessary
6876 * arguments and performs the three steps in one go.
6878 * LOCKING:
6879 * Inherited from calling layer (may sleep).
6881 * RETURNS:
6882 * 0 on success, -errno otherwise.
6884 int ata_host_activate(struct ata_host *host, int irq,
6885 irq_handler_t irq_handler, unsigned long irq_flags,
6886 struct scsi_host_template *sht)
6888 int i, rc;
6890 rc = ata_host_start(host);
6891 if (rc)
6892 return rc;
6894 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6895 dev_driver_string(host->dev), host);
6896 if (rc)
6897 return rc;
6899 for (i = 0; i < host->n_ports; i++)
6900 ata_port_desc(host->ports[i], "irq %d", irq);
6902 rc = ata_host_register(host, sht);
6903 /* if failed, just free the IRQ and leave ports alone */
6904 if (rc)
6905 devm_free_irq(host->dev, irq, host);
6907 return rc;
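/*
 * Editor's sketch of the consolidated path: a minimal PCI probe that
 * allocates a host and activates it in one call.  my_port_info,
 * my_interrupt and my_sht are the same hypothetical names used in
 * the sketches above.
 */
static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	/* ... map BARs and fill host->ports[0]->ioaddr here ... */

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, my_interrupt,
				 IRQF_SHARED, &my_sht);
}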
6911 * ata_port_detach - Detach ATA port in preparation for device removal
6912 * @ap: ATA port to be detached
6914 * Detach all ATA devices and the associated SCSI devices of @ap;
6915 * then, remove the associated SCSI host. @ap is guaranteed to
6916 * be quiescent on return from this function.
6918 * LOCKING:
6919 * Kernel thread context (may sleep).
6921 void ata_port_detach(struct ata_port *ap)
6923 unsigned long flags;
6924 struct ata_link *link;
6925 struct ata_device *dev;
6927 if (!ap->ops->error_handler)
6928 goto skip_eh;
6930 /* tell EH we're leaving & flush EH */
6931 spin_lock_irqsave(ap->lock, flags);
6932 ap->pflags |= ATA_PFLAG_UNLOADING;
6933 spin_unlock_irqrestore(ap->lock, flags);
6935 ata_port_wait_eh(ap);
6937 /* EH is now guaranteed to see UNLOADING, so no new device
6938 * will be attached. Disable all existing devices.
6940 spin_lock_irqsave(ap->lock, flags);
6942 ata_port_for_each_link(link, ap) {
6943 ata_link_for_each_dev(dev, link)
6944 ata_dev_disable(dev);
6947 spin_unlock_irqrestore(ap->lock, flags);
6949 /* Final freeze & EH. All in-flight commands are aborted. EH
6950 * will be skipped and retries will be terminated with bad
6951 * target.
6953 spin_lock_irqsave(ap->lock, flags);
6954 ata_port_freeze(ap); /* won't be thawed */
6955 spin_unlock_irqrestore(ap->lock, flags);
6957 ata_port_wait_eh(ap);
6958 cancel_rearming_delayed_work(&ap->hotplug_task);
6960 skip_eh:
6961 /* remove the associated SCSI host */
6962 scsi_remove_host(ap->scsi_host);
6966 * ata_host_detach - Detach all ports of an ATA host
6967 * @host: Host to detach
6969 * Detach all ports of @host.
6971 * LOCKING:
6972 * Kernel thread context (may sleep).
6974 void ata_host_detach(struct ata_host *host)
6976 int i;
6978 for (i = 0; i < host->n_ports; i++)
6979 ata_port_detach(host->ports[i]);
6983 * ata_std_ports - initialize ioaddr with standard port offsets.
6984 * @ioaddr: IO address structure to be initialized
6986 * Utility function which initializes data_addr, error_addr,
6987 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6988 * device_addr, status_addr, and command_addr to standard offsets
6989 * relative to cmd_addr.
6991 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6994 void ata_std_ports(struct ata_ioports *ioaddr)
6996 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6997 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6998 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6999 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7000 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7001 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7002 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7003 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7004 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7005 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
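/*
 * Editor's sketch of typical usage: the LLD sets cmd_addr plus the
 * addresses ata_std_ports() deliberately leaves alone, then lets the
 * helper derive the rest.  mmio_base and the 0x100/0x108 offsets are
 * invented example values.
 */
static void my_setup_port(struct ata_port *ap, void __iomem *mmio_base)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = mmio_base + 0x100;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = mmio_base + 0x108;

	/* fills data/error/feature/.../status/command from cmd_addr */
	ata_std_ports(ioaddr);
}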
7009 #ifdef CONFIG_PCI
7012 * ata_pci_remove_one - PCI layer callback for device removal
7013 * @pdev: PCI device that was removed
7015 * PCI layer indicates to libata via this hook that hot-unplug or
7016 * module unload event has occurred. Detach all ports. Resource
7017 * release is handled via devres.
7019 * LOCKING:
7020 * Inherited from PCI layer (may sleep).
7022 void ata_pci_remove_one(struct pci_dev *pdev)
7024 struct device *dev = &pdev->dev;
7025 struct ata_host *host = dev_get_drvdata(dev);
7027 ata_host_detach(host);
7030 /* move to PCI subsystem */
7031 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7033 unsigned long tmp = 0;
7035 switch (bits->width) {
7036 case 1: {
7037 u8 tmp8 = 0;
7038 pci_read_config_byte(pdev, bits->reg, &tmp8);
7039 tmp = tmp8;
7040 break;
7042 case 2: {
7043 u16 tmp16 = 0;
7044 pci_read_config_word(pdev, bits->reg, &tmp16);
7045 tmp = tmp16;
7046 break;
7048 case 4: {
7049 u32 tmp32 = 0;
7050 pci_read_config_dword(pdev, bits->reg, &tmp32);
7051 tmp = tmp32;
7052 break;
7055 default:
7056 return -EINVAL;
7059 tmp &= bits->mask;
7061 return (tmp == bits->val) ? 1 : 0;
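/*
 * Editor's sketch: struct pci_bits describes "read config register
 * @reg as @width bytes, mask with @mask, compare against @val", and
 * pci_test_config_bits() returns 1 on match, 0 on mismatch, -EINVAL
 * on a bad width.  The register and mask values below are invented;
 * a real driver uses its controller's documented enable bits.
 */
static const struct pci_bits my_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* port 0: bit 7 of config byte 0x41 */
	{ 0x43, 1, 0x80, 0x80 },	/* port 1: bit 7 of config byte 0x43 */
};

static int my_port_enabled(struct pci_dev *pdev, int port)
{
	return pci_test_config_bits(pdev, &my_enable_bits[port]) == 1;
}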
7064 #ifdef CONFIG_PM
7065 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7067 pci_save_state(pdev);
7068 pci_disable_device(pdev);
7070 if (mesg.event == PM_EVENT_SUSPEND)
7071 pci_set_power_state(pdev, PCI_D3hot);
7074 int ata_pci_device_do_resume(struct pci_dev *pdev)
7076 int rc;
7078 pci_set_power_state(pdev, PCI_D0);
7079 pci_restore_state(pdev);
7081 rc = pcim_enable_device(pdev);
7082 if (rc) {
7083 dev_printk(KERN_ERR, &pdev->dev,
7084 "failed to enable device after resume (%d)\n", rc);
7085 return rc;
7088 pci_set_master(pdev);
7089 return 0;
7092 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7094 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7095 int rc = 0;
7097 rc = ata_host_suspend(host, mesg);
7098 if (rc)
7099 return rc;
7101 ata_pci_device_do_suspend(pdev, mesg);
7103 return 0;
7106 int ata_pci_device_resume(struct pci_dev *pdev)
7108 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7109 int rc;
7111 rc = ata_pci_device_do_resume(pdev);
7112 if (rc == 0)
7113 ata_host_resume(host);
7114 return rc;
7116 #endif /* CONFIG_PM */
7118 #endif /* CONFIG_PCI */
7121 static int __init ata_init(void)
7123 ata_probe_timeout *= HZ;
7124 ata_wq = create_workqueue("ata");
7125 if (!ata_wq)
7126 return -ENOMEM;
7128 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7129 if (!ata_aux_wq) {
7130 destroy_workqueue(ata_wq);
7131 return -ENOMEM;
7134 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7135 return 0;
7138 static void __exit ata_exit(void)
7140 destroy_workqueue(ata_wq);
7141 destroy_workqueue(ata_aux_wq);
7144 subsys_initcall(ata_init);
7145 module_exit(ata_exit);
7147 static unsigned long ratelimit_time;
7148 static DEFINE_SPINLOCK(ata_ratelimit_lock);
7150 int ata_ratelimit(void)
7152 int rc;
7153 unsigned long flags;
7155 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7157 if (time_after(jiffies, ratelimit_time)) {
7158 rc = 1;
7159 ratelimit_time = jiffies + (HZ/5);
7160 } else
7161 rc = 0;
7163 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7165 return rc;
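/*
 * Editor's sketch of the intended ata_ratelimit() pattern: gate a
 * potentially storming diagnostic so that, given the HZ/5 window
 * above, at most ~5 messages per second reach the log.
 * my_report_spurious_irq is a hypothetical caller.
 */
static void my_report_spurious_irq(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}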
7169 * ata_wait_register - wait until register value changes
7170 * @reg: IO-mapped register
7171 * @mask: Mask to apply to read register value
7172 * @val: Wait condition
7173 * @interval_msec: polling interval in milliseconds
7174 * @timeout_msec: timeout in milliseconds
7176 * Waiting for some bits of register to change is a common
7177 * operation for ATA controllers. This function reads a 32-bit LE
7178 * IO-mapped register @reg and tests for the following condition:
7180 * (*@reg & mask) != val
7182 * If the condition is met, it returns; otherwise, the process is
7183 * repeated after @interval_msec until timeout.
7185 * LOCKING:
7186 * Kernel thread context (may sleep)
7188 * RETURNS:
7189 * The final register value.
7191 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7192 unsigned long interval_msec,
7193 unsigned long timeout_msec)
7195 unsigned long timeout;
7196 u32 tmp;
7198 tmp = ioread32(reg);
7200 /* Calculate timeout _after_ the first read to make sure
7201 * preceding writes reach the controller before starting to
7202 * eat away the timeout.
7204 timeout = jiffies + (timeout_msec * HZ) / 1000;
7206 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7207 msleep(interval_msec);
7208 tmp = ioread32(reg);
7211 return tmp;
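/*
 * Editor's sketch: waiting for a controller-reset bit to self-clear,
 * the usual ata_wait_register() pattern.  MY_HOST_RESET and the
 * host_ctl register are hypothetical; the call polls every 1 ms for
 * up to 500 ms and returns the last value read.
 */
#define MY_HOST_RESET	(1 << 0)

static int my_controller_reset(void __iomem *host_ctl)
{
	u32 tmp;

	iowrite32(MY_HOST_RESET, host_ctl);
	ioread32(host_ctl);	/* flush the write */

	/* returns once (tmp & MY_HOST_RESET) != MY_HOST_RESET, or on timeout */
	tmp = ata_wait_register(host_ctl, MY_HOST_RESET, MY_HOST_RESET,
				1, 500);
	if (tmp & MY_HOST_RESET)
		return -EIO;
	return 0;
}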
7215 * Dummy port_ops
7217 static void ata_dummy_noret(struct ata_port *ap) { }
7218 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7219 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7221 static u8 ata_dummy_check_status(struct ata_port *ap)
7223 return ATA_DRDY;
7226 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7228 return AC_ERR_SYSTEM;
7231 const struct ata_port_operations ata_dummy_port_ops = {
7232 .check_status = ata_dummy_check_status,
7233 .check_altstatus = ata_dummy_check_status,
7234 .dev_select = ata_noop_dev_select,
7235 .qc_prep = ata_noop_qc_prep,
7236 .qc_issue = ata_dummy_qc_issue,
7237 .freeze = ata_dummy_noret,
7238 .thaw = ata_dummy_noret,
7239 .error_handler = ata_dummy_noret,
7240 .post_internal_cmd = ata_dummy_qc_noret,
7241 .irq_clear = ata_dummy_noret,
7242 .port_start = ata_dummy_ret0,
7243 .port_stop = ata_dummy_noret,
7246 const struct ata_port_info ata_dummy_port_info = {
7247 .port_ops = &ata_dummy_port_ops,
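/*
 * Editor's sketch of what the dummy port is for: a two-channel
 * controller whose second channel is absent can hand
 * ata_dummy_port_info to ata_host_alloc_pinfo() so port numbering
 * stays stable while the dead port rejects all commands with
 * AC_ERR_SYSTEM.  my_port_info is the hypothetical live-port info
 * from the sketches above.
 */
static struct ata_host *my_alloc_with_dead_port(struct device *dev)
{
	const struct ata_port_info *ppi[] =
		{ &my_port_info, &ata_dummy_port_info };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}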
7251 * libata is essentially a library of internal helper functions for
7252 * low-level ATA host controller drivers. As such, the API/ABI is
7253 * likely to change as new drivers are added and updated.
7254 * Do not depend on ABI/API stability.
7257 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7258 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7259 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7260 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7261 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7262 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7263 EXPORT_SYMBOL_GPL(ata_std_ports);
7264 EXPORT_SYMBOL_GPL(ata_host_init);
7265 EXPORT_SYMBOL_GPL(ata_host_alloc);
7266 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7267 EXPORT_SYMBOL_GPL(ata_host_start);
7268 EXPORT_SYMBOL_GPL(ata_host_register);
7269 EXPORT_SYMBOL_GPL(ata_host_activate);
7270 EXPORT_SYMBOL_GPL(ata_host_detach);
7271 EXPORT_SYMBOL_GPL(ata_sg_init);
7272 EXPORT_SYMBOL_GPL(ata_sg_init_one);
7273 EXPORT_SYMBOL_GPL(ata_hsm_move);
7274 EXPORT_SYMBOL_GPL(ata_qc_complete);
7275 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7276 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7277 EXPORT_SYMBOL_GPL(ata_tf_load);
7278 EXPORT_SYMBOL_GPL(ata_tf_read);
7279 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7280 EXPORT_SYMBOL_GPL(ata_std_dev_select);
7281 EXPORT_SYMBOL_GPL(sata_print_link_status);
7282 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7283 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7284 EXPORT_SYMBOL_GPL(ata_check_status);
7285 EXPORT_SYMBOL_GPL(ata_altstatus);
7286 EXPORT_SYMBOL_GPL(ata_exec_command);
7287 EXPORT_SYMBOL_GPL(ata_port_start);
7288 EXPORT_SYMBOL_GPL(ata_sff_port_start);
7289 EXPORT_SYMBOL_GPL(ata_interrupt);
7290 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7291 EXPORT_SYMBOL_GPL(ata_data_xfer);
7292 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
7293 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7294 EXPORT_SYMBOL_GPL(ata_qc_prep);
7295 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7296 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7297 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7298 EXPORT_SYMBOL_GPL(ata_bmdma_start);
7299 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7300 EXPORT_SYMBOL_GPL(ata_bmdma_status);
7301 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7302 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7303 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7304 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7305 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7306 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7307 EXPORT_SYMBOL_GPL(ata_port_probe);
7308 EXPORT_SYMBOL_GPL(ata_dev_disable);
7309 EXPORT_SYMBOL_GPL(sata_set_spd);
7310 EXPORT_SYMBOL_GPL(sata_link_debounce);
7311 EXPORT_SYMBOL_GPL(sata_link_resume);
7312 EXPORT_SYMBOL_GPL(sata_phy_reset);
7313 EXPORT_SYMBOL_GPL(__sata_phy_reset);
7314 EXPORT_SYMBOL_GPL(ata_bus_reset);
7315 EXPORT_SYMBOL_GPL(ata_std_prereset);
7316 EXPORT_SYMBOL_GPL(ata_std_softreset);
7317 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7318 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7319 EXPORT_SYMBOL_GPL(ata_std_postreset);
7320 EXPORT_SYMBOL_GPL(ata_dev_classify);
7321 EXPORT_SYMBOL_GPL(ata_dev_pair);
7322 EXPORT_SYMBOL_GPL(ata_port_disable);
7323 EXPORT_SYMBOL_GPL(ata_ratelimit);
7324 EXPORT_SYMBOL_GPL(ata_wait_register);
7325 EXPORT_SYMBOL_GPL(ata_busy_sleep);
7326 EXPORT_SYMBOL_GPL(ata_wait_ready);
7327 EXPORT_SYMBOL_GPL(ata_port_queue_task);
7328 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7329 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7330 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7331 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7332 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7333 EXPORT_SYMBOL_GPL(ata_host_intr);
7334 EXPORT_SYMBOL_GPL(sata_scr_valid);
7335 EXPORT_SYMBOL_GPL(sata_scr_read);
7336 EXPORT_SYMBOL_GPL(sata_scr_write);
7337 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7338 EXPORT_SYMBOL_GPL(ata_link_online);
7339 EXPORT_SYMBOL_GPL(ata_link_offline);
7340 #ifdef CONFIG_PM
7341 EXPORT_SYMBOL_GPL(ata_host_suspend);
7342 EXPORT_SYMBOL_GPL(ata_host_resume);
7343 #endif /* CONFIG_PM */
7344 EXPORT_SYMBOL_GPL(ata_id_string);
7345 EXPORT_SYMBOL_GPL(ata_id_c_string);
7346 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7347 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7349 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7350 EXPORT_SYMBOL_GPL(ata_timing_compute);
7351 EXPORT_SYMBOL_GPL(ata_timing_merge);
7353 #ifdef CONFIG_PCI
7354 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7355 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7356 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7357 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7358 EXPORT_SYMBOL_GPL(ata_pci_init_one);
7359 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7360 #ifdef CONFIG_PM
7361 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7362 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7363 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7364 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7365 #endif /* CONFIG_PM */
7366 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7367 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7368 #endif /* CONFIG_PCI */
7370 EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
7371 EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7372 EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7373 EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7374 EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7376 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7377 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7378 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7379 EXPORT_SYMBOL_GPL(ata_port_desc);
7380 #ifdef CONFIG_PCI
7381 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7382 #endif /* CONFIG_PCI */
7383 EXPORT_SYMBOL_GPL(ata_eng_timeout);
7384 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7385 EXPORT_SYMBOL_GPL(ata_link_abort);
7386 EXPORT_SYMBOL_GPL(ata_port_abort);
7387 EXPORT_SYMBOL_GPL(ata_port_freeze);
7388 EXPORT_SYMBOL_GPL(sata_async_notification);
7389 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7390 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7391 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7392 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7393 EXPORT_SYMBOL_GPL(ata_do_eh);
7394 EXPORT_SYMBOL_GPL(ata_irq_on);
7395 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7397 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7398 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7399 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7400 EXPORT_SYMBOL_GPL(ata_cable_sata);