pata: Display Configuring .. lines for devices with private set_mode methods
[linux-2.6/verdex.git] / drivers / ata / libata-core.c
blob d3b4e25074c17ab8dd8f9c2d3e4cf40099511804
1 /*
2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
40 #include <linux/mm.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
56 #include <asm/io.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
60 #include "libata.h"
62 #define DRV_VERSION "2.10" /* must be exactly four chars */
65 /* debounce timing parameters in msecs { interval, duration, timeout } */
66 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
70 static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73 static void ata_dev_xfermask(struct ata_device *dev);
75 static unsigned int ata_print_id = 1;
76 static struct workqueue_struct *ata_wq;
78 struct workqueue_struct *ata_aux_wq;
80 int atapi_enabled = 1;
81 module_param(atapi_enabled, int, 0444);
82 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84 int atapi_dmadir = 0;
85 module_param(atapi_dmadir, int, 0444);
86 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88 int libata_fua = 0;
89 module_param_named(fua, libata_fua, int, 0444);
90 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
92 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
93 module_param(ata_probe_timeout, int, 0444);
94 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
96 int noacpi;
97 module_param(noacpi, int, 0444);
98 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
100 MODULE_AUTHOR("Jeff Garzik");
101 MODULE_DESCRIPTION("Library module for ATA devices");
102 MODULE_LICENSE("GPL");
103 MODULE_VERSION(DRV_VERSION);
107 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
108 * @tf: Taskfile to convert
109 * @fis: Buffer into which data will be output
110 * @pmp: Port multiplier port
112 * Converts a standard ATA taskfile to a Serial ATA
113 * FIS structure (Register - Host to Device).
115 * LOCKING:
116 * Inherited from caller.
119 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
121 fis[0] = 0x27; /* Register - Host to Device FIS */
122 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
123 bit 7 indicates Command FIS */
124 fis[2] = tf->command;
125 fis[3] = tf->feature;
127 fis[4] = tf->lbal;
128 fis[5] = tf->lbam;
129 fis[6] = tf->lbah;
130 fis[7] = tf->device;
132 fis[8] = tf->hob_lbal;
133 fis[9] = tf->hob_lbam;
134 fis[10] = tf->hob_lbah;
135 fis[11] = tf->hob_feature;
137 fis[12] = tf->nsect;
138 fis[13] = tf->hob_nsect;
139 fis[14] = 0;
140 fis[15] = tf->ctl;
142 fis[16] = 0;
143 fis[17] = 0;
144 fis[18] = 0;
145 fis[19] = 0;
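/*
 * Illustrative example of the layout built above: for a Host-to-Device
 * command FIS with pmp == 0, the first two bytes are fis[0] == 0x27
 * (FIS type) and fis[1] == 0x80 (bit 7 marks a command FIS, the low
 * nibble carries the port multiplier port); the remaining bytes simply
 * mirror the taskfile registers.
 */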
149 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
150 * @fis: Buffer from which data will be input
151 * @tf: Taskfile to output
153 * Converts a serial ATA FIS structure to a standard ATA taskfile.
155 * LOCKING:
156 * Inherited from caller.
159 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
161 tf->command = fis[2]; /* status */
162 tf->feature = fis[3]; /* error */
164 tf->lbal = fis[4];
165 tf->lbam = fis[5];
166 tf->lbah = fis[6];
167 tf->device = fis[7];
169 tf->hob_lbal = fis[8];
170 tf->hob_lbam = fis[9];
171 tf->hob_lbah = fis[10];
173 tf->nsect = fis[12];
174 tf->hob_nsect = fis[13];
177 static const u8 ata_rw_cmds[] = {
178 /* pio multi */
179 ATA_CMD_READ_MULTI,
180 ATA_CMD_WRITE_MULTI,
181 ATA_CMD_READ_MULTI_EXT,
182 ATA_CMD_WRITE_MULTI_EXT,
186 ATA_CMD_WRITE_MULTI_FUA_EXT,
187 /* pio */
188 ATA_CMD_PIO_READ,
189 ATA_CMD_PIO_WRITE,
190 ATA_CMD_PIO_READ_EXT,
191 ATA_CMD_PIO_WRITE_EXT,
196 /* dma */
197 ATA_CMD_READ,
198 ATA_CMD_WRITE,
199 ATA_CMD_READ_EXT,
200 ATA_CMD_WRITE_EXT,
204 ATA_CMD_WRITE_FUA_EXT
208 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
209 * @tf: command to examine and configure
210 * @dev: device tf belongs to
212 * Examine the device configuration and tf->flags to calculate
213 * the proper read/write commands and protocol to use.
215 * LOCKING:
216 * caller.
218 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
220 u8 cmd;
222 int index, fua, lba48, write;
224 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
225 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
226 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
228 if (dev->flags & ATA_DFLAG_PIO) {
229 tf->protocol = ATA_PROT_PIO;
230 index = dev->multi_count ? 0 : 8;
231 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
232 /* Unable to use DMA due to host limitation */
233 tf->protocol = ATA_PROT_PIO;
234 index = dev->multi_count ? 0 : 8;
235 } else {
236 tf->protocol = ATA_PROT_DMA;
237 index = 16;
240 cmd = ata_rw_cmds[index + fua + lba48 + write];
241 if (cmd) {
242 tf->command = cmd;
243 return 0;
245 return -1;
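/*
 * Worked example of the lookup above: a DMA, LBA48, FUA write selects
 * index 16 (dma group) + 4 (fua) + 2 (lba48) + 1 (write) == 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT.  A zero entry at the computed index means the
 * combination has no command, so -1 is returned.
 */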
249 * ata_tf_read_block - Read block address from ATA taskfile
250 * @tf: ATA taskfile of interest
251 * @dev: ATA device @tf belongs to
253 * LOCKING:
254 * None.
256 * Read block address from @tf. This function can handle all
257 * three address formats - LBA, LBA48 and CHS. tf->protocol and
258 * flags select the address format to use.
260 * RETURNS:
261 * Block address read from @tf.
263 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
265 u64 block = 0;
267 if (tf->flags & ATA_TFLAG_LBA) {
268 if (tf->flags & ATA_TFLAG_LBA48) {
269 block |= (u64)tf->hob_lbah << 40;
270 block |= (u64)tf->hob_lbam << 32;
271 block |= tf->hob_lbal << 24;
272 } else
273 block |= (tf->device & 0xf) << 24;
275 block |= tf->lbah << 16;
276 block |= tf->lbam << 8;
277 block |= tf->lbal;
278 } else {
279 u32 cyl, head, sect;
281 cyl = tf->lbam | (tf->lbah << 8);
282 head = tf->device & 0xf;
283 sect = tf->lbal;
285 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
288 return block;
292 * ata_build_rw_tf - Build ATA taskfile for given read/write request
293 * @tf: Target ATA taskfile
294 * @dev: ATA device @tf belongs to
295 * @block: Block address
296 * @n_block: Number of blocks
297 * @tf_flags: RW/FUA etc...
298 * @tag: tag
300 * LOCKING:
301 * None.
303 * Build ATA taskfile @tf for read/write request described by
304 * @block, @n_block, @tf_flags and @tag on @dev.
306 * RETURNS:
308 * 0 on success, -ERANGE if the request is too large for @dev,
309 * -EINVAL if the request is invalid.
311 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
312 u64 block, u32 n_block, unsigned int tf_flags,
313 unsigned int tag)
315 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
316 tf->flags |= tf_flags;
318 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
319 /* yay, NCQ */
320 if (!lba_48_ok(block, n_block))
321 return -ERANGE;
323 tf->protocol = ATA_PROT_NCQ;
324 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
326 if (tf->flags & ATA_TFLAG_WRITE)
327 tf->command = ATA_CMD_FPDMA_WRITE;
328 else
329 tf->command = ATA_CMD_FPDMA_READ;
331 tf->nsect = tag << 3;
332 tf->hob_feature = (n_block >> 8) & 0xff;
333 tf->feature = n_block & 0xff;
335 tf->hob_lbah = (block >> 40) & 0xff;
336 tf->hob_lbam = (block >> 32) & 0xff;
337 tf->hob_lbal = (block >> 24) & 0xff;
338 tf->lbah = (block >> 16) & 0xff;
339 tf->lbam = (block >> 8) & 0xff;
340 tf->lbal = block & 0xff;
342 tf->device = 1 << 6;
343 if (tf->flags & ATA_TFLAG_FUA)
344 tf->device |= 1 << 7;
345 } else if (dev->flags & ATA_DFLAG_LBA) {
346 tf->flags |= ATA_TFLAG_LBA;
348 if (lba_28_ok(block, n_block)) {
349 /* use LBA28 */
350 tf->device |= (block >> 24) & 0xf;
351 } else if (lba_48_ok(block, n_block)) {
352 if (!(dev->flags & ATA_DFLAG_LBA48))
353 return -ERANGE;
355 /* use LBA48 */
356 tf->flags |= ATA_TFLAG_LBA48;
358 tf->hob_nsect = (n_block >> 8) & 0xff;
360 tf->hob_lbah = (block >> 40) & 0xff;
361 tf->hob_lbam = (block >> 32) & 0xff;
362 tf->hob_lbal = (block >> 24) & 0xff;
363 } else
364 /* request too large even for LBA48 */
365 return -ERANGE;
367 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
368 return -EINVAL;
370 tf->nsect = n_block & 0xff;
372 tf->lbah = (block >> 16) & 0xff;
373 tf->lbam = (block >> 8) & 0xff;
374 tf->lbal = block & 0xff;
376 tf->device |= ATA_LBA;
377 } else {
378 /* CHS */
379 u32 sect, head, cyl, track;
381 /* The request -may- be too large for CHS addressing. */
382 if (!lba_28_ok(block, n_block))
383 return -ERANGE;
385 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
386 return -EINVAL;
388 /* Convert LBA to CHS */
389 track = (u32)block / dev->sectors;
390 cyl = track / dev->heads;
391 head = track % dev->heads;
392 sect = (u32)block % dev->sectors + 1;
394 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
395 (u32)block, track, cyl, head, sect);
397 /* Check whether the converted CHS can fit.
398 Cylinder: 0-65535
399 Head: 0-15
400 Sector: 1-255*/
401 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
402 return -ERANGE;
404 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
405 tf->lbal = sect;
406 tf->lbam = cyl;
407 tf->lbah = cyl >> 8;
408 tf->device |= head;
411 return 0;
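/*
 * Worked example for the CHS branch above (hypothetical geometry): with
 * dev->heads == 16 and dev->sectors == 63, block 10000 converts to
 * track = 10000 / 63 = 158, cyl = 158 / 16 = 9, head = 158 % 16 = 14,
 * sect = 10000 % 63 + 1 = 47, which fits the cylinder/head/sector
 * limits checked above.
 */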
415 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
416 * @pio_mask: pio_mask
417 * @mwdma_mask: mwdma_mask
418 * @udma_mask: udma_mask
420 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
421 * unsigned int xfer_mask.
423 * LOCKING:
424 * None.
426 * RETURNS:
427 * Packed xfer_mask.
429 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
430 unsigned int mwdma_mask,
431 unsigned int udma_mask)
433 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
434 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
435 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
439 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
440 * @xfer_mask: xfer_mask to unpack
441 * @pio_mask: resulting pio_mask
442 * @mwdma_mask: resulting mwdma_mask
443 * @udma_mask: resulting udma_mask
445 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
446 * Any NULL destination masks will be ignored.
448 static void ata_unpack_xfermask(unsigned int xfer_mask,
449 unsigned int *pio_mask,
450 unsigned int *mwdma_mask,
451 unsigned int *udma_mask)
453 if (pio_mask)
454 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
455 if (mwdma_mask)
456 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
457 if (udma_mask)
458 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
461 static const struct ata_xfer_ent {
462 int shift, bits;
463 u8 base;
464 } ata_xfer_tbl[] = {
465 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
466 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
467 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
468 { -1, },
472 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
473 * @xfer_mask: xfer_mask of interest
475 * Return matching XFER_* value for @xfer_mask. Only the highest
476 * bit of @xfer_mask is considered.
478 * LOCKING:
479 * None.
481 * RETURNS:
482 * Matching XFER_* value, 0 if no match found.
484 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
486 int highbit = fls(xfer_mask) - 1;
487 const struct ata_xfer_ent *ent;
489 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
490 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
491 return ent->base + highbit - ent->shift;
492 return 0;
496 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
497 * @xfer_mode: XFER_* of interest
499 * Return matching xfer_mask for @xfer_mode.
501 * LOCKING:
502 * None.
504 * RETURNS:
505 * Matching xfer_mask, 0 if no match found.
507 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
509 const struct ata_xfer_ent *ent;
511 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
512 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
513 return 1 << (ent->shift + xfer_mode - ent->base);
514 return 0;
518 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
519 * @xfer_mode: XFER_* of interest
521 * Return matching xfer_shift for @xfer_mode.
523 * LOCKING:
524 * None.
526 * RETURNS:
527 * Matching xfer_shift, -1 if no match found.
529 static int ata_xfer_mode2shift(unsigned int xfer_mode)
531 const struct ata_xfer_ent *ent;
533 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
534 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
535 return ent->shift;
536 return -1;
540 * ata_mode_string - convert xfer_mask to string
541 * @xfer_mask: mask of bits supported; only highest bit counts.
543 * Determine string which represents the highest speed
544 * (highest bit in @xfer_mask).
546 * LOCKING:
547 * None.
549 * RETURNS:
550 * Constant C string representing highest speed listed in
551 * @xfer_mask, or the constant C string "<n/a>".
553 static const char *ata_mode_string(unsigned int xfer_mask)
555 static const char * const xfer_mode_str[] = {
556 "PIO0",
557 "PIO1",
558 "PIO2",
559 "PIO3",
560 "PIO4",
561 "PIO5",
562 "PIO6",
563 "MWDMA0",
564 "MWDMA1",
565 "MWDMA2",
566 "MWDMA3",
567 "MWDMA4",
568 "UDMA/16",
569 "UDMA/25",
570 "UDMA/33",
571 "UDMA/44",
572 "UDMA/66",
573 "UDMA/100",
574 "UDMA/133",
575 "UDMA7",
577 int highbit;
579 highbit = fls(xfer_mask) - 1;
580 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
581 return xfer_mode_str[highbit];
582 return "<n/a>";
585 static const char *sata_spd_string(unsigned int spd)
587 static const char * const spd_str[] = {
588 "1.5 Gbps",
589 "3.0 Gbps",
592 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
593 return "<unknown>";
594 return spd_str[spd - 1];
597 void ata_dev_disable(struct ata_device *dev)
599 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
600 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
601 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
602 ATA_DNXFER_QUIET);
603 dev->class++;
608 * ata_devchk - PATA device presence detection
609 * @ap: ATA channel to examine
610 * @device: Device to examine (starting at zero)
612 * This technique was originally described in
613 * Hale Landis's ATADRVR (www.ata-atapi.com), and
614 * later found its way into the ATA/ATAPI spec.
616 * Write a pattern to the ATA shadow registers,
617 * and if a device is present, it will respond by
618 * correctly storing and echoing back the
619 * ATA shadow register contents.
621 * LOCKING:
622 * caller.
625 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
627 struct ata_ioports *ioaddr = &ap->ioaddr;
628 u8 nsect, lbal;
630 ap->ops->dev_select(ap, device);
632 iowrite8(0x55, ioaddr->nsect_addr);
633 iowrite8(0xaa, ioaddr->lbal_addr);
635 iowrite8(0xaa, ioaddr->nsect_addr);
636 iowrite8(0x55, ioaddr->lbal_addr);
638 iowrite8(0x55, ioaddr->nsect_addr);
639 iowrite8(0xaa, ioaddr->lbal_addr);
641 nsect = ioread8(ioaddr->nsect_addr);
642 lbal = ioread8(ioaddr->lbal_addr);
644 if ((nsect == 0x55) && (lbal == 0xaa))
645 return 1; /* we found a device */
647 return 0; /* nothing found */
651 * ata_dev_classify - determine device type based on ATA-spec signature
652 * @tf: ATA taskfile register set for device to be identified
654 * Determine from taskfile register contents whether a device is
655 * ATA or ATAPI, as per "Signature and persistence" section
656 * of ATA/PI spec (volume 1, sect 5.14).
658 * LOCKING:
659 * None.
661 * RETURNS:
662 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
663 * in the event of failure.
666 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
668 /* Apple's open source Darwin code hints that some devices only
669 * put a proper signature into the LBA mid/high registers,
670 * so we only check those. It's sufficient for uniqueness.
673 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
674 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
675 DPRINTK("found ATA device by sig\n");
676 return ATA_DEV_ATA;
679 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
680 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
681 DPRINTK("found ATAPI device by sig\n");
682 return ATA_DEV_ATAPI;
685 DPRINTK("unknown device\n");
686 return ATA_DEV_UNKNOWN;
690 * ata_dev_try_classify - Parse returned ATA device signature
691 * @ap: ATA channel to examine
692 * @device: Device to examine (starting at zero)
693 * @r_err: Value of error register on completion
695 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
696 * an ATA/ATAPI-defined set of values is placed in the ATA
697 * shadow registers, indicating the results of device detection
698 * and diagnostics.
700 * Select the ATA device, and read the values from the ATA shadow
701 * registers. Then parse according to the Error register value,
702 * and the spec-defined values examined by ata_dev_classify().
704 * LOCKING:
705 * caller.
707 * RETURNS:
708 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
711 unsigned int
712 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
714 struct ata_taskfile tf;
715 unsigned int class;
716 u8 err;
718 ap->ops->dev_select(ap, device);
720 memset(&tf, 0, sizeof(tf));
722 ap->ops->tf_read(ap, &tf);
723 err = tf.feature;
724 if (r_err)
725 *r_err = err;
727 /* see if device passed diags: if master then continue and warn later */
728 if (err == 0 && device == 0)
729 /* diagnostic fail : do nothing _YET_ */
730 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
731 else if (err == 1)
732 /* do nothing */ ;
733 else if ((device == 0) && (err == 0x81))
734 /* do nothing */ ;
735 else
736 return ATA_DEV_NONE;
738 /* determine if device is ATA or ATAPI */
739 class = ata_dev_classify(&tf);
741 if (class == ATA_DEV_UNKNOWN)
742 return ATA_DEV_NONE;
743 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
744 return ATA_DEV_NONE;
745 return class;
749 * ata_id_string - Convert IDENTIFY DEVICE page into string
750 * @id: IDENTIFY DEVICE results we will examine
751 * @s: string into which data is output
752 * @ofs: offset into identify device page
753 * @len: length of string to return. must be an even number.
755 * The strings in the IDENTIFY DEVICE page are broken up into
756 * 16-bit chunks. Run through the string, and output each
757 * 8-bit chunk linearly, regardless of platform.
759 * LOCKING:
760 * caller.
763 void ata_id_string(const u16 *id, unsigned char *s,
764 unsigned int ofs, unsigned int len)
766 unsigned int c;
768 while (len > 0) {
769 c = id[ofs] >> 8;
770 *s = c;
771 s++;
773 c = id[ofs] & 0xff;
774 *s = c;
775 s++;
777 ofs++;
778 len -= 2;
783 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
784 * @id: IDENTIFY DEVICE results we will examine
785 * @s: string into which data is output
786 * @ofs: offset into identify device page
787 * @len: length of string to return. must be an odd number.
789 * This function is identical to ata_id_string except that it
790 * trims trailing spaces and terminates the resulting string with
791 * null. @len must be actual maximum length (even number) + 1.
793 * LOCKING:
794 * caller.
796 void ata_id_c_string(const u16 *id, unsigned char *s,
797 unsigned int ofs, unsigned int len)
799 unsigned char *p;
801 WARN_ON(!(len & 1));
803 ata_id_string(id, s, ofs, len - 1);
805 p = s + strnlen(s, len - 1);
806 while (p > s && p[-1] == ' ')
807 p--;
808 *p = '\0';
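/*
 * Typical use, matching ata_dev_configure() later in this file: pass a
 * buffer one byte larger than the fixed-width IDENTIFY field so the
 * terminating NUL fits, e.g.
 *
 *	char model[ATA_ID_PROD_LEN + 1];
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */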
811 static u64 ata_id_n_sectors(const u16 *id)
813 if (ata_id_has_lba(id)) {
814 if (ata_id_has_lba48(id))
815 return ata_id_u64(id, 100);
816 else
817 return ata_id_u32(id, 60);
818 } else {
819 if (ata_id_current_chs_valid(id))
820 return ata_id_u32(id, 57);
821 else
822 return id[1] * id[3] * id[6];
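/*
 * Note: words 100-103 of the IDENTIFY data hold the LBA48 capacity and
 * words 60-61 the LBA28 capacity; the final fallback multiplies the
 * default cylinder (word 1), head (word 3) and sectors-per-track
 * (word 6) counts, i.e. the default CHS capacity.
 */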
827 * ata_noop_dev_select - Select device 0/1 on ATA bus
828 * @ap: ATA channel to manipulate
829 * @device: ATA device (numbered from zero) to select
831 * This function performs no operation.
833 * May be used as the dev_select() entry in ata_port_operations.
835 * LOCKING:
836 * caller.
838 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
844 * ata_std_dev_select - Select device 0/1 on ATA bus
845 * @ap: ATA channel to manipulate
846 * @device: ATA device (numbered from zero) to select
848 * Use the method defined in the ATA specification to
849 * make either device 0, or device 1, active on the
850 * ATA channel. Works with both PIO and MMIO.
852 * May be used as the dev_select() entry in ata_port_operations.
854 * LOCKING:
855 * caller.
858 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
860 u8 tmp;
862 if (device == 0)
863 tmp = ATA_DEVICE_OBS;
864 else
865 tmp = ATA_DEVICE_OBS | ATA_DEV1;
867 iowrite8(tmp, ap->ioaddr.device_addr);
868 ata_pause(ap); /* needed; also flushes, for mmio */
872 * ata_dev_select - Select device 0/1 on ATA bus
873 * @ap: ATA channel to manipulate
874 * @device: ATA device (numbered from zero) to select
875 * @wait: non-zero to wait for Status register BSY bit to clear
876 * @can_sleep: non-zero if context allows sleeping
878 * Use the method defined in the ATA specification to
879 * make either device 0, or device 1, active on the
880 * ATA channel.
882 * This is a high-level version of ata_std_dev_select(),
883 * which additionally provides the services of inserting
884 * the proper pauses and status polling, where needed.
886 * LOCKING:
887 * caller.
890 void ata_dev_select(struct ata_port *ap, unsigned int device,
891 unsigned int wait, unsigned int can_sleep)
893 if (ata_msg_probe(ap))
894 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
895 "device %u, wait %u\n", device, wait);
897 if (wait)
898 ata_wait_idle(ap);
900 ap->ops->dev_select(ap, device);
902 if (wait) {
903 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
904 msleep(150);
905 ata_wait_idle(ap);
910 * ata_dump_id - IDENTIFY DEVICE info debugging output
911 * @id: IDENTIFY DEVICE page to dump
913 * Dump selected 16-bit words from the given IDENTIFY DEVICE
914 * page.
916 * LOCKING:
917 * caller.
920 static inline void ata_dump_id(const u16 *id)
922 DPRINTK("49==0x%04x "
923 "53==0x%04x "
924 "63==0x%04x "
925 "64==0x%04x "
926 "75==0x%04x \n",
927 id[49],
928 id[53],
929 id[63],
930 id[64],
931 id[75]);
932 DPRINTK("80==0x%04x "
933 "81==0x%04x "
934 "82==0x%04x "
935 "83==0x%04x "
936 "84==0x%04x \n",
937 id[80],
938 id[81],
939 id[82],
940 id[83],
941 id[84]);
942 DPRINTK("88==0x%04x "
943 "93==0x%04x\n",
944 id[88],
945 id[93]);
949 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
950 * @id: IDENTIFY data to compute xfer mask from
952 * Compute the xfermask for this device. This is not as trivial
953 * as it seems if we must consider early devices correctly.
955 * FIXME: pre-IDE drive timing (do we care?).
957 * LOCKING:
958 * None.
960 * RETURNS:
961 * Computed xfermask
963 static unsigned int ata_id_xfermask(const u16 *id)
965 unsigned int pio_mask, mwdma_mask, udma_mask;
967 /* Usual case. Word 53 indicates word 64 is valid */
968 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
969 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
970 pio_mask <<= 3;
971 pio_mask |= 0x7;
972 } else {
973 /* If word 64 isn't valid then Word 51 high byte holds
974 * the PIO timing number for the maximum. Turn it into
975 * a mask.
977 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
978 if (mode < 5) /* Valid PIO range */
979 pio_mask = (2 << mode) - 1;
980 else
981 pio_mask = 1;
983 /* But wait.. there's more. Design your standards by
984 * committee and you too can get a free iordy field to
985 * process. However, it's the speeds, not the modes, that
986 * are supported... Note drivers using the timing API
987 * will get this right anyway
991 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
993 if (ata_id_is_cfa(id)) {
995 * Process compact flash extended modes
997 int pio = id[163] & 0x7;
998 int dma = (id[163] >> 3) & 7;
1000 if (pio)
1001 pio_mask |= (1 << 5);
1002 if (pio > 1)
1003 pio_mask |= (1 << 6);
1004 if (dma)
1005 mwdma_mask |= (1 << 3);
1006 if (dma > 1)
1007 mwdma_mask |= (1 << 4);
1010 udma_mask = 0;
1011 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1012 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1014 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
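/*
 * Worked example: with word 53 flagging the PIO modes word as valid and
 * id[ATA_ID_PIO_MODES] == 0x0003 (PIO3 and PIO4 advertised), the code
 * above yields pio_mask = (0x03 << 3) | 0x7 = 0x1f, i.e. PIO0-PIO4.
 */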
1018 * ata_port_queue_task - Queue port_task
1019 * @ap: The ata_port to queue port_task for
1020 * @fn: workqueue function to be scheduled
1021 * @data: data for @fn to use
1022 * @delay: delay time for workqueue function
1024 * Schedule @fn(@data) for execution after @delay jiffies using
1025 * port_task. There is one port_task per port and it's the
1026 * user(low level driver)'s responsibility to make sure that only
1027 * one task is active at any given time.
1029 * libata core layer takes care of synchronization between
1030 * port_task and EH. ata_port_queue_task() may be ignored for EH
1031 * synchronization.
1033 * LOCKING:
1034 * Inherited from caller.
1036 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1037 unsigned long delay)
1039 int rc;
1041 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
1042 return;
1044 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1045 ap->port_task_data = data;
1047 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
1049 /* rc == 0 means that another user is using port task */
1050 WARN_ON(rc == 0);
1054 * ata_port_flush_task - Flush port_task
1055 * @ap: The ata_port to flush port_task for
1057 * After this function completes, port_task is guaranteed not to
1058 * be running or scheduled.
1060 * LOCKING:
1061 * Kernel thread context (may sleep)
1063 void ata_port_flush_task(struct ata_port *ap)
1065 unsigned long flags;
1067 DPRINTK("ENTER\n");
1069 spin_lock_irqsave(ap->lock, flags);
1070 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
1071 spin_unlock_irqrestore(ap->lock, flags);
1073 DPRINTK("flush #1\n");
1074 flush_workqueue(ata_wq);
1077 * At this point, if a task is running, it's guaranteed to see
1078 * the FLUSH flag; thus, it will never queue pio tasks again.
1079 * Cancel and flush.
1081 if (!cancel_delayed_work(&ap->port_task)) {
1082 if (ata_msg_ctl(ap))
1083 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1084 __FUNCTION__);
1085 flush_workqueue(ata_wq);
1088 spin_lock_irqsave(ap->lock, flags);
1089 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
1090 spin_unlock_irqrestore(ap->lock, flags);
1092 if (ata_msg_ctl(ap))
1093 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1096 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1098 struct completion *waiting = qc->private_data;
1100 complete(waiting);
1104 * ata_exec_internal_sg - execute libata internal command
1105 * @dev: Device to which the command is sent
1106 * @tf: Taskfile registers for the command and the result
1107 * @cdb: CDB for packet command
1108 * @dma_dir: Data transfer direction of the command
1109 * @sg: sg list for the data buffer of the command
1110 * @n_elem: Number of sg entries
1112 * Executes libata internal command with timeout. @tf contains
1113 * command on entry and result on return. Timeout and error
1114 * conditions are reported via return value. No recovery action
1115 * is taken after a command times out. It's the caller's duty to
1116 * clean up after timeout.
1118 * LOCKING:
1119 * None. Should be called with kernel context, might sleep.
1121 * RETURNS:
1122 * Zero on success, AC_ERR_* mask on failure
1124 unsigned ata_exec_internal_sg(struct ata_device *dev,
1125 struct ata_taskfile *tf, const u8 *cdb,
1126 int dma_dir, struct scatterlist *sg,
1127 unsigned int n_elem)
1129 struct ata_port *ap = dev->ap;
1130 u8 command = tf->command;
1131 struct ata_queued_cmd *qc;
1132 unsigned int tag, preempted_tag;
1133 u32 preempted_sactive, preempted_qc_active;
1134 DECLARE_COMPLETION_ONSTACK(wait);
1135 unsigned long flags;
1136 unsigned int err_mask;
1137 int rc;
1139 spin_lock_irqsave(ap->lock, flags);
1141 /* no internal command while frozen */
1142 if (ap->pflags & ATA_PFLAG_FROZEN) {
1143 spin_unlock_irqrestore(ap->lock, flags);
1144 return AC_ERR_SYSTEM;
1147 /* initialize internal qc */
1149 /* XXX: Tag 0 is used for drivers with legacy EH as some
1150 * drivers choke if any other tag is given. This breaks
1151 * ata_tag_internal() test for those drivers. Don't use new
1152 * EH stuff without converting to it.
1154 if (ap->ops->error_handler)
1155 tag = ATA_TAG_INTERNAL;
1156 else
1157 tag = 0;
1159 if (test_and_set_bit(tag, &ap->qc_allocated))
1160 BUG();
1161 qc = __ata_qc_from_tag(ap, tag);
1163 qc->tag = tag;
1164 qc->scsicmd = NULL;
1165 qc->ap = ap;
1166 qc->dev = dev;
1167 ata_qc_reinit(qc);
1169 preempted_tag = ap->active_tag;
1170 preempted_sactive = ap->sactive;
1171 preempted_qc_active = ap->qc_active;
1172 ap->active_tag = ATA_TAG_POISON;
1173 ap->sactive = 0;
1174 ap->qc_active = 0;
1176 /* prepare & issue qc */
1177 qc->tf = *tf;
1178 if (cdb)
1179 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1180 qc->flags |= ATA_QCFLAG_RESULT_TF;
1181 qc->dma_dir = dma_dir;
1182 if (dma_dir != DMA_NONE) {
1183 unsigned int i, buflen = 0;
1185 for (i = 0; i < n_elem; i++)
1186 buflen += sg[i].length;
1188 ata_sg_init(qc, sg, n_elem);
1189 qc->nbytes = buflen;
1192 qc->private_data = &wait;
1193 qc->complete_fn = ata_qc_complete_internal;
1195 ata_qc_issue(qc);
1197 spin_unlock_irqrestore(ap->lock, flags);
1199 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1201 ata_port_flush_task(ap);
1203 if (!rc) {
1204 spin_lock_irqsave(ap->lock, flags);
1206 /* We're racing with irq here. If we lose, the
1207 * following test prevents us from completing the qc
1208 * twice. If we win, the port is frozen and will be
1209 * cleaned up by ->post_internal_cmd().
1211 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1212 qc->err_mask |= AC_ERR_TIMEOUT;
1214 if (ap->ops->error_handler)
1215 ata_port_freeze(ap);
1216 else
1217 ata_qc_complete(qc);
1219 if (ata_msg_warn(ap))
1220 ata_dev_printk(dev, KERN_WARNING,
1221 "qc timeout (cmd 0x%x)\n", command);
1224 spin_unlock_irqrestore(ap->lock, flags);
1227 /* do post_internal_cmd */
1228 if (ap->ops->post_internal_cmd)
1229 ap->ops->post_internal_cmd(qc);
1231 if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
1232 if (ata_msg_warn(ap))
1233 ata_dev_printk(dev, KERN_WARNING,
1234 "zero err_mask for failed "
1235 "internal command, assuming AC_ERR_OTHER\n");
1236 qc->err_mask |= AC_ERR_OTHER;
1239 /* finish up */
1240 spin_lock_irqsave(ap->lock, flags);
1242 *tf = qc->result_tf;
1243 err_mask = qc->err_mask;
1245 ata_qc_free(qc);
1246 ap->active_tag = preempted_tag;
1247 ap->sactive = preempted_sactive;
1248 ap->qc_active = preempted_qc_active;
1250 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1251 * Until those drivers are fixed, we detect the condition
1252 * here, fail the command with AC_ERR_SYSTEM and reenable the
1253 * port.
1255 * Note that this doesn't change any behavior as internal
1256 * command failure results in disabling the device in the
1257 * higher layer for LLDDs without new reset/EH callbacks.
1259 * Kill the following code as soon as those drivers are fixed.
1261 if (ap->flags & ATA_FLAG_DISABLED) {
1262 err_mask |= AC_ERR_SYSTEM;
1263 ata_port_probe(ap);
1266 spin_unlock_irqrestore(ap->lock, flags);
1268 return err_mask;
1272 * ata_exec_internal - execute libata internal command
1273 * @dev: Device to which the command is sent
1274 * @tf: Taskfile registers for the command and the result
1275 * @cdb: CDB for packet command
1276 * @dma_dir: Data transfer direction of the command
1277 * @buf: Data buffer of the command
1278 * @buflen: Length of data buffer
1280 * Wrapper around ata_exec_internal_sg() which takes simple
1281 * buffer instead of sg list.
1283 * LOCKING:
1284 * None. Should be called with kernel context, might sleep.
1286 * RETURNS:
1287 * Zero on success, AC_ERR_* mask on failure
1289 unsigned ata_exec_internal(struct ata_device *dev,
1290 struct ata_taskfile *tf, const u8 *cdb,
1291 int dma_dir, void *buf, unsigned int buflen)
1293 struct scatterlist *psg = NULL, sg;
1294 unsigned int n_elem = 0;
1296 if (dma_dir != DMA_NONE) {
1297 WARN_ON(!buf);
1298 sg_init_one(&sg, buf, buflen);
1299 psg = &sg;
1300 n_elem++;
1303 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
1307 * ata_do_simple_cmd - execute simple internal command
1308 * @dev: Device to which the command is sent
1309 * @cmd: Opcode to execute
1311 * Execute a 'simple' command that only consists of the opcode
1312 * 'cmd' itself, without filling any other registers
1314 * LOCKING:
1315 * Kernel thread context (may sleep).
1317 * RETURNS:
1318 * Zero on success, AC_ERR_* mask on failure
1320 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1322 struct ata_taskfile tf;
1324 ata_tf_init(dev, &tf);
1326 tf.command = cmd;
1327 tf.flags |= ATA_TFLAG_DEVICE;
1328 tf.protocol = ATA_PROT_NODATA;
1330 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1334 * ata_pio_need_iordy - check if iordy needed
1335 * @adev: ATA device
1337 * Check if the current speed of the device requires IORDY. Used
1338 * by various controllers for chip configuration.
1341 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1343 int pio;
1344 int speed = adev->pio_mode - XFER_PIO_0;
1346 if (speed < 2)
1347 return 0;
1348 if (speed > 2)
1349 return 1;
1351 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1353 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1354 pio = adev->id[ATA_ID_EIDE_PIO];
1355 /* Is the speed faster than the drive allows non IORDY ? */
1356 if (pio) {
1357 /* This is cycle times not frequency - watch the logic! */
1358 if (pio > 240) /* PIO2 is 240nS per cycle */
1359 return 1;
1360 return 0;
1363 return 0;
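/*
 * Example: a drive running PIO2 whose EIDE words are valid and which
 * reports a minimum non-IORDY cycle time of 383 ns cannot meet the
 * 240 ns PIO2 cycle time without flow control, so the check above
 * returns 1 (IORDY needed).
 */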
1367 * ata_dev_read_id - Read ID data from the specified device
1368 * @dev: target device
1369 * @p_class: pointer to class of the target device (may be changed)
1370 * @flags: ATA_READID_* flags
1371 * @id: buffer to read IDENTIFY data into
1373 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1374 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1375 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1376 * for pre-ATA4 drives.
1378 * LOCKING:
1379 * Kernel thread context (may sleep)
1381 * RETURNS:
1382 * 0 on success, -errno otherwise.
1384 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1385 unsigned int flags, u16 *id)
1387 struct ata_port *ap = dev->ap;
1388 unsigned int class = *p_class;
1389 struct ata_taskfile tf;
1390 unsigned int err_mask = 0;
1391 const char *reason;
1392 int rc;
1394 if (ata_msg_ctl(ap))
1395 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1397 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1399 retry:
1400 ata_tf_init(dev, &tf);
1402 switch (class) {
1403 case ATA_DEV_ATA:
1404 tf.command = ATA_CMD_ID_ATA;
1405 break;
1406 case ATA_DEV_ATAPI:
1407 tf.command = ATA_CMD_ID_ATAPI;
1408 break;
1409 default:
1410 rc = -ENODEV;
1411 reason = "unsupported class";
1412 goto err_out;
1415 tf.protocol = ATA_PROT_PIO;
1417 /* Some devices choke if TF registers contain garbage. Make
1418 * sure those are properly initialized.
1420 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1422 /* Device presence detection is unreliable on some
1423 * controllers. Always poll IDENTIFY if available.
1425 tf.flags |= ATA_TFLAG_POLLING;
1427 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1428 id, sizeof(id[0]) * ATA_ID_WORDS);
1429 if (err_mask) {
1430 if (err_mask & AC_ERR_NODEV_HINT) {
1431 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1432 ap->print_id, dev->devno);
1433 return -ENOENT;
1436 rc = -EIO;
1437 reason = "I/O error";
1438 goto err_out;
1441 swap_buf_le16(id, ATA_ID_WORDS);
1443 /* sanity check */
1444 rc = -EINVAL;
1445 reason = "device reports illegal type";
1447 if (class == ATA_DEV_ATA) {
1448 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1449 goto err_out;
1450 } else {
1451 if (ata_id_is_ata(id))
1452 goto err_out;
1455 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1457 * The exact sequence expected by certain pre-ATA4 drives is:
1458 * SRST RESET
1459 * IDENTIFY
1460 * INITIALIZE DEVICE PARAMETERS
1461 * anything else..
1462 * Some drives were very specific about that exact sequence.
1464 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1465 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1466 if (err_mask) {
1467 rc = -EIO;
1468 reason = "INIT_DEV_PARAMS failed";
1469 goto err_out;
1472 /* current CHS translation info (id[53-58]) might be
1473 * changed. reread the identify device info.
1475 flags &= ~ATA_READID_POSTRESET;
1476 goto retry;
1480 *p_class = class;
1482 return 0;
1484 err_out:
1485 if (ata_msg_warn(ap))
1486 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1487 "(%s, err_mask=0x%x)\n", reason, err_mask);
1488 return rc;
1491 static inline u8 ata_dev_knobble(struct ata_device *dev)
1493 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1496 static void ata_dev_config_ncq(struct ata_device *dev,
1497 char *desc, size_t desc_sz)
1499 struct ata_port *ap = dev->ap;
1500 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1502 if (!ata_id_has_ncq(dev->id)) {
1503 desc[0] = '\0';
1504 return;
1506 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1507 snprintf(desc, desc_sz, "NCQ (not used)");
1508 return;
1510 if (ap->flags & ATA_FLAG_NCQ) {
1511 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1512 dev->flags |= ATA_DFLAG_NCQ;
1515 if (hdepth >= ddepth)
1516 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1517 else
1518 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
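/*
 * Example of the resulting description: a drive advertising a 32-deep
 * queue behind a host that can only queue 31 commands is reported as
 * "NCQ (depth 31/32)"; a drive without NCQ support gets an empty string.
 */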
1521 static void ata_set_port_max_cmd_len(struct ata_port *ap)
1523 int i;
1525 if (ap->scsi_host) {
1526 unsigned int len = 0;
1528 for (i = 0; i < ATA_MAX_DEVICES; i++)
1529 len = max(len, ap->device[i].cdb_len);
1531 ap->scsi_host->max_cmd_len = len;
1536 * ata_dev_configure - Configure the specified ATA/ATAPI device
1537 * @dev: Target device to configure
1539 * Configure @dev according to @dev->id. Generic and low-level
1540 * driver specific fixups are also applied.
1542 * LOCKING:
1543 * Kernel thread context (may sleep)
1545 * RETURNS:
1546 * 0 on success, -errno otherwise
1548 int ata_dev_configure(struct ata_device *dev)
1550 struct ata_port *ap = dev->ap;
1551 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1552 const u16 *id = dev->id;
1553 unsigned int xfer_mask;
1554 char revbuf[7]; /* XYZ-99\0 */
1555 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1556 char modelbuf[ATA_ID_PROD_LEN+1];
1557 int rc;
1559 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1560 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1561 __FUNCTION__);
1562 return 0;
1565 if (ata_msg_probe(ap))
1566 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1568 /* set _SDD */
1569 rc = ata_acpi_push_id(ap, dev->devno);
1570 if (rc) {
1571 ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
1572 rc);
1575 /* retrieve and execute the ATA task file of _GTF */
1576 ata_acpi_exec_tfs(ap);
1578 /* print device capabilities */
1579 if (ata_msg_probe(ap))
1580 ata_dev_printk(dev, KERN_DEBUG,
1581 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1582 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1583 __FUNCTION__,
1584 id[49], id[82], id[83], id[84],
1585 id[85], id[86], id[87], id[88]);
1587 /* initialize to-be-configured parameters */
1588 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1589 dev->max_sectors = 0;
1590 dev->cdb_len = 0;
1591 dev->n_sectors = 0;
1592 dev->cylinders = 0;
1593 dev->heads = 0;
1594 dev->sectors = 0;
1597 * common ATA, ATAPI feature tests
1600 /* find max transfer mode; for printk only */
1601 xfer_mask = ata_id_xfermask(id);
1603 if (ata_msg_probe(ap))
1604 ata_dump_id(id);
1606 /* ATA-specific feature tests */
1607 if (dev->class == ATA_DEV_ATA) {
1608 if (ata_id_is_cfa(id)) {
1609 if (id[162] & 1) /* CPRM may make this media unusable */
1610 ata_dev_printk(dev, KERN_WARNING,
1611 "supports DRM functions and may "
1612 "not be fully accessable.\n");
1613 snprintf(revbuf, 7, "CFA");
1615 else
1616 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1618 dev->n_sectors = ata_id_n_sectors(id);
1620 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1621 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1622 sizeof(fwrevbuf));
1624 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1625 sizeof(modelbuf));
1627 if (dev->id[59] & 0x100)
1628 dev->multi_count = dev->id[59] & 0xff;
1630 if (ata_id_has_lba(id)) {
1631 const char *lba_desc;
1632 char ncq_desc[20];
1634 lba_desc = "LBA";
1635 dev->flags |= ATA_DFLAG_LBA;
1636 if (ata_id_has_lba48(id)) {
1637 dev->flags |= ATA_DFLAG_LBA48;
1638 lba_desc = "LBA48";
1640 if (dev->n_sectors >= (1UL << 28) &&
1641 ata_id_has_flush_ext(id))
1642 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1645 /* config NCQ */
1646 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1648 /* print device info to dmesg */
1649 if (ata_msg_drv(ap) && print_info) {
1650 ata_dev_printk(dev, KERN_INFO,
1651 "%s: %s, %s, max %s\n",
1652 revbuf, modelbuf, fwrevbuf,
1653 ata_mode_string(xfer_mask));
1654 ata_dev_printk(dev, KERN_INFO,
1655 "%Lu sectors, multi %u: %s %s\n",
1656 (unsigned long long)dev->n_sectors,
1657 dev->multi_count, lba_desc, ncq_desc);
1659 } else {
1660 /* CHS */
1662 /* Default translation */
1663 dev->cylinders = id[1];
1664 dev->heads = id[3];
1665 dev->sectors = id[6];
1667 if (ata_id_current_chs_valid(id)) {
1668 /* Current CHS translation is valid. */
1669 dev->cylinders = id[54];
1670 dev->heads = id[55];
1671 dev->sectors = id[56];
1674 /* print device info to dmesg */
1675 if (ata_msg_drv(ap) && print_info) {
1676 ata_dev_printk(dev, KERN_INFO,
1677 "%s: %s, %s, max %s\n",
1678 revbuf, modelbuf, fwrevbuf,
1679 ata_mode_string(xfer_mask));
1680 ata_dev_printk(dev, KERN_INFO,
1681 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1682 (unsigned long long)dev->n_sectors,
1683 dev->multi_count, dev->cylinders,
1684 dev->heads, dev->sectors);
1688 dev->cdb_len = 16;
1691 /* ATAPI-specific feature tests */
1692 else if (dev->class == ATA_DEV_ATAPI) {
1693 char *cdb_intr_string = "";
1695 rc = atapi_cdb_len(id);
1696 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1697 if (ata_msg_warn(ap))
1698 ata_dev_printk(dev, KERN_WARNING,
1699 "unsupported CDB len\n");
1700 rc = -EINVAL;
1701 goto err_out_nosup;
1703 dev->cdb_len = (unsigned int) rc;
1705 if (ata_id_cdb_intr(dev->id)) {
1706 dev->flags |= ATA_DFLAG_CDB_INTR;
1707 cdb_intr_string = ", CDB intr";
1710 /* print device info to dmesg */
1711 if (ata_msg_drv(ap) && print_info)
1712 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1713 ata_mode_string(xfer_mask),
1714 cdb_intr_string);
1717 /* determine max_sectors */
1718 dev->max_sectors = ATA_MAX_SECTORS;
1719 if (dev->flags & ATA_DFLAG_LBA48)
1720 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1722 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1723 /* Let the user know. We don't want to disallow opens for
1724 rescue purposes, or in case the vendor is just a blithering
1725 idiot */
1726 if (print_info) {
1727 ata_dev_printk(dev, KERN_WARNING,
1728 "Drive reports diagnostics failure. This may indicate a drive\n");
1729 ata_dev_printk(dev, KERN_WARNING,
1730 "fault or invalid emulation. Contact drive vendor for information.\n");
1734 ata_set_port_max_cmd_len(ap);
1736 /* limit bridge transfers to udma5, 200 sectors */
1737 if (ata_dev_knobble(dev)) {
1738 if (ata_msg_drv(ap) && print_info)
1739 ata_dev_printk(dev, KERN_INFO,
1740 "applying bridge limits\n");
1741 dev->udma_mask &= ATA_UDMA5;
1742 dev->max_sectors = ATA_MAX_SECTORS;
1745 if (ap->ops->dev_config)
1746 ap->ops->dev_config(ap, dev);
1748 if (ata_msg_probe(ap))
1749 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1750 __FUNCTION__, ata_chk_status(ap));
1751 return 0;
1753 err_out_nosup:
1754 if (ata_msg_probe(ap))
1755 ata_dev_printk(dev, KERN_DEBUG,
1756 "%s: EXIT, err\n", __FUNCTION__);
1757 return rc;
1761 * ata_bus_probe - Reset and probe ATA bus
1762 * @ap: Bus to probe
1764 * Master ATA bus probing function. Initiates a hardware-dependent
1765 * bus reset, then attempts to identify any devices found on
1766 * the bus.
1768 * LOCKING:
1769 * PCI/etc. bus probe sem.
1771 * RETURNS:
1772 * Zero on success, negative errno otherwise.
1775 int ata_bus_probe(struct ata_port *ap)
1777 unsigned int classes[ATA_MAX_DEVICES];
1778 int tries[ATA_MAX_DEVICES];
1779 int i, rc;
1780 struct ata_device *dev;
1782 ata_port_probe(ap);
1784 for (i = 0; i < ATA_MAX_DEVICES; i++)
1785 tries[i] = ATA_PROBE_MAX_TRIES;
1787 retry:
1788 /* reset and determine device classes */
1789 ap->ops->phy_reset(ap);
1791 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1792 dev = &ap->device[i];
1794 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1795 dev->class != ATA_DEV_UNKNOWN)
1796 classes[dev->devno] = dev->class;
1797 else
1798 classes[dev->devno] = ATA_DEV_NONE;
1800 dev->class = ATA_DEV_UNKNOWN;
1803 ata_port_probe(ap);
1805 /* after the reset the device state is PIO 0 and the controller
1806 state is undefined. Record the mode */
1808 for (i = 0; i < ATA_MAX_DEVICES; i++)
1809 ap->device[i].pio_mode = XFER_PIO_0;
1811 /* read IDENTIFY page and configure devices */
1812 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1813 dev = &ap->device[i];
1815 if (tries[i])
1816 dev->class = classes[i];
1818 if (!ata_dev_enabled(dev))
1819 continue;
1821 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1822 dev->id);
1823 if (rc)
1824 goto fail;
1826 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1827 rc = ata_dev_configure(dev);
1828 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
1829 if (rc)
1830 goto fail;
1833 /* configure transfer mode */
1834 rc = ata_set_mode(ap, &dev);
1835 if (rc)
1836 goto fail;
1838 for (i = 0; i < ATA_MAX_DEVICES; i++)
1839 if (ata_dev_enabled(&ap->device[i]))
1840 return 0;
1842 /* no device present, disable port */
1843 ata_port_disable(ap);
1844 ap->ops->port_disable(ap);
1845 return -ENODEV;
1847 fail:
1848 tries[dev->devno]--;
1850 switch (rc) {
1851 case -EINVAL:
1852 /* eeek, something went very wrong, give up */
1853 tries[dev->devno] = 0;
1854 break;
1856 case -ENODEV:
1857 /* give it just one more chance */
1858 tries[dev->devno] = min(tries[dev->devno], 1);
1859 case -EIO:
1860 if (tries[dev->devno] == 1) {
1861 /* This is the last chance, better to slow
1862 * down than lose it.
1864 sata_down_spd_limit(ap);
1865 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
1869 if (!tries[dev->devno])
1870 ata_dev_disable(dev);
1872 goto retry;
1876 * ata_port_probe - Mark port as enabled
1877 * @ap: Port for which we indicate enablement
1879 * Modify @ap data structure such that the system
1880 * thinks that the entire port is enabled.
1882 * LOCKING: host lock, or some other form of
1883 * serialization.
1886 void ata_port_probe(struct ata_port *ap)
1888 ap->flags &= ~ATA_FLAG_DISABLED;
1892 * sata_print_link_status - Print SATA link status
1893 * @ap: SATA port to printk link status about
1895 * This function prints link speed and status of a SATA link.
1897 * LOCKING:
1898 * None.
1900 static void sata_print_link_status(struct ata_port *ap)
1902 u32 sstatus, scontrol, tmp;
1904 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
1905 return;
1906 sata_scr_read(ap, SCR_CONTROL, &scontrol);
1908 if (ata_port_online(ap)) {
1909 tmp = (sstatus >> 4) & 0xf;
1910 ata_port_printk(ap, KERN_INFO,
1911 "SATA link up %s (SStatus %X SControl %X)\n",
1912 sata_spd_string(tmp), sstatus, scontrol);
1913 } else {
1914 ata_port_printk(ap, KERN_INFO,
1915 "SATA link down (SStatus %X SControl %X)\n",
1916 sstatus, scontrol);
1921 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1922 * @ap: SATA port associated with target SATA PHY.
1924 * This function issues commands to standard SATA Sxxx
1925 * PHY registers, to wake up the phy (and device), and
1926 * clear any reset condition.
1928 * LOCKING:
1929 * PCI/etc. bus probe sem.
1932 void __sata_phy_reset(struct ata_port *ap)
1934 u32 sstatus;
1935 unsigned long timeout = jiffies + (HZ * 5);
1937 if (ap->flags & ATA_FLAG_SATA_RESET) {
1938 /* issue phy wake/reset */
1939 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
1940 /* Couldn't find anything in SATA I/II specs, but
1941 * AHCI-1.1 10.4.2 says at least 1 ms. */
1942 mdelay(1);
1944 /* phy wake/clear reset */
1945 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1947 /* wait for phy to become ready, if necessary */
1948 do {
1949 msleep(200);
1950 sata_scr_read(ap, SCR_STATUS, &sstatus);
1951 if ((sstatus & 0xf) != 1)
1952 break;
1953 } while (time_before(jiffies, timeout));
1955 /* print link status */
1956 sata_print_link_status(ap);
1958 /* TODO: phy layer with polling, timeouts, etc. */
1959 if (!ata_port_offline(ap))
1960 ata_port_probe(ap);
1961 else
1962 ata_port_disable(ap);
1964 if (ap->flags & ATA_FLAG_DISABLED)
1965 return;
1967 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1968 ata_port_disable(ap);
1969 return;
1972 ap->cbl = ATA_CBL_SATA;
1976 * sata_phy_reset - Reset SATA bus.
1977 * @ap: SATA port associated with target SATA PHY.
1979 * This function resets the SATA bus, and then probes
1980 * the bus for devices.
1982 * LOCKING:
1983 * PCI/etc. bus probe sem.
1986 void sata_phy_reset(struct ata_port *ap)
1988 __sata_phy_reset(ap);
1989 if (ap->flags & ATA_FLAG_DISABLED)
1990 return;
1991 ata_bus_reset(ap);
1995 * ata_dev_pair - return other device on cable
1996 * @adev: device
1998 * Obtain the other device on the same cable, or if none is
1999 * present, NULL is returned.
2002 struct ata_device *ata_dev_pair(struct ata_device *adev)
2004 struct ata_port *ap = adev->ap;
2005 struct ata_device *pair = &ap->device[1 - adev->devno];
2006 if (!ata_dev_enabled(pair))
2007 return NULL;
2008 return pair;
2012 * ata_port_disable - Disable port.
2013 * @ap: Port to be disabled.
2015 * Modify @ap data structure such that the system
2016 * thinks that the entire port is disabled, and should
2017 * never attempt to probe or communicate with devices
2018 * on this port.
2020 * LOCKING: host lock, or some other form of
2021 * serialization.
2024 void ata_port_disable(struct ata_port *ap)
2026 ap->device[0].class = ATA_DEV_NONE;
2027 ap->device[1].class = ATA_DEV_NONE;
2028 ap->flags |= ATA_FLAG_DISABLED;
2032 * sata_down_spd_limit - adjust SATA spd limit downward
2033 * @ap: Port to adjust SATA spd limit for
2035 * Adjust SATA spd limit of @ap downward. Note that this
2036 * function only adjusts the limit. The change must be applied
2037 * using sata_set_spd().
2039 * LOCKING:
2040 * Inherited from caller.
2042 * RETURNS:
2043 * 0 on success, negative errno on failure
2045 int sata_down_spd_limit(struct ata_port *ap)
2047 u32 sstatus, spd, mask;
2048 int rc, highbit;
2050 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2051 if (rc)
2052 return rc;
2054 mask = ap->sata_spd_limit;
2055 if (mask <= 1)
2056 return -EINVAL;
2057 highbit = fls(mask) - 1;
2058 mask &= ~(1 << highbit);
2060 spd = (sstatus >> 4) & 0xf;
2061 if (spd <= 1)
2062 return -EINVAL;
2063 spd--;
2064 mask &= (1 << spd) - 1;
2065 if (!mask)
2066 return -EINVAL;
2068 ap->sata_spd_limit = mask;
2070 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2071 sata_spd_string(fls(mask)));
2073 return 0;
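/*
 * Worked example: with sata_spd_limit == 0x3 (Gen1 and Gen2 allowed) and
 * the link currently at 3.0 Gbps (spd == 2), the code above clears the
 * top bit and masks with (1 << 1) - 1, leaving 0x1, so the new limit is
 * Gen1 only and "limiting SATA link speed to 1.5 Gbps" is printed.
 */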
2076 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2078 u32 spd, limit;
2080 if (ap->sata_spd_limit == UINT_MAX)
2081 limit = 0;
2082 else
2083 limit = fls(ap->sata_spd_limit);
2085 spd = (*scontrol >> 4) & 0xf;
2086 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2088 return spd != limit;
2092 * sata_set_spd_needed - is SATA spd configuration needed
2093 * @ap: Port in question
2095 * Test whether the spd limit in SControl matches
2096 * @ap->sata_spd_limit. This function is used to determine
2097 * whether hardreset is necessary to apply SATA spd
2098 * configuration.
2100 * LOCKING:
2101 * Inherited from caller.
2103 * RETURNS:
2104 * 1 if SATA spd configuration is needed, 0 otherwise.
2106 int sata_set_spd_needed(struct ata_port *ap)
2108 u32 scontrol;
2110 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2111 return 0;
2113 return __sata_set_spd_needed(ap, &scontrol);
2117 * sata_set_spd - set SATA spd according to spd limit
2118 * @ap: Port to set SATA spd for
2120 * Set SATA spd of @ap according to sata_spd_limit.
2122 * LOCKING:
2123 * Inherited from caller.
2125 * RETURNS:
2126 * 0 if spd doesn't need to be changed, 1 if spd has been
2127 * changed. Negative errno if SCR registers are inaccessible.
2129 int sata_set_spd(struct ata_port *ap)
2131 u32 scontrol;
2132 int rc;
2134 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2135 return rc;
2137 if (!__sata_set_spd_needed(ap, &scontrol))
2138 return 0;
2140 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2141 return rc;
2143 return 1;
2147 * This mode timing computation functionality is ported over from
2148 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2151 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2152 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2153 * for UDMA6, which is currently supported only by Maxtor drives.
2155 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2158 static const struct ata_timing ata_timing[] = {
2160 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2161 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2162 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2163 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2165 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2166 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2167 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2168 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2169 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2171 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2173 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2174 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2175 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2177 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2178 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2179 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2181 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2182 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2183 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2184 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2186 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2187 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2188 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2190 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2192 { 0xFF }
2195 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2196 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2198 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2200 q->setup = EZ(t->setup * 1000, T);
2201 q->act8b = EZ(t->act8b * 1000, T);
2202 q->rec8b = EZ(t->rec8b * 1000, T);
2203 q->cyc8b = EZ(t->cyc8b * 1000, T);
2204 q->active = EZ(t->active * 1000, T);
2205 q->recover = EZ(t->recover * 1000, T);
2206 q->cycle = EZ(t->cycle * 1000, T);
2207 q->udma = EZ(t->udma * 1000, UT);
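/*
 * The table entries are in nanoseconds; multiplying by 1000 and rounding
 * up with ENOUGH() converts them to whole bus clocks when T (or UT) is
 * the clock period in picoseconds.  For example, assuming T == 30000
 * (a 33 MHz clock), the 70 ns PIO0 setup time quantizes to
 * ENOUGH(70000, 30000) == 3 clock cycles.
 */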
2210 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2211 struct ata_timing *m, unsigned int what)
2213 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2214 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2215 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2216 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2217 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2218 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2219 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2220 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2223 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2225 const struct ata_timing *t;
2227 for (t = ata_timing; t->mode != speed; t++)
2228 if (t->mode == 0xFF)
2229 return NULL;
2230 return t;
2233 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2234 struct ata_timing *t, int T, int UT)
2236 const struct ata_timing *s;
2237 struct ata_timing p;
2240 * Find the mode.
2243 if (!(s = ata_timing_find_mode(speed)))
2244 return -EINVAL;
2246 memcpy(t, s, sizeof(*s));
2249 * If the drive is an EIDE drive, it can tell us it needs extended
2250 * PIO/MW_DMA cycle timing.
2253 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2254 memset(&p, 0, sizeof(p));
2255 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2256 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2257 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2258 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2259 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2261 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2265 * Convert the timing to bus clock counts.
2268 ata_timing_quantize(t, t, T, UT);
2271 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2272 * S.M.A.R.T. and some other commands. We have to ensure that the
2273 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2276 if (speed > XFER_PIO_6) {
2277 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2278 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2282 * Lengthen active & recovery time so that cycle time is correct.
2285 if (t->act8b + t->rec8b < t->cyc8b) {
2286 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2287 t->rec8b = t->cyc8b - t->act8b;
2290 if (t->active + t->recover < t->cycle) {
2291 t->active += (t->cycle - (t->active + t->recover)) / 2;
2292 t->recover = t->cycle - t->active;
2295 return 0;
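/* A usage sketch (illustrative only, not taken from any particular
 * driver): a PATA LLDD's ->set_piomode() might convert the selected
 * mode into chip register values roughly as follows, assuming a
 * 33333 kHz bus clock and a hypothetical program_timing_regs() helper:
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	(bus clock period, ~30000 ps)
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, T))
 *		program_timing_regs(ap, adev, &t);
 *
 * The resulting fields (t.setup, t.active, t.recover, ...) are already
 * quantized to bus clock counts by ata_timing_quantize().
 */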
2299 * ata_down_xfermask_limit - adjust dev xfer masks downward
2300 * @dev: Device to adjust xfer masks
2301 * @sel: ATA_DNXFER_* selector
2303 * Adjust xfer masks of @dev downward. Note that this function
2304 * does not apply the change. Invoking ata_set_mode() afterwards
2305 * will apply the limit.
2307 * LOCKING:
2308 * Inherited from caller.
2310 * RETURNS:
2311 * 0 on success, negative errno on failure
2313 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2315 char buf[32];
2316 unsigned int orig_mask, xfer_mask;
2317 unsigned int pio_mask, mwdma_mask, udma_mask;
2318 int quiet, highbit;
2320 quiet = !!(sel & ATA_DNXFER_QUIET);
2321 sel &= ~ATA_DNXFER_QUIET;
2323 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2324 dev->mwdma_mask,
2325 dev->udma_mask);
2326 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2328 switch (sel) {
2329 case ATA_DNXFER_PIO:
2330 highbit = fls(pio_mask) - 1;
2331 pio_mask &= ~(1 << highbit);
2332 break;
2334 case ATA_DNXFER_DMA:
2335 if (udma_mask) {
2336 highbit = fls(udma_mask) - 1;
2337 udma_mask &= ~(1 << highbit);
2338 if (!udma_mask)
2339 return -ENOENT;
2340 } else if (mwdma_mask) {
2341 highbit = fls(mwdma_mask) - 1;
2342 mwdma_mask &= ~(1 << highbit);
2343 if (!mwdma_mask)
2344 return -ENOENT;
2346 break;
2348 case ATA_DNXFER_40C:
2349 udma_mask &= ATA_UDMA_MASK_40C;
2350 break;
2352 case ATA_DNXFER_FORCE_PIO0:
2353 pio_mask &= 1;
2354 case ATA_DNXFER_FORCE_PIO:
2355 mwdma_mask = 0;
2356 udma_mask = 0;
2357 break;
2359 default:
2360 BUG();
2363 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2365 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2366 return -ENOENT;
2368 if (!quiet) {
2369 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2370 snprintf(buf, sizeof(buf), "%s:%s",
2371 ata_mode_string(xfer_mask),
2372 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2373 else
2374 snprintf(buf, sizeof(buf), "%s",
2375 ata_mode_string(xfer_mask));
2377 ata_dev_printk(dev, KERN_WARNING,
2378 "limiting speed to %s\n", buf);
2381 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2382 &dev->udma_mask);
2384 return 0;
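/* Usage sketch: error handling code that wants to slow a flaky device
 * down one PIO notch could do, for example,
 *
 *	struct ata_device *failed_dev;
 *
 *	if (!ata_down_xfermask_limit(dev, ATA_DNXFER_PIO))
 *		ata_set_mode(dev->ap, &failed_dev);
 *
 * since, as noted above, this function only narrows the masks and the
 * new limit is applied by the following ata_set_mode() call.
 */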
2387 static int ata_dev_set_mode(struct ata_device *dev)
2389 struct ata_eh_context *ehc = &dev->ap->eh_context;
2390 unsigned int err_mask;
2391 int rc;
2393 dev->flags &= ~ATA_DFLAG_PIO;
2394 if (dev->xfer_shift == ATA_SHIFT_PIO)
2395 dev->flags |= ATA_DFLAG_PIO;
2397 err_mask = ata_dev_set_xfermode(dev);
2398 /* Old CFA may refuse this command, which is just fine */
2399 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2400 err_mask &= ~AC_ERR_DEV;
2402 if (err_mask) {
2403 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2404 "(err_mask=0x%x)\n", err_mask);
2405 return -EIO;
2408 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2409 rc = ata_dev_revalidate(dev, 0);
2410 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2411 if (rc)
2412 return rc;
2414 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2415 dev->xfer_shift, (int)dev->xfer_mode);
2417 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2418 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2419 return 0;
2423 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2424 * @ap: port on which timings will be programmed
2425 * @r_failed_dev: out parameter for failed device
2427 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2428 * ata_set_mode() fails, pointer to the failing device is
2429 * returned in @r_failed_dev.
2431 * LOCKING:
2432 * PCI/etc. bus probe sem.
2434 * RETURNS:
2435 * 0 on success, negative errno otherwise
2437 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2439 struct ata_device *dev;
2440 int i, rc = 0, used_dma = 0, found = 0;
2442 /* has private set_mode? */
2443 if (ap->ops->set_mode)
2444 return ap->ops->set_mode(ap, r_failed_dev);
2446 /* step 1: calculate xfer_mask */
2447 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2448 unsigned int pio_mask, dma_mask;
2450 dev = &ap->device[i];
2452 if (!ata_dev_enabled(dev))
2453 continue;
2455 ata_dev_xfermask(dev);
2457 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2458 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2459 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2460 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2462 found = 1;
2463 if (dev->dma_mode)
2464 used_dma = 1;
2466 if (!found)
2467 goto out;
2469 /* step 2: always set host PIO timings */
2470 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2471 dev = &ap->device[i];
2472 if (!ata_dev_enabled(dev))
2473 continue;
2475 if (!dev->pio_mode) {
2476 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2477 rc = -EINVAL;
2478 goto out;
2481 dev->xfer_mode = dev->pio_mode;
2482 dev->xfer_shift = ATA_SHIFT_PIO;
2483 if (ap->ops->set_piomode)
2484 ap->ops->set_piomode(ap, dev);
2487 /* step 3: set host DMA timings */
2488 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2489 dev = &ap->device[i];
2491 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2492 continue;
2494 dev->xfer_mode = dev->dma_mode;
2495 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2496 if (ap->ops->set_dmamode)
2497 ap->ops->set_dmamode(ap, dev);
2500 /* step 4: update devices' xfer mode */
2501 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2502 dev = &ap->device[i];
2504 /* don't update suspended devices' xfer mode */
2505 if (!ata_dev_ready(dev))
2506 continue;
2508 rc = ata_dev_set_mode(dev);
2509 if (rc)
2510 goto out;
2513 /* Record simplex status. If we selected DMA then the other
2514 * host channels are not permitted to do so.
2516 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2517 ap->host->simplex_claimed = 1;
2519 /* step 5: chip-specific finalisation */
2520 if (ap->ops->post_set_mode)
2521 ap->ops->post_set_mode(ap);
2523 out:
2524 if (rc)
2525 *r_failed_dev = dev;
2526 return rc;
2530 * ata_tf_to_host - issue ATA taskfile to host controller
2531 * @ap: port to which command is being issued
2532 * @tf: ATA taskfile register set
2534 * Issues ATA taskfile register set to ATA host controller,
2535 * with proper synchronization with interrupt handler and
2536 * other threads.
2538 * LOCKING:
2539 * spin_lock_irqsave(host lock)
2542 static inline void ata_tf_to_host(struct ata_port *ap,
2543 const struct ata_taskfile *tf)
2545 ap->ops->tf_load(ap, tf);
2546 ap->ops->exec_command(ap, tf);
2550 * ata_busy_sleep - sleep until BSY clears, or timeout
2551 * @ap: port containing status register to be polled
2552 * @tmout_pat: impatience timeout
2553 * @tmout: overall timeout
2555 * Sleep until ATA Status register bit BSY clears,
2556 * or a timeout occurs.
2558 * LOCKING:
2559 * Kernel thread context (may sleep).
2561 * RETURNS:
2562 * 0 on success, -errno otherwise.
2564 int ata_busy_sleep(struct ata_port *ap,
2565 unsigned long tmout_pat, unsigned long tmout)
2567 unsigned long timer_start, timeout;
2568 u8 status;
2570 status = ata_busy_wait(ap, ATA_BUSY, 300);
2571 timer_start = jiffies;
2572 timeout = timer_start + tmout_pat;
2573 while (status != 0xff && (status & ATA_BUSY) &&
2574 time_before(jiffies, timeout)) {
2575 msleep(50);
2576 status = ata_busy_wait(ap, ATA_BUSY, 3);
2579 if (status != 0xff && (status & ATA_BUSY))
2580 ata_port_printk(ap, KERN_WARNING,
2581 "port is slow to respond, please be patient "
2582 "(Status 0x%x)\n", status);
2584 timeout = timer_start + tmout;
2585 while (status != 0xff && (status & ATA_BUSY) &&
2586 time_before(jiffies, timeout)) {
2587 msleep(50);
2588 status = ata_chk_status(ap);
2591 if (status == 0xff)
2592 return -ENODEV;
2594 if (status & ATA_BUSY) {
2595 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2596 "(%lu secs, Status 0x%x)\n",
2597 tmout / HZ, status);
2598 return -EBUSY;
2601 return 0;
2604 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2606 struct ata_ioports *ioaddr = &ap->ioaddr;
2607 unsigned int dev0 = devmask & (1 << 0);
2608 unsigned int dev1 = devmask & (1 << 1);
2609 unsigned long timeout;
2611 /* if device 0 was found in ata_devchk, wait for its
2612 * BSY bit to clear
2614 if (dev0)
2615 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2617 /* if device 1 was found in ata_devchk, wait for
2618 * register access, then wait for BSY to clear
2620 timeout = jiffies + ATA_TMOUT_BOOT;
2621 while (dev1) {
2622 u8 nsect, lbal;
2624 ap->ops->dev_select(ap, 1);
2625 nsect = ioread8(ioaddr->nsect_addr);
2626 lbal = ioread8(ioaddr->lbal_addr);
2627 if ((nsect == 1) && (lbal == 1))
2628 break;
2629 if (time_after(jiffies, timeout)) {
2630 dev1 = 0;
2631 break;
2633 msleep(50); /* give drive a breather */
2635 if (dev1)
2636 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2638 /* is all this really necessary? */
2639 ap->ops->dev_select(ap, 0);
2640 if (dev1)
2641 ap->ops->dev_select(ap, 1);
2642 if (dev0)
2643 ap->ops->dev_select(ap, 0);
2646 static unsigned int ata_bus_softreset(struct ata_port *ap,
2647 unsigned int devmask)
2649 struct ata_ioports *ioaddr = &ap->ioaddr;
2651 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2653 /* software reset. causes dev0 to be selected */
2654 iowrite8(ap->ctl, ioaddr->ctl_addr);
2655 udelay(20); /* FIXME: flush */
2656 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2657 udelay(20); /* FIXME: flush */
2658 iowrite8(ap->ctl, ioaddr->ctl_addr);
2660 /* spec mandates ">= 2ms" before checking status.
2661 * We wait 150ms, because that was the magic delay used for
2662 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2663 * between when the ATA command register is written, and then
2664 * status is checked. Because waiting for "a while" before
2665 * checking status is fine, post SRST, we perform this magic
2666 * delay here as well.
2668 * Old drivers/ide uses the 2 ms rule and then waits for ready
2670 msleep(150);
2672 /* Before we perform post reset processing we want to see if
2673 * the bus shows 0xFF because the odd clown forgets the D7
2674 * pulldown resistor.
2676 if (ata_check_status(ap) == 0xFF)
2677 return 0;
2679 ata_bus_post_reset(ap, devmask);
2681 return 0;
2685 * ata_bus_reset - reset host port and associated ATA channel
2686 * @ap: port to reset
2688 * This is typically the first time we actually start issuing
2689 * commands to the ATA channel. We wait for BSY to clear, then
2690 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2691 * result. Determine what devices, if any, are on the channel
2692 * by looking at the device 0/1 error register. Look at the signature
2693 * stored in each device's taskfile registers, to determine if
2694 * the device is ATA or ATAPI.
2696 * LOCKING:
2697 * PCI/etc. bus probe sem.
2698 * Obtains host lock.
2700 * SIDE EFFECTS:
2701 * Sets ATA_FLAG_DISABLED if bus reset fails.
2704 void ata_bus_reset(struct ata_port *ap)
2706 struct ata_ioports *ioaddr = &ap->ioaddr;
2707 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2708 u8 err;
2709 unsigned int dev0, dev1 = 0, devmask = 0;
2711 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2713 /* determine if device 0/1 are present */
2714 if (ap->flags & ATA_FLAG_SATA_RESET)
2715 dev0 = 1;
2716 else {
2717 dev0 = ata_devchk(ap, 0);
2718 if (slave_possible)
2719 dev1 = ata_devchk(ap, 1);
2722 if (dev0)
2723 devmask |= (1 << 0);
2724 if (dev1)
2725 devmask |= (1 << 1);
2727 /* select device 0 again */
2728 ap->ops->dev_select(ap, 0);
2730 /* issue bus reset */
2731 if (ap->flags & ATA_FLAG_SRST)
2732 if (ata_bus_softreset(ap, devmask))
2733 goto err_out;
2736 * determine by signature whether we have ATA or ATAPI devices
2738 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2739 if ((slave_possible) && (err != 0x81))
2740 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2742 /* re-enable interrupts */
2743 ap->ops->irq_on(ap);
2745 /* is double-select really necessary? */
2746 if (ap->device[1].class != ATA_DEV_NONE)
2747 ap->ops->dev_select(ap, 1);
2748 if (ap->device[0].class != ATA_DEV_NONE)
2749 ap->ops->dev_select(ap, 0);
2751 /* if no devices were detected, disable this port */
2752 if ((ap->device[0].class == ATA_DEV_NONE) &&
2753 (ap->device[1].class == ATA_DEV_NONE))
2754 goto err_out;
2756 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2757 /* set up device control for ATA_FLAG_SATA_RESET */
2758 iowrite8(ap->ctl, ioaddr->ctl_addr);
2761 DPRINTK("EXIT\n");
2762 return;
2764 err_out:
2765 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2766 ap->ops->port_disable(ap);
2768 DPRINTK("EXIT\n");
2772 * sata_phy_debounce - debounce SATA phy status
2773 * @ap: ATA port to debounce SATA phy status for
2774 * @params: timing parameters { interval, duration, timeout } in msec
2776 * Make sure SStatus of @ap reaches stable state, determined by
2777 * holding the same value where DET is not 1 for @duration polled
2778 * every @interval, before @timeout. The timeout constrains the
2779 * beginning of the stable state. Because, after hot unplugging,
2780 * DET gets stuck at 1 on some controllers, this function waits
2781 * until timeout and then returns 0 if DET is stable at 1.
2783 * LOCKING:
2784 * Kernel thread context (may sleep)
2786 * RETURNS:
2787 * 0 on success, -errno on failure.
2789 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2791 unsigned long interval_msec = params[0];
2792 unsigned long duration = params[1] * HZ / 1000;
2793 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2794 unsigned long last_jiffies;
2795 u32 last, cur;
2796 int rc;
2798 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2799 return rc;
2800 cur &= 0xf;
2802 last = cur;
2803 last_jiffies = jiffies;
2805 while (1) {
2806 msleep(interval_msec);
2807 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2808 return rc;
2809 cur &= 0xf;
2811 /* DET stable? */
2812 if (cur == last) {
2813 if (cur == 1 && time_before(jiffies, timeout))
2814 continue;
2815 if (time_after(jiffies, last_jiffies + duration))
2816 return 0;
2817 continue;
2820 /* unstable, start over */
2821 last = cur;
2822 last_jiffies = jiffies;
2824 /* check timeout */
2825 if (time_after(jiffies, timeout))
2826 return -EBUSY;
2831 * sata_phy_resume - resume SATA phy
2832 * @ap: ATA port to resume SATA phy for
2833 * @params: timing parameters { interval, duration, timeout } in msec
2835 * Resume SATA phy of @ap and debounce it.
2837 * LOCKING:
2838 * Kernel thread context (may sleep)
2840 * RETURNS:
2841 * 0 on success, -errno on failure.
2843 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2845 u32 scontrol;
2846 int rc;
2848 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2849 return rc;
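/* SControl layout: DET occupies bits 3:0, SPD bits 7:4 and IPM bits
 * 11:8. Masking with 0x0f0 keeps the configured speed limit, while
 * 0x300 sets IPM to 3 (disable transitions to partial and slumber)
 * and DET to 0 (no reset action requested).
 */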
2851 scontrol = (scontrol & 0x0f0) | 0x300;
2853 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2854 return rc;
2856 /* Some PHYs react badly if SStatus is pounded immediately
2857 * after resuming. Delay 200ms before debouncing.
2859 msleep(200);
2861 return sata_phy_debounce(ap, params);
2864 static void ata_wait_spinup(struct ata_port *ap)
2866 struct ata_eh_context *ehc = &ap->eh_context;
2867 unsigned long end, secs;
2868 int rc;
2870 /* first, debounce phy if SATA */
2871 if (ap->cbl == ATA_CBL_SATA) {
2872 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2874 /* if debounced successfully and offline, no need to wait */
2875 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2876 return;
2879 /* okay, let's give the drive time to spin up */
2880 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2881 secs = ((end - jiffies) + HZ - 1) / HZ;
2883 if (time_after(jiffies, end))
2884 return;
2886 if (secs > 5)
2887 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2888 "(%lu secs)\n", secs);
2890 schedule_timeout_uninterruptible(end - jiffies);
2894 * ata_std_prereset - prepare for reset
2895 * @ap: ATA port to be reset
2897 * @ap is about to be reset. Initialize it.
2899 * LOCKING:
2900 * Kernel thread context (may sleep)
2902 * RETURNS:
2903 * 0 on success, -errno otherwise.
2905 int ata_std_prereset(struct ata_port *ap)
2907 struct ata_eh_context *ehc = &ap->eh_context;
2908 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2909 int rc;
2911 /* handle link resume & hotplug spinup */
2912 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2913 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2914 ehc->i.action |= ATA_EH_HARDRESET;
2916 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2917 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2918 ata_wait_spinup(ap);
2920 /* if we're about to do hardreset, nothing more to do */
2921 if (ehc->i.action & ATA_EH_HARDRESET)
2922 return 0;
2924 /* if SATA, resume phy */
2925 if (ap->cbl == ATA_CBL_SATA) {
2926 rc = sata_phy_resume(ap, timing);
2927 if (rc && rc != -EOPNOTSUPP) {
2928 /* phy resume failed */
2929 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2930 "link for reset (errno=%d)\n", rc);
2931 return rc;
2935 /* Wait for !BSY if the controller can wait for the first D2H
2936 * Reg FIS and we don't know that no device is attached.
2938 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2939 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2941 return 0;
2945 * ata_std_softreset - reset host port via ATA SRST
2946 * @ap: port to reset
2947 * @classes: resulting classes of attached devices
2949 * Reset host port using ATA SRST.
2951 * LOCKING:
2952 * Kernel thread context (may sleep)
2954 * RETURNS:
2955 * 0 on success, -errno otherwise.
2957 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2959 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2960 unsigned int devmask = 0, err_mask;
2961 u8 err;
2963 DPRINTK("ENTER\n");
2965 if (ata_port_offline(ap)) {
2966 classes[0] = ATA_DEV_NONE;
2967 goto out;
2970 /* determine if device 0/1 are present */
2971 if (ata_devchk(ap, 0))
2972 devmask |= (1 << 0);
2973 if (slave_possible && ata_devchk(ap, 1))
2974 devmask |= (1 << 1);
2976 /* select device 0 again */
2977 ap->ops->dev_select(ap, 0);
2979 /* issue bus reset */
2980 DPRINTK("about to softreset, devmask=%x\n", devmask);
2981 err_mask = ata_bus_softreset(ap, devmask);
2982 if (err_mask) {
2983 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2984 err_mask);
2985 return -EIO;
2988 /* determine by signature whether we have ATA or ATAPI devices */
2989 classes[0] = ata_dev_try_classify(ap, 0, &err);
2990 if (slave_possible && err != 0x81)
2991 classes[1] = ata_dev_try_classify(ap, 1, &err);
2993 out:
2994 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2995 return 0;
2999 * sata_port_hardreset - reset port via SATA phy reset
3000 * @ap: port to reset
3001 * @timing: timing parameters { interval, duration, timeout } in msec
3003 * SATA phy-reset host port using DET bits of SControl register.
3005 * LOCKING:
3006 * Kernel thread context (may sleep)
3008 * RETURNS:
3009 * 0 on success, -errno otherwise.
3011 int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
3013 u32 scontrol;
3014 int rc;
3016 DPRINTK("ENTER\n");
3018 if (sata_set_spd_needed(ap)) {
3019 /* SATA spec says nothing about how to reconfigure
3020 * spd. To be on the safe side, turn off phy during
3021 * reconfiguration. This works for at least ICH7 AHCI
3022 * and Sil3124.
3024 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3025 goto out;
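/* DET = 4 requests that the phy be taken offline (interface and phy
 * disabled) while the speed limit is reprogrammed below.
 */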
3027 scontrol = (scontrol & 0x0f0) | 0x304;
3029 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3030 goto out;
3032 sata_set_spd(ap);
3035 /* issue phy wake/reset */
3036 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3037 goto out;
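/* DET = 1 requests interface initialization, i.e. the phy transmits
 * COMRESET to the attached device.
 */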
3039 scontrol = (scontrol & 0x0f0) | 0x301;
3041 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3042 goto out;
3044 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3045 * 10.4.2 says at least 1 ms.
3047 msleep(1);
3049 /* bring phy back */
3050 rc = sata_phy_resume(ap, timing);
3051 out:
3052 DPRINTK("EXIT, rc=%d\n", rc);
3053 return rc;
3057 * sata_std_hardreset - reset host port via SATA phy reset
3058 * @ap: port to reset
3059 * @class: resulting class of attached device
3061 * SATA phy-reset host port using DET bits of SControl register,
3062 * wait for !BSY and classify the attached device.
3064 * LOCKING:
3065 * Kernel thread context (may sleep)
3067 * RETURNS:
3068 * 0 on success, -errno otherwise.
3070 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3072 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3073 int rc;
3075 DPRINTK("ENTER\n");
3077 /* do hardreset */
3078 rc = sata_port_hardreset(ap, timing);
3079 if (rc) {
3080 ata_port_printk(ap, KERN_ERR,
3081 "COMRESET failed (errno=%d)\n", rc);
3082 return rc;
3085 /* TODO: phy layer with polling, timeouts, etc. */
3086 if (ata_port_offline(ap)) {
3087 *class = ATA_DEV_NONE;
3088 DPRINTK("EXIT, link offline\n");
3089 return 0;
3092 /* wait a while before checking status, see SRST for more info */
3093 msleep(150);
3095 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
3096 ata_port_printk(ap, KERN_ERR,
3097 "COMRESET failed (device not ready)\n");
3098 return -EIO;
3101 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3103 *class = ata_dev_try_classify(ap, 0, NULL);
3105 DPRINTK("EXIT, class=%u\n", *class);
3106 return 0;
3110 * ata_std_postreset - standard postreset callback
3111 * @ap: the target ata_port
3112 * @classes: classes of attached devices
3114 * This function is invoked after a successful reset. Note that
3115 * the device might have been reset more than once using
3116 * different reset methods before postreset is invoked.
3118 * LOCKING:
3119 * Kernel thread context (may sleep)
3121 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3123 u32 serror;
3125 DPRINTK("ENTER\n");
3127 /* print link status */
3128 sata_print_link_status(ap);
3130 /* clear SError */
3131 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3132 sata_scr_write(ap, SCR_ERROR, serror);
3134 /* re-enable interrupts */
3135 if (!ap->ops->error_handler)
3136 ap->ops->irq_on(ap);
3138 /* is double-select really necessary? */
3139 if (classes[0] != ATA_DEV_NONE)
3140 ap->ops->dev_select(ap, 1);
3141 if (classes[1] != ATA_DEV_NONE)
3142 ap->ops->dev_select(ap, 0);
3144 /* bail out if no device is present */
3145 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3146 DPRINTK("EXIT, no device\n");
3147 return;
3150 /* set up device control */
3151 if (ap->ioaddr.ctl_addr)
3152 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3154 DPRINTK("EXIT\n");
3158 * ata_dev_same_device - Determine whether new ID matches configured device
3159 * @dev: device to compare against
3160 * @new_class: class of the new device
3161 * @new_id: IDENTIFY page of the new device
3163 * Compare @new_class and @new_id against @dev and determine
3164 * whether @dev is the device indicated by @new_class and
3165 * @new_id.
3167 * LOCKING:
3168 * None.
3170 * RETURNS:
3171 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3173 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3174 const u16 *new_id)
3176 const u16 *old_id = dev->id;
3177 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3178 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3179 u64 new_n_sectors;
3181 if (dev->class != new_class) {
3182 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3183 dev->class, new_class);
3184 return 0;
3187 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3188 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3189 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3190 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3191 new_n_sectors = ata_id_n_sectors(new_id);
3193 if (strcmp(model[0], model[1])) {
3194 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3195 "'%s' != '%s'\n", model[0], model[1]);
3196 return 0;
3199 if (strcmp(serial[0], serial[1])) {
3200 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3201 "'%s' != '%s'\n", serial[0], serial[1]);
3202 return 0;
3205 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
3206 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3207 "%llu != %llu\n",
3208 (unsigned long long)dev->n_sectors,
3209 (unsigned long long)new_n_sectors);
3210 return 0;
3213 return 1;
3217 * ata_dev_revalidate - Revalidate ATA device
3218 * @dev: device to revalidate
3219 * @readid_flags: read ID flags
3221 * Re-read IDENTIFY page and make sure @dev is still attached to
3222 * the port.
3224 * LOCKING:
3225 * Kernel thread context (may sleep)
3227 * RETURNS:
3228 * 0 on success, negative errno otherwise
3230 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3232 unsigned int class = dev->class;
3233 u16 *id = (void *)dev->ap->sector_buf;
3234 int rc;
3236 if (!ata_dev_enabled(dev)) {
3237 rc = -ENODEV;
3238 goto fail;
3241 /* read ID data */
3242 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3243 if (rc)
3244 goto fail;
3246 /* is the device still there? */
3247 if (!ata_dev_same_device(dev, class, id)) {
3248 rc = -ENODEV;
3249 goto fail;
3252 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3254 /* configure device according to the new ID */
3255 rc = ata_dev_configure(dev);
3256 if (rc == 0)
3257 return 0;
3259 fail:
3260 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3261 return rc;
3264 struct ata_blacklist_entry {
3265 const char *model_num;
3266 const char *model_rev;
3267 unsigned long horkage;
3270 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3271 /* Devices with DMA related problems under Linux */
3272 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3273 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3274 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3275 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3276 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3277 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3278 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3279 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3280 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3281 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3282 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3283 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3284 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3285 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3286 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3287 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3288 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3289 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3290 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3291 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3292 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3293 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3294 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3295 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3296 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3297 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3298 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3299 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3300 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3302 /* Devices we expect to fail diagnostics */
3304 /* Devices where NCQ should be avoided */
3305 /* NCQ is slow */
3306 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3308 /* Devices with NCQ limits */
3310 /* End Marker */
3314 unsigned long ata_device_blacklisted(const struct ata_device *dev)
3316 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3317 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3318 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3320 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3321 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3323 while (ad->model_num) {
3324 if (!strcmp(ad->model_num, model_num)) {
3325 if (ad->model_rev == NULL)
3326 return ad->horkage;
3327 if (!strcmp(ad->model_rev, model_rev))
3328 return ad->horkage;
3330 ad++;
3332 return 0;
3335 static int ata_dma_blacklisted(const struct ata_device *dev)
3337 /* We don't support polling DMA.
3338 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3339 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3341 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3342 (dev->flags & ATA_DFLAG_CDB_INTR))
3343 return 1;
3344 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3348 * ata_dev_xfermask - Compute supported xfermask of the given device
3349 * @dev: Device to compute xfermask for
3351 * Compute supported xfermask of @dev and store it in
3352 * dev->*_mask. This function is responsible for applying all
3353 * known limits including host controller limits, device
3354 * blacklist, etc...
3356 * LOCKING:
3357 * None.
3359 static void ata_dev_xfermask(struct ata_device *dev)
3361 struct ata_port *ap = dev->ap;
3362 struct ata_host *host = ap->host;
3363 unsigned long xfer_mask;
3365 /* controller modes available */
3366 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3367 ap->mwdma_mask, ap->udma_mask);
3369 /* Apply cable rule here. Don't apply it early because when
3370 * we handle hot plug the cable type can itself change.
3372 if (ap->cbl == ATA_CBL_PATA40)
3373 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3374 /* Apply the drive-side cable rule. Unknown or 80-pin cables reported
3375 * on the host side are checked on the drive side as well. Cases where
3376 * we know a 40-wire cable is used safely for 80 are not checked here.
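/* In both cable checks the 0xF8 mask clears UDMA3 and above, leaving
 * only UDMA0-2 (up to UDMA/33), the UDMA modes considered safe on a
 * 40-wire cable.
 */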
3378 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3379 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3382 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3383 dev->mwdma_mask, dev->udma_mask);
3384 xfer_mask &= ata_id_xfermask(dev->id);
3387 * CFA Advanced TrueIDE timings are not allowed on a shared
3388 * cable
3390 if (ata_dev_pair(dev)) {
3391 /* No PIO5 or PIO6 */
3392 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3393 /* No MWDMA3 or MWDMA 4 */
3394 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3397 if (ata_dma_blacklisted(dev)) {
3398 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3399 ata_dev_printk(dev, KERN_WARNING,
3400 "device is on DMA blacklist, disabling DMA\n");
3403 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3404 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3405 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3406 "other device, disabling DMA\n");
3409 if (ap->ops->mode_filter)
3410 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3412 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3413 &dev->mwdma_mask, &dev->udma_mask);
3417 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3418 * @dev: Device to which command will be sent
3420 * Issue SET FEATURES - XFER MODE command to device @dev
3421 * on port @ap.
3423 * LOCKING:
3424 * PCI/etc. bus probe sem.
3426 * RETURNS:
3427 * 0 on success, AC_ERR_* mask otherwise.
3430 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3432 struct ata_taskfile tf;
3433 unsigned int err_mask;
3435 /* set up set-features taskfile */
3436 DPRINTK("set features - xfer mode\n");
3438 ata_tf_init(dev, &tf);
3439 tf.command = ATA_CMD_SET_FEATURES;
3440 tf.feature = SETFEATURES_XFER;
3441 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3442 tf.protocol = ATA_PROT_NODATA;
3443 tf.nsect = dev->xfer_mode;
3445 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3447 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3448 return err_mask;
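/* The taskfile above is the standard SET FEATURES - XFER MODE
 * sequence: subcommand 0x03 (SETFEATURES_XFER) in the feature
 * register and the mode code in the sector count register, e.g.
 * 0x0c for PIO4 (XFER_PIO_4) or 0x45 for UDMA/100 (XFER_UDMA_5).
 */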
3452 * ata_dev_init_params - Issue INIT DEV PARAMS command
3453 * @dev: Device to which command will be sent
3454 * @heads: Number of heads (taskfile parameter)
3455 * @sectors: Number of sectors (taskfile parameter)
3457 * LOCKING:
3458 * Kernel thread context (may sleep)
3460 * RETURNS:
3461 * 0 on success, AC_ERR_* mask otherwise.
3463 static unsigned int ata_dev_init_params(struct ata_device *dev,
3464 u16 heads, u16 sectors)
3466 struct ata_taskfile tf;
3467 unsigned int err_mask;
3469 /* Number of sectors per track 1-255. Number of heads 1-16 */
3470 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3471 return AC_ERR_INVALID;
3473 /* set up init dev params taskfile */
3474 DPRINTK("init dev params \n");
3476 ata_tf_init(dev, &tf);
3477 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3478 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3479 tf.protocol = ATA_PROT_NODATA;
3480 tf.nsect = sectors;
3481 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3483 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3485 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3486 return err_mask;
3490 * ata_sg_clean - Unmap DMA memory associated with command
3491 * @qc: Command containing DMA memory to be released
3493 * Unmap all mapped DMA memory associated with this command.
3495 * LOCKING:
3496 * spin_lock_irqsave(host lock)
3498 void ata_sg_clean(struct ata_queued_cmd *qc)
3500 struct ata_port *ap = qc->ap;
3501 struct scatterlist *sg = qc->__sg;
3502 int dir = qc->dma_dir;
3503 void *pad_buf = NULL;
3505 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3506 WARN_ON(sg == NULL);
3508 if (qc->flags & ATA_QCFLAG_SINGLE)
3509 WARN_ON(qc->n_elem > 1);
3511 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3513 /* if we padded the buffer out to 32-bit bound, and data
3514 * xfer direction is from-device, we must copy from the
3515 * pad buffer back into the supplied buffer
3517 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3518 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3520 if (qc->flags & ATA_QCFLAG_SG) {
3521 if (qc->n_elem)
3522 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3523 /* restore last sg */
3524 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3525 if (pad_buf) {
3526 struct scatterlist *psg = &qc->pad_sgent;
3527 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3528 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3529 kunmap_atomic(addr, KM_IRQ0);
3531 } else {
3532 if (qc->n_elem)
3533 dma_unmap_single(ap->dev,
3534 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3535 dir);
3536 /* restore sg */
3537 sg->length += qc->pad_len;
3538 if (pad_buf)
3539 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3540 pad_buf, qc->pad_len);
3543 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3544 qc->__sg = NULL;
3548 * ata_fill_sg - Fill PCI IDE PRD table
3549 * @qc: Metadata associated with taskfile to be transferred
3551 * Fill PCI IDE PRD (scatter-gather) table with segments
3552 * associated with the current disk command.
3554 * LOCKING:
3555 * spin_lock_irqsave(host lock)
3558 static void ata_fill_sg(struct ata_queued_cmd *qc)
3560 struct ata_port *ap = qc->ap;
3561 struct scatterlist *sg;
3562 unsigned int idx;
3564 WARN_ON(qc->__sg == NULL);
3565 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3567 idx = 0;
3568 ata_for_each_sg(sg, qc) {
3569 u32 addr, offset;
3570 u32 sg_len, len;
3572 /* determine if physical DMA addr spans 64K boundary.
3573 * Note h/w doesn't support 64-bit, so we unconditionally
3574 * truncate dma_addr_t to u32.
3576 addr = (u32) sg_dma_address(sg);
3577 sg_len = sg_dma_len(sg);
3579 while (sg_len) {
3580 offset = addr & 0xffff;
3581 len = sg_len;
3582 if ((offset + sg_len) > 0x10000)
3583 len = 0x10000 - offset;
3585 ap->prd[idx].addr = cpu_to_le32(addr);
3586 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3587 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3589 idx++;
3590 sg_len -= len;
3591 addr += len;
3595 if (idx)
3596 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
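/* Worked example of the 64K boundary handling above: an S/G element
 * at bus address 0x1fff0 with length 0x20 produces two PRD entries,
 * (0x1fff0, 0x10) and (0x20000, 0x10), since a PRD must not cross a
 * 64KiB boundary. A full 0x10000-byte chunk is stored as a length
 * field of 0, which the BMDMA engine interprets as 64KiB.
 */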
3599 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3600 * @qc: Metadata associated with taskfile to check
3602 * Allow low-level driver to filter ATA PACKET commands, returning
3603 * a status indicating whether or not it is OK to use DMA for the
3604 * supplied PACKET command.
3606 * LOCKING:
3607 * spin_lock_irqsave(host lock)
3609 * RETURNS: 0 when ATAPI DMA can be used
3610 * nonzero otherwise
3612 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3614 struct ata_port *ap = qc->ap;
3615 int rc = 0; /* Assume ATAPI DMA is OK by default */
3617 if (ap->ops->check_atapi_dma)
3618 rc = ap->ops->check_atapi_dma(qc);
3620 return rc;
3623 * ata_qc_prep - Prepare taskfile for submission
3624 * @qc: Metadata associated with taskfile to be prepared
3626 * Prepare ATA taskfile for submission.
3628 * LOCKING:
3629 * spin_lock_irqsave(host lock)
3631 void ata_qc_prep(struct ata_queued_cmd *qc)
3633 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3634 return;
3636 ata_fill_sg(qc);
3639 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3642 * ata_sg_init_one - Associate command with memory buffer
3643 * @qc: Command to be associated
3644 * @buf: Memory buffer
3645 * @buflen: Length of memory buffer, in bytes.
3647 * Initialize the data-related elements of queued_cmd @qc
3648 * to point to a single memory buffer, @buf of byte length @buflen.
3650 * LOCKING:
3651 * spin_lock_irqsave(host lock)
3654 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3656 qc->flags |= ATA_QCFLAG_SINGLE;
3658 qc->__sg = &qc->sgent;
3659 qc->n_elem = 1;
3660 qc->orig_n_elem = 1;
3661 qc->buf_virt = buf;
3662 qc->nbytes = buflen;
3664 sg_init_one(&qc->sgent, buf, buflen);
3668 * ata_sg_init - Associate command with scatter-gather table.
3669 * @qc: Command to be associated
3670 * @sg: Scatter-gather table.
3671 * @n_elem: Number of elements in s/g table.
3673 * Initialize the data-related elements of queued_cmd @qc
3674 * to point to a scatter-gather table @sg, containing @n_elem
3675 * elements.
3677 * LOCKING:
3678 * spin_lock_irqsave(host lock)
3681 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3682 unsigned int n_elem)
3684 qc->flags |= ATA_QCFLAG_SG;
3685 qc->__sg = sg;
3686 qc->n_elem = n_elem;
3687 qc->orig_n_elem = n_elem;
3691 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3692 * @qc: Command with memory buffer to be mapped.
3694 * DMA-map the memory buffer associated with queued_cmd @qc.
3696 * LOCKING:
3697 * spin_lock_irqsave(host lock)
3699 * RETURNS:
3700 * Zero on success, negative on error.
3703 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3705 struct ata_port *ap = qc->ap;
3706 int dir = qc->dma_dir;
3707 struct scatterlist *sg = qc->__sg;
3708 dma_addr_t dma_address;
3709 int trim_sg = 0;
3711 /* we must lengthen transfers to end on a 32-bit boundary */
3712 qc->pad_len = sg->length & 3;
3713 if (qc->pad_len) {
3714 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3715 struct scatterlist *psg = &qc->pad_sgent;
3717 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3719 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3721 if (qc->tf.flags & ATA_TFLAG_WRITE)
3722 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3723 qc->pad_len);
3725 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3726 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3727 /* trim sg */
3728 sg->length -= qc->pad_len;
3729 if (sg->length == 0)
3730 trim_sg = 1;
3732 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3733 sg->length, qc->pad_len);
3736 if (trim_sg) {
3737 qc->n_elem--;
3738 goto skip_map;
3741 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3742 sg->length, dir);
3743 if (dma_mapping_error(dma_address)) {
3744 /* restore sg */
3745 sg->length += qc->pad_len;
3746 return -1;
3749 sg_dma_address(sg) = dma_address;
3750 sg_dma_len(sg) = sg->length;
3752 skip_map:
3753 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3754 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3756 return 0;
3760 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3761 * @qc: Command with scatter-gather table to be mapped.
3763 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3765 * LOCKING:
3766 * spin_lock_irqsave(host lock)
3768 * RETURNS:
3769 * Zero on success, negative on error.
3773 static int ata_sg_setup(struct ata_queued_cmd *qc)
3775 struct ata_port *ap = qc->ap;
3776 struct scatterlist *sg = qc->__sg;
3777 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3778 int n_elem, pre_n_elem, dir, trim_sg = 0;
3780 VPRINTK("ENTER, ata%u\n", ap->print_id);
3781 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3783 /* we must lengthen transfers to end on a 32-bit boundary */
3784 qc->pad_len = lsg->length & 3;
3785 if (qc->pad_len) {
3786 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3787 struct scatterlist *psg = &qc->pad_sgent;
3788 unsigned int offset;
3790 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3792 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3795 * psg->page/offset are used to copy to-be-written
3796 * data in this function or read data in ata_sg_clean.
3798 offset = lsg->offset + lsg->length - qc->pad_len;
3799 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3800 psg->offset = offset_in_page(offset);
3802 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3803 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3804 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3805 kunmap_atomic(addr, KM_IRQ0);
3808 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3809 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3810 /* trim last sg */
3811 lsg->length -= qc->pad_len;
3812 if (lsg->length == 0)
3813 trim_sg = 1;
3815 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3816 qc->n_elem - 1, lsg->length, qc->pad_len);
3819 pre_n_elem = qc->n_elem;
3820 if (trim_sg && pre_n_elem)
3821 pre_n_elem--;
3823 if (!pre_n_elem) {
3824 n_elem = 0;
3825 goto skip_map;
3828 dir = qc->dma_dir;
3829 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3830 if (n_elem < 1) {
3831 /* restore last sg */
3832 lsg->length += qc->pad_len;
3833 return -1;
3836 DPRINTK("%d sg elements mapped\n", n_elem);
3838 skip_map:
3839 qc->n_elem = n_elem;
3841 return 0;
3845 * swap_buf_le16 - swap halves of 16-bit words in place
3846 * @buf: Buffer to swap
3847 * @buf_words: Number of 16-bit words in buffer.
3849 * Swap halves of 16-bit words if needed to convert from
3850 * little-endian byte order to native cpu byte order, or
3851 * vice-versa.
3853 * LOCKING:
3854 * Inherited from caller.
3856 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3858 #ifdef __BIG_ENDIAN
3859 unsigned int i;
3861 for (i = 0; i < buf_words; i++)
3862 buf[i] = le16_to_cpu(buf[i]);
3863 #endif /* __BIG_ENDIAN */
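/* IDENTIFY data and other PIO data words arrive from the device in
 * little-endian order, so the ID readout path calls
 * swap_buf_le16(id, ATA_ID_WORDS) to get native-endian data; on
 * little-endian hosts the function compiles to a no-op.
 */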
3867 * ata_data_xfer - Transfer data by PIO
3868 * @adev: device to target
3869 * @buf: data buffer
3870 * @buflen: buffer length
3871 * @write_data: read/write
3873 * Transfer data from/to the device data register by PIO.
3875 * LOCKING:
3876 * Inherited from caller.
3878 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3879 unsigned int buflen, int write_data)
3881 struct ata_port *ap = adev->ap;
3882 unsigned int words = buflen >> 1;
3884 /* Transfer multiple of 2 bytes */
3885 if (write_data)
3886 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
3887 else
3888 ioread16_rep(ap->ioaddr.data_addr, buf, words);
3890 /* Transfer trailing 1 byte, if any. */
3891 if (unlikely(buflen & 0x01)) {
3892 u16 align_buf[1] = { 0 };
3893 unsigned char *trailing_buf = buf + buflen - 1;
3895 if (write_data) {
3896 memcpy(align_buf, trailing_buf, 1);
3897 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3898 } else {
3899 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
3900 memcpy(trailing_buf, align_buf, 1);
3906 * ata_data_xfer_noirq - Transfer data by PIO
3907 * @adev: device to target
3908 * @buf: data buffer
3909 * @buflen: buffer length
3910 * @write_data: read/write
3912 * Transfer data from/to the device data register by PIO. Do the
3913 * transfer with interrupts disabled.
3915 * LOCKING:
3916 * Inherited from caller.
3918 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3919 unsigned int buflen, int write_data)
3921 unsigned long flags;
3922 local_irq_save(flags);
3923 ata_data_xfer(adev, buf, buflen, write_data);
3924 local_irq_restore(flags);
3929 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3930 * @qc: Command on going
3932 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3934 * LOCKING:
3935 * Inherited from caller.
3938 static void ata_pio_sector(struct ata_queued_cmd *qc)
3940 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3941 struct scatterlist *sg = qc->__sg;
3942 struct ata_port *ap = qc->ap;
3943 struct page *page;
3944 unsigned int offset;
3945 unsigned char *buf;
3947 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
3948 ap->hsm_task_state = HSM_ST_LAST;
3950 page = sg[qc->cursg].page;
3951 offset = sg[qc->cursg].offset + qc->cursg_ofs;
3953 /* get the current page and offset */
3954 page = nth_page(page, (offset >> PAGE_SHIFT));
3955 offset %= PAGE_SIZE;
3957 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3959 if (PageHighMem(page)) {
3960 unsigned long flags;
3962 /* FIXME: use a bounce buffer */
3963 local_irq_save(flags);
3964 buf = kmap_atomic(page, KM_IRQ0);
3966 /* do the actual data transfer */
3967 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3969 kunmap_atomic(buf, KM_IRQ0);
3970 local_irq_restore(flags);
3971 } else {
3972 buf = page_address(page);
3973 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3976 qc->curbytes += ATA_SECT_SIZE;
3977 qc->cursg_ofs += ATA_SECT_SIZE;
3979 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
3980 qc->cursg++;
3981 qc->cursg_ofs = 0;
3986 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3987 * @qc: Command on going
3989 * Transfer one or many ATA_SECT_SIZE of data from/to the
3990 * ATA device for the DRQ request.
3992 * LOCKING:
3993 * Inherited from caller.
3996 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3998 if (is_multi_taskfile(&qc->tf)) {
3999 /* READ/WRITE MULTIPLE */
4000 unsigned int nsect;
4002 WARN_ON(qc->dev->multi_count == 0);
4004 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
4005 qc->dev->multi_count);
4006 while (nsect--)
4007 ata_pio_sector(qc);
4008 } else
4009 ata_pio_sector(qc);
4013 * atapi_send_cdb - Write CDB bytes to hardware
4014 * @ap: Port to which ATAPI device is attached.
4015 * @qc: Taskfile currently active
4017 * When device has indicated its readiness to accept
4018 * a CDB, this function is called. Send the CDB.
4020 * LOCKING:
4021 * caller.
4024 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4026 /* send SCSI cdb */
4027 DPRINTK("send cdb\n");
4028 WARN_ON(qc->dev->cdb_len < 12);
4030 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4031 ata_altstatus(ap); /* flush */
4033 switch (qc->tf.protocol) {
4034 case ATA_PROT_ATAPI:
4035 ap->hsm_task_state = HSM_ST;
4036 break;
4037 case ATA_PROT_ATAPI_NODATA:
4038 ap->hsm_task_state = HSM_ST_LAST;
4039 break;
4040 case ATA_PROT_ATAPI_DMA:
4041 ap->hsm_task_state = HSM_ST_LAST;
4042 /* initiate bmdma */
4043 ap->ops->bmdma_start(qc);
4044 break;
4049 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4050 * @qc: Command on going
4051 * @bytes: number of bytes
4053 * Transfer data from/to the ATAPI device.
4055 * LOCKING:
4056 * Inherited from caller.
4060 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4062 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4063 struct scatterlist *sg = qc->__sg;
4064 struct ata_port *ap = qc->ap;
4065 struct page *page;
4066 unsigned char *buf;
4067 unsigned int offset, count;
4069 if (qc->curbytes + bytes >= qc->nbytes)
4070 ap->hsm_task_state = HSM_ST_LAST;
4072 next_sg:
4073 if (unlikely(qc->cursg >= qc->n_elem)) {
4075 * The end of qc->sg is reached and the device expects
4076 * more data to transfer. In order not to overrun qc->sg
4077 * and to fulfill the length specified in the byte count register:
4078 * - for the read case, discard trailing data from the device
4079 * - for the write case, pad with zero data to the device
4081 u16 pad_buf[1] = { 0 };
4082 unsigned int words = bytes >> 1;
4083 unsigned int i;
4085 if (words) /* warning if bytes > 1 */
4086 ata_dev_printk(qc->dev, KERN_WARNING,
4087 "%u bytes trailing data\n", bytes);
4089 for (i = 0; i < words; i++)
4090 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4092 ap->hsm_task_state = HSM_ST_LAST;
4093 return;
4096 sg = &qc->__sg[qc->cursg];
4098 page = sg->page;
4099 offset = sg->offset + qc->cursg_ofs;
4101 /* get the current page and offset */
4102 page = nth_page(page, (offset >> PAGE_SHIFT));
4103 offset %= PAGE_SIZE;
4105 /* don't overrun current sg */
4106 count = min(sg->length - qc->cursg_ofs, bytes);
4108 /* don't cross page boundaries */
4109 count = min(count, (unsigned int)PAGE_SIZE - offset);
4111 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4113 if (PageHighMem(page)) {
4114 unsigned long flags;
4116 /* FIXME: use bounce buffer */
4117 local_irq_save(flags);
4118 buf = kmap_atomic(page, KM_IRQ0);
4120 /* do the actual data transfer */
4121 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4123 kunmap_atomic(buf, KM_IRQ0);
4124 local_irq_restore(flags);
4125 } else {
4126 buf = page_address(page);
4127 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4130 bytes -= count;
4131 qc->curbytes += count;
4132 qc->cursg_ofs += count;
4134 if (qc->cursg_ofs == sg->length) {
4135 qc->cursg++;
4136 qc->cursg_ofs = 0;
4139 if (bytes)
4140 goto next_sg;
4144 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4145 * @qc: Command on going
4147 * Transfer data from/to the ATAPI device.
4149 * LOCKING:
4150 * Inherited from caller.
4153 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4155 struct ata_port *ap = qc->ap;
4156 struct ata_device *dev = qc->dev;
4157 unsigned int ireason, bc_lo, bc_hi, bytes;
4158 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4160 /* Abuse qc->result_tf for temp storage of intermediate TF
4161 * here to save some kernel stack usage.
4162 * For normal completion, qc->result_tf is not relevant. For
4163 * error, qc->result_tf is later overwritten by ata_qc_complete().
4164 * So, the correctness of qc->result_tf is not affected.
4166 ap->ops->tf_read(ap, &qc->result_tf);
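/* For ATAPI PIO the taskfile registers are reused by the device: the
 * sector count register carries the interrupt reason (CoD in bit 0,
 * I/O in bit 1) and LBA mid/high carry the byte count of this DRQ
 * data block.
 */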
4167 ireason = qc->result_tf.nsect;
4168 bc_lo = qc->result_tf.lbam;
4169 bc_hi = qc->result_tf.lbah;
4170 bytes = (bc_hi << 8) | bc_lo;
4172 /* shall be cleared to zero, indicating xfer of data */
4173 if (ireason & (1 << 0))
4174 goto err_out;
4176 /* make sure transfer direction matches expected */
4177 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4178 if (do_write != i_write)
4179 goto err_out;
4181 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4183 __atapi_pio_bytes(qc, bytes);
4185 return;
4187 err_out:
4188 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4189 qc->err_mask |= AC_ERR_HSM;
4190 ap->hsm_task_state = HSM_ST_ERR;
4194 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4195 * @ap: the target ata_port
4196 * @qc: qc on going
4198 * RETURNS:
4199 * 1 if ok in workqueue, 0 otherwise.
4202 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4204 if (qc->tf.flags & ATA_TFLAG_POLLING)
4205 return 1;
4207 if (ap->hsm_task_state == HSM_ST_FIRST) {
4208 if (qc->tf.protocol == ATA_PROT_PIO &&
4209 (qc->tf.flags & ATA_TFLAG_WRITE))
4210 return 1;
4212 if (is_atapi_taskfile(&qc->tf) &&
4213 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4214 return 1;
4217 return 0;
4221 * ata_hsm_qc_complete - finish a qc running on standard HSM
4222 * @qc: Command to complete
4223 * @in_wq: 1 if called from workqueue, 0 otherwise
4225 * Finish @qc which is running on standard HSM.
4227 * LOCKING:
4228 * If @in_wq is zero, spin_lock_irqsave(host lock).
4229 * Otherwise, none on entry and grabs host lock.
4231 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4233 struct ata_port *ap = qc->ap;
4234 unsigned long flags;
4236 if (ap->ops->error_handler) {
4237 if (in_wq) {
4238 spin_lock_irqsave(ap->lock, flags);
4240 /* EH might have kicked in while host lock is
4241 * released.
4243 qc = ata_qc_from_tag(ap, qc->tag);
4244 if (qc) {
4245 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4246 ap->ops->irq_on(ap);
4247 ata_qc_complete(qc);
4248 } else
4249 ata_port_freeze(ap);
4252 spin_unlock_irqrestore(ap->lock, flags);
4253 } else {
4254 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4255 ata_qc_complete(qc);
4256 else
4257 ata_port_freeze(ap);
4259 } else {
4260 if (in_wq) {
4261 spin_lock_irqsave(ap->lock, flags);
4262 ap->ops->irq_on(ap);
4263 ata_qc_complete(qc);
4264 spin_unlock_irqrestore(ap->lock, flags);
4265 } else
4266 ata_qc_complete(qc);
4269 ata_altstatus(ap); /* flush */
4273 * ata_hsm_move - move the HSM to the next state.
4274 * @ap: the target ata_port
4275 * @qc: qc on going
4276 * @status: current device status
4277 * @in_wq: 1 if called from workqueue, 0 otherwise
4279 * RETURNS:
4280 * 1 when poll next status needed, 0 otherwise.
4282 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4283 u8 status, int in_wq)
4285 unsigned long flags = 0;
4286 int poll_next;
4288 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4290 /* Make sure ata_qc_issue_prot() does not throw things
4291 * like DMA polling into the workqueue. Notice that
4292 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4294 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
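/* Rough flow of the state machine below: HSM_ST_FIRST sends the ATAPI
 * CDB or the first PIO-out data block, HSM_ST transfers a data block
 * per DRQ assertion, HSM_ST_LAST checks final status and completes
 * the qc, and protocol violations route the qc to the error handler
 * via HSM_ST_ERR.
 */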
4296 fsm_start:
4297 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4298 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4300 switch (ap->hsm_task_state) {
4301 case HSM_ST_FIRST:
4302 /* Send first data block or PACKET CDB */
4304 /* If polling, we will stay in the work queue after
4305 * sending the data. Otherwise, interrupt handler
4306 * takes over after sending the data.
4308 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4310 /* check device status */
4311 if (unlikely((status & ATA_DRQ) == 0)) {
4312 /* handle BSY=0, DRQ=0 as error */
4313 if (likely(status & (ATA_ERR | ATA_DF)))
4314 /* device stops HSM for abort/error */
4315 qc->err_mask |= AC_ERR_DEV;
4316 else
4317 /* HSM violation. Let EH handle this */
4318 qc->err_mask |= AC_ERR_HSM;
4320 ap->hsm_task_state = HSM_ST_ERR;
4321 goto fsm_start;
4324 /* Device should not ask for data transfer (DRQ=1)
4325 * when it finds something wrong.
4326 * We ignore DRQ here and stop the HSM by
4327 * changing hsm_task_state to HSM_ST_ERR and
4328 * let the EH abort the command or reset the device.
4330 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4331 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4332 "error, dev_stat 0x%X\n", status);
4333 qc->err_mask |= AC_ERR_HSM;
4334 ap->hsm_task_state = HSM_ST_ERR;
4335 goto fsm_start;
4338 /* Send the CDB (atapi) or the first data block (ata pio out).
4339 * During the state transition, interrupt handler shouldn't
4340 * be invoked before the data transfer is complete and
4341 * hsm_task_state is changed. Hence, the following locking.
4343 if (in_wq)
4344 spin_lock_irqsave(ap->lock, flags);
4346 if (qc->tf.protocol == ATA_PROT_PIO) {
4347 /* PIO data out protocol.
4348 * send first data block.
4351 /* ata_pio_sectors() might change the state
4352 * to HSM_ST_LAST. so, the state is changed here
4353 * before ata_pio_sectors().
4355 ap->hsm_task_state = HSM_ST;
4356 ata_pio_sectors(qc);
4357 ata_altstatus(ap); /* flush */
4358 } else
4359 /* send CDB */
4360 atapi_send_cdb(ap, qc);
4362 if (in_wq)
4363 spin_unlock_irqrestore(ap->lock, flags);
4365 /* if polling, ata_pio_task() handles the rest.
4366 * otherwise, interrupt handler takes over from here.
4368 break;
4370 case HSM_ST:
4371 /* complete command or read/write the data register */
4372 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4373 /* ATAPI PIO protocol */
4374 if ((status & ATA_DRQ) == 0) {
4375 /* No more data to transfer or device error.
4376 * Device error will be tagged in HSM_ST_LAST.
4378 ap->hsm_task_state = HSM_ST_LAST;
4379 goto fsm_start;
4382 /* Device should not ask for data transfer (DRQ=1)
4383 * when it finds something wrong.
4384 * We ignore DRQ here and stop the HSM by
4385 * changing hsm_task_state to HSM_ST_ERR and
4386 * let the EH abort the command or reset the device.
4388 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4389 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4390 "device error, dev_stat 0x%X\n",
4391 status);
4392 qc->err_mask |= AC_ERR_HSM;
4393 ap->hsm_task_state = HSM_ST_ERR;
4394 goto fsm_start;
4397 atapi_pio_bytes(qc);
4399 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4400 /* bad ireason reported by device */
4401 goto fsm_start;
4403 } else {
4404 /* ATA PIO protocol */
4405 if (unlikely((status & ATA_DRQ) == 0)) {
4406 /* handle BSY=0, DRQ=0 as error */
4407 if (likely(status & (ATA_ERR | ATA_DF)))
4408 /* device stops HSM for abort/error */
4409 qc->err_mask |= AC_ERR_DEV;
4410 else
4411 /* HSM violation. Let EH handle this.
4412 * Phantom devices also trigger this
4413 * condition. Mark hint.
4415 qc->err_mask |= AC_ERR_HSM |
4416 AC_ERR_NODEV_HINT;
4418 ap->hsm_task_state = HSM_ST_ERR;
4419 goto fsm_start;
4422 /* For PIO reads, some devices may ask for
4423 * data transfer (DRQ=1) along with ERR=1.
4424 * We respect DRQ here and transfer one
4425 * block of junk data before changing the
4426 * hsm_task_state to HSM_ST_ERR.
4428 * For PIO writes, ERR=1 DRQ=1 doesn't make
4429 * sense since the data block has been
4430 * transferred to the device.
4432 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4433 /* data might be corrupted */
4434 qc->err_mask |= AC_ERR_DEV;
4436 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4437 ata_pio_sectors(qc);
4438 ata_altstatus(ap);
4439 status = ata_wait_idle(ap);
4442 if (status & (ATA_BUSY | ATA_DRQ))
4443 qc->err_mask |= AC_ERR_HSM;
4445 /* ata_pio_sectors() might change the
4446 * state to HSM_ST_LAST. so, the state
4447 * is changed after ata_pio_sectors().
4449 ap->hsm_task_state = HSM_ST_ERR;
4450 goto fsm_start;
4453 ata_pio_sectors(qc);
4455 if (ap->hsm_task_state == HSM_ST_LAST &&
4456 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4457 /* all data read */
4458 ata_altstatus(ap);
4459 status = ata_wait_idle(ap);
4460 goto fsm_start;
4464 ata_altstatus(ap); /* flush */
4465 poll_next = 1;
4466 break;
4468 case HSM_ST_LAST:
4469 if (unlikely(!ata_ok(status))) {
4470 qc->err_mask |= __ac_err_mask(status);
4471 ap->hsm_task_state = HSM_ST_ERR;
4472 goto fsm_start;
4475 /* no more data to transfer */
4476 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4477 ap->print_id, qc->dev->devno, status);
4479 WARN_ON(qc->err_mask);
4481 ap->hsm_task_state = HSM_ST_IDLE;
4483 /* complete taskfile transaction */
4484 ata_hsm_qc_complete(qc, in_wq);
4486 poll_next = 0;
4487 break;
4489 case HSM_ST_ERR:
4490 /* make sure qc->err_mask is available to
4491 * know what's wrong and recover
4493 WARN_ON(qc->err_mask == 0);
4495 ap->hsm_task_state = HSM_ST_IDLE;
4497 /* complete taskfile transaction */
4498 ata_hsm_qc_complete(qc, in_wq);
4500 poll_next = 0;
4501 break;
4502 default:
4503 poll_next = 0;
4504 BUG();
4507 return poll_next;
4510 static void ata_pio_task(struct work_struct *work)
4512 struct ata_port *ap =
4513 container_of(work, struct ata_port, port_task.work);
4514 struct ata_queued_cmd *qc = ap->port_task_data;
4515 u8 status;
4516 int poll_next;
4518 fsm_start:
4519 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4522 * This is purely heuristic. This is a fast path.
4523 * Sometimes when we enter, BSY will be cleared in
4524 * a chk-status or two. If not, the drive is probably seeking
4525 * or something. Snooze for a couple msecs, then
4526 * chk-status again. If still busy, queue delayed work.
4528 status = ata_busy_wait(ap, ATA_BUSY, 5);
4529 if (status & ATA_BUSY) {
4530 msleep(2);
4531 status = ata_busy_wait(ap, ATA_BUSY, 10);
4532 if (status & ATA_BUSY) {
4533 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4534 return;
4538 /* move the HSM */
4539 poll_next = ata_hsm_move(ap, qc, status, 1);
4541 /* another command or interrupt handler
4542 * may be running at this point.
4544 if (poll_next)
4545 goto fsm_start;
4549 * ata_qc_new - Request an available ATA command, for queueing
4550 * @ap: Port from which to request an available command structure
4553 * LOCKING:
4554 * None.
4557 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4559 struct ata_queued_cmd *qc = NULL;
4560 unsigned int i;
4562 /* no command while frozen */
4563 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4564 return NULL;
4566 /* the last tag is reserved for internal command. */
4567 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4568 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4569 qc = __ata_qc_from_tag(ap, i);
4570 break;
4573 if (qc)
4574 qc->tag = i;
4576 return qc;
4580 * ata_qc_new_init - Request an available ATA command, and initialize it
4581 * @dev: Device from whom we request an available command structure
4583 * LOCKING:
4584 * None.
4587 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4589 struct ata_port *ap = dev->ap;
4590 struct ata_queued_cmd *qc;
4592 qc = ata_qc_new(ap);
4593 if (qc) {
4594 qc->scsicmd = NULL;
4595 qc->ap = ap;
4596 qc->dev = dev;
4598 ata_qc_reinit(qc);
4601 return qc;
4605 * ata_qc_free - free unused ata_queued_cmd
4606 * @qc: Command to free
4608 * Designed to free unused ata_queued_cmd object
4609 * in case something prevents using it.
4611 * LOCKING:
4612 * spin_lock_irqsave(host lock)
4614 void ata_qc_free(struct ata_queued_cmd *qc)
4616 struct ata_port *ap = qc->ap;
4617 unsigned int tag;
4619 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4621 qc->flags = 0;
4622 tag = qc->tag;
4623 if (likely(ata_tag_valid(tag))) {
4624 qc->tag = ATA_TAG_POISON;
4625 clear_bit(tag, &ap->qc_allocated);
4629 void __ata_qc_complete(struct ata_queued_cmd *qc)
4631 struct ata_port *ap = qc->ap;
4633 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4634 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4636 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4637 ata_sg_clean(qc);
4639 /* command should be marked inactive atomically with qc completion */
4640 if (qc->tf.protocol == ATA_PROT_NCQ)
4641 ap->sactive &= ~(1 << qc->tag);
4642 else
4643 ap->active_tag = ATA_TAG_POISON;
4645 /* atapi: mark qc as inactive to prevent the interrupt handler
4646 * from completing the command twice later, before the error handler
4647 * is called. (when rc != 0 and atapi request sense is needed)
4649 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4650 ap->qc_active &= ~(1 << qc->tag);
4652 /* call completion callback */
4653 qc->complete_fn(qc);
4656 static void fill_result_tf(struct ata_queued_cmd *qc)
4658 struct ata_port *ap = qc->ap;
4660 ap->ops->tf_read(ap, &qc->result_tf);
4661 qc->result_tf.flags = qc->tf.flags;
4665 * ata_qc_complete - Complete an active ATA command
4666 * @qc: Command to complete
4669 * Indicate to the mid and upper layers that an ATA
4670 * command has completed, with either an ok or not-ok status.
4672 * LOCKING:
4673 * spin_lock_irqsave(host lock)
4675 void ata_qc_complete(struct ata_queued_cmd *qc)
4677 struct ata_port *ap = qc->ap;
4679 /* XXX: New EH and old EH use different mechanisms to
4680 * synchronize EH with regular execution path.
4682 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4683 * Normal execution path is responsible for not accessing a
4684 * failed qc. libata core enforces the rule by returning NULL
4685 * from ata_qc_from_tag() for failed qcs.
4687 * Old EH depends on ata_qc_complete() nullifying completion
4688 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4689 * not synchronize with interrupt handler. Only PIO task is
4690 * taken care of.
4692 if (ap->ops->error_handler) {
4693 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4695 if (unlikely(qc->err_mask))
4696 qc->flags |= ATA_QCFLAG_FAILED;
4698 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4699 if (!ata_tag_internal(qc->tag)) {
4700 /* always fill result TF for failed qc */
4701 fill_result_tf(qc);
4702 ata_qc_schedule_eh(qc);
4703 return;
4707 /* read result TF if requested */
4708 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4709 fill_result_tf(qc);
4711 __ata_qc_complete(qc);
4712 } else {
4713 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4714 return;
4716 /* read result TF if failed or requested */
4717 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4718 fill_result_tf(qc);
4720 __ata_qc_complete(qc);
4725 * ata_qc_complete_multiple - Complete multiple qcs successfully
4726 * @ap: port in question
4727 * @qc_active: new qc_active mask
4728 * @finish_qc: LLDD callback invoked before completing a qc
4730 * Complete in-flight commands. This function is meant to be
4731 * called from the low-level driver's interrupt routine to complete
4732 * requests normally. ap->qc_active and @qc_active are compared
4733 * and commands are completed accordingly.
4735 * LOCKING:
4736 * spin_lock_irqsave(host lock)
4738 * RETURNS:
4739 * Number of completed commands on success, -errno otherwise.
4741 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4742 void (*finish_qc)(struct ata_queued_cmd *))
4744 int nr_done = 0;
4745 u32 done_mask;
4746 int i;
4748 done_mask = ap->qc_active ^ qc_active;
4750 if (unlikely(done_mask & qc_active)) {
4751 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4752 "(%08x->%08x)\n", ap->qc_active, qc_active);
4753 return -EINVAL;
4756 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4757 struct ata_queued_cmd *qc;
4759 if (!(done_mask & (1 << i)))
4760 continue;
4762 if ((qc = ata_qc_from_tag(ap, i))) {
4763 if (finish_qc)
4764 finish_qc(qc);
4765 ata_qc_complete(qc);
4766 nr_done++;
4770 return nr_done;
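/*
 * Illustrative sketch, not part of the original source: how an NCQ-capable
 * LLDD's interrupt path might use ata_qc_complete_multiple().  The
 * example_ncq_intr() name, the port_mmio argument and the 0x34 "active tags"
 * register offset are hypothetical; the caller is assumed to hold the host
 * lock, as an interrupt handler would.
 */
static void example_ncq_intr(struct ata_port *ap, void __iomem *port_mmio)
{
	u32 qc_active;

	/* hardware reports which command tags are still outstanding */
	qc_active = readl(port_mmio + 0x34);

	/* complete every tag that dropped out of ap->qc_active */
	if (ata_qc_complete_multiple(ap, qc_active, NULL) < 0) {
		/* illegal tag transition; freeze the port and let EH recover */
		ata_port_freeze(ap);
	}
}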
4773 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4775 struct ata_port *ap = qc->ap;
4777 switch (qc->tf.protocol) {
4778 case ATA_PROT_NCQ:
4779 case ATA_PROT_DMA:
4780 case ATA_PROT_ATAPI_DMA:
4781 return 1;
4783 case ATA_PROT_ATAPI:
4784 case ATA_PROT_PIO:
4785 if (ap->flags & ATA_FLAG_PIO_DMA)
4786 return 1;
4788 /* fall through */
4790 default:
4791 return 0;
4794 /* never reached */
4798 * ata_qc_issue - issue taskfile to device
4799 * @qc: command to issue to device
4801 * Prepare an ATA command for submission to the device.
4802 * This includes mapping the data into a DMA-able
4803 * area, filling in the S/G table, and finally
4804 * writing the taskfile to hardware, starting the command.
4806 * LOCKING:
4807 * spin_lock_irqsave(host lock)
4809 void ata_qc_issue(struct ata_queued_cmd *qc)
4811 struct ata_port *ap = qc->ap;
4813 /* Make sure only one non-NCQ command is outstanding. The
4814 * check is skipped for old EH because it reuses active qc to
4815 * request ATAPI sense.
4817 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4819 if (qc->tf.protocol == ATA_PROT_NCQ) {
4820 WARN_ON(ap->sactive & (1 << qc->tag));
4821 ap->sactive |= 1 << qc->tag;
4822 } else {
4823 WARN_ON(ap->sactive);
4824 ap->active_tag = qc->tag;
4827 qc->flags |= ATA_QCFLAG_ACTIVE;
4828 ap->qc_active |= 1 << qc->tag;
4830 if (ata_should_dma_map(qc)) {
4831 if (qc->flags & ATA_QCFLAG_SG) {
4832 if (ata_sg_setup(qc))
4833 goto sg_err;
4834 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4835 if (ata_sg_setup_one(qc))
4836 goto sg_err;
4838 } else {
4839 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4842 ap->ops->qc_prep(qc);
4844 qc->err_mask |= ap->ops->qc_issue(qc);
4845 if (unlikely(qc->err_mask))
4846 goto err;
4847 return;
4849 sg_err:
4850 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4851 qc->err_mask |= AC_ERR_SYSTEM;
4852 err:
4853 ata_qc_complete(qc);
4857 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4858 * @qc: command to issue to device
4860 * Using various libata functions and hooks, this function
4861 * starts an ATA command. ATA commands are grouped into
4862 * classes called "protocols", and issuing each type of protocol
4863 * is slightly different.
4865 * May be used as the qc_issue() entry in ata_port_operations.
4867 * LOCKING:
4868 * spin_lock_irqsave(host lock)
4870 * RETURNS:
4871 * Zero on success, AC_ERR_* mask on failure
4874 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4876 struct ata_port *ap = qc->ap;
4878 /* Use polling pio if the LLD doesn't handle
4879 * interrupt driven pio and atapi CDB interrupt.
4881 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4882 switch (qc->tf.protocol) {
4883 case ATA_PROT_PIO:
4884 case ATA_PROT_NODATA:
4885 case ATA_PROT_ATAPI:
4886 case ATA_PROT_ATAPI_NODATA:
4887 qc->tf.flags |= ATA_TFLAG_POLLING;
4888 break;
4889 case ATA_PROT_ATAPI_DMA:
4890 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4891 /* see ata_dma_blacklisted() */
4892 BUG();
4893 break;
4894 default:
4895 break;
4899 /* Some controllers show flaky interrupt behavior after
4900 * setting xfer mode. Use polling instead.
4902 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4903 qc->tf.feature == SETFEATURES_XFER) &&
4904 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4905 qc->tf.flags |= ATA_TFLAG_POLLING;
4907 /* select the device */
4908 ata_dev_select(ap, qc->dev->devno, 1, 0);
4910 /* start the command */
4911 switch (qc->tf.protocol) {
4912 case ATA_PROT_NODATA:
4913 if (qc->tf.flags & ATA_TFLAG_POLLING)
4914 ata_qc_set_polling(qc);
4916 ata_tf_to_host(ap, &qc->tf);
4917 ap->hsm_task_state = HSM_ST_LAST;
4919 if (qc->tf.flags & ATA_TFLAG_POLLING)
4920 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4922 break;
4924 case ATA_PROT_DMA:
4925 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4927 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4928 ap->ops->bmdma_setup(qc); /* set up bmdma */
4929 ap->ops->bmdma_start(qc); /* initiate bmdma */
4930 ap->hsm_task_state = HSM_ST_LAST;
4931 break;
4933 case ATA_PROT_PIO:
4934 if (qc->tf.flags & ATA_TFLAG_POLLING)
4935 ata_qc_set_polling(qc);
4937 ata_tf_to_host(ap, &qc->tf);
4939 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4940 /* PIO data out protocol */
4941 ap->hsm_task_state = HSM_ST_FIRST;
4942 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4944 /* always send first data block using
4945 * the ata_pio_task() codepath.
4947 } else {
4948 /* PIO data in protocol */
4949 ap->hsm_task_state = HSM_ST;
4951 if (qc->tf.flags & ATA_TFLAG_POLLING)
4952 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4954 /* if polling, ata_pio_task() handles the rest.
4955 * otherwise, interrupt handler takes over from here.
4959 break;
4961 case ATA_PROT_ATAPI:
4962 case ATA_PROT_ATAPI_NODATA:
4963 if (qc->tf.flags & ATA_TFLAG_POLLING)
4964 ata_qc_set_polling(qc);
4966 ata_tf_to_host(ap, &qc->tf);
4968 ap->hsm_task_state = HSM_ST_FIRST;
4970 /* send cdb by polling if no cdb interrupt */
4971 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4972 (qc->tf.flags & ATA_TFLAG_POLLING))
4973 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4974 break;
4976 case ATA_PROT_ATAPI_DMA:
4977 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4979 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4980 ap->ops->bmdma_setup(qc); /* set up bmdma */
4981 ap->hsm_task_state = HSM_ST_FIRST;
4983 /* send cdb by polling if no cdb interrupt */
4984 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4985 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4986 break;
4988 default:
4989 WARN_ON(1);
4990 return AC_ERR_SYSTEM;
4993 return 0;
4997 * ata_host_intr - Handle host interrupt for given (port, task)
4998 * @ap: Port on which interrupt arrived (possibly...)
4999 * @qc: Taskfile currently active in engine
5001 * Handle host interrupt for given queued command. Currently,
5002 * only DMA interrupts are handled. All other commands are
5003 * handled via polling with interrupts disabled (nIEN bit).
5005 * LOCKING:
5006 * spin_lock_irqsave(host lock)
5008 * RETURNS:
5009 * One if interrupt was handled, zero if not (shared irq).
5012 inline unsigned int ata_host_intr (struct ata_port *ap,
5013 struct ata_queued_cmd *qc)
5015 struct ata_eh_info *ehi = &ap->eh_info;
5016 u8 status, host_stat = 0;
5018 VPRINTK("ata%u: protocol %d task_state %d\n",
5019 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5021 /* Check whether we are expecting interrupt in this state */
5022 switch (ap->hsm_task_state) {
5023 case HSM_ST_FIRST:
5024 /* Some pre-ATAPI-4 devices assert INTRQ
5025 * at this state when ready to receive CDB.
5028 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5029 * The flag was turned on only for atapi devices.
5030 * No need to check is_atapi_taskfile(&qc->tf) again.
5032 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5033 goto idle_irq;
5034 break;
5035 case HSM_ST_LAST:
5036 if (qc->tf.protocol == ATA_PROT_DMA ||
5037 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5038 /* check status of DMA engine */
5039 host_stat = ap->ops->bmdma_status(ap);
5040 VPRINTK("ata%u: host_stat 0x%X\n",
5041 ap->print_id, host_stat);
5043 /* if it's not our irq... */
5044 if (!(host_stat & ATA_DMA_INTR))
5045 goto idle_irq;
5047 /* before we do anything else, clear DMA-Start bit */
5048 ap->ops->bmdma_stop(qc);
5050 if (unlikely(host_stat & ATA_DMA_ERR)) {
5051 /* error when transferring data to/from memory */
5052 qc->err_mask |= AC_ERR_HOST_BUS;
5053 ap->hsm_task_state = HSM_ST_ERR;
5056 break;
5057 case HSM_ST:
5058 break;
5059 default:
5060 goto idle_irq;
5063 /* check altstatus */
5064 status = ata_altstatus(ap);
5065 if (status & ATA_BUSY)
5066 goto idle_irq;
5068 /* check main status, clearing INTRQ */
5069 status = ata_chk_status(ap);
5070 if (unlikely(status & ATA_BUSY))
5071 goto idle_irq;
5073 /* ack bmdma irq events */
5074 ap->ops->irq_clear(ap);
5076 ata_hsm_move(ap, qc, status, 0);
5078 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5079 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5080 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5082 return 1; /* irq handled */
5084 idle_irq:
5085 ap->stats.idle_irq++;
5087 #ifdef ATA_IRQ_TRAP
5088 if ((ap->stats.idle_irq % 1000) == 0) {
5089 ap->ops->irq_ack(ap, 0); /* debug trap */
5090 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5091 return 1;
5093 #endif
5094 return 0; /* irq not handled */
5098 * ata_interrupt - Default ATA host interrupt handler
5099 * @irq: irq line (unused)
5100 * @dev_instance: pointer to our ata_host information structure
5102 * Default interrupt handler for PCI IDE devices. Calls
5103 * ata_host_intr() for each port that is not disabled.
5105 * LOCKING:
5106 * Obtains host lock during operation.
5108 * RETURNS:
5109 * IRQ_NONE or IRQ_HANDLED.
5112 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5114 struct ata_host *host = dev_instance;
5115 unsigned int i;
5116 unsigned int handled = 0;
5117 unsigned long flags;
5119 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5120 spin_lock_irqsave(&host->lock, flags);
5122 for (i = 0; i < host->n_ports; i++) {
5123 struct ata_port *ap;
5125 ap = host->ports[i];
5126 if (ap &&
5127 !(ap->flags & ATA_FLAG_DISABLED)) {
5128 struct ata_queued_cmd *qc;
5130 qc = ata_qc_from_tag(ap, ap->active_tag);
5131 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5132 (qc->flags & ATA_QCFLAG_ACTIVE))
5133 handled |= ata_host_intr(ap, qc);
5137 spin_unlock_irqrestore(&host->lock, flags);
5139 return IRQ_RETVAL(handled);
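/*
 * Illustrative sketch, not part of the original source: the hooks a
 * conventional BMDMA driver typically wires to the default helpers exported
 * from this file, including ata_interrupt() above.  "example_port_ops" is a
 * hypothetical name and only a subset of ata_port_operations is shown.
 */
static const struct ata_port_operations example_port_ops = {
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.check_status	= ata_check_status,
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.irq_handler	= ata_interrupt,	/* default host interrupt handler */
	.irq_clear	= ata_bmdma_irq_clear,
	.port_start	= ata_port_start,
};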
5143 * sata_scr_valid - test whether SCRs are accessible
5144 * @ap: ATA port to test SCR accessibility for
5146 * Test whether SCRs are accessible for @ap.
5148 * LOCKING:
5149 * None.
5151 * RETURNS:
5152 * 1 if SCRs are accessible, 0 otherwise.
5154 int sata_scr_valid(struct ata_port *ap)
5156 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5160 * sata_scr_read - read SCR register of the specified port
5161 * @ap: ATA port to read SCR for
5162 * @reg: SCR to read
5163 * @val: Place to store read value
5165 * Read SCR register @reg of @ap into *@val. This function is
5166 * guaranteed to succeed if the cable type of the port is SATA
5167 * and the port implements ->scr_read.
5169 * LOCKING:
5170 * None.
5172 * RETURNS:
5173 * 0 on success, negative errno on failure.
5175 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5177 if (sata_scr_valid(ap)) {
5178 *val = ap->ops->scr_read(ap, reg);
5179 return 0;
5181 return -EOPNOTSUPP;
5185 * sata_scr_write - write SCR register of the specified port
5186 * @ap: ATA port to write SCR for
5187 * @reg: SCR to write
5188 * @val: value to write
5190 * Write @val to SCR register @reg of @ap. This function is
5191 * guaranteed to succeed if the cable type of the port is SATA
5192 * and the port implements ->scr_read.
5194 * LOCKING:
5195 * None.
5197 * RETURNS:
5198 * 0 on success, negative errno on failure.
5200 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5202 if (sata_scr_valid(ap)) {
5203 ap->ops->scr_write(ap, reg, val);
5204 return 0;
5206 return -EOPNOTSUPP;
5210 * sata_scr_write_flush - write SCR register of the specified port and flush
5211 * @ap: ATA port to write SCR for
5212 * @reg: SCR to write
5213 * @val: value to write
5215 * This function is identical to sata_scr_write() except that this
5216 * function performs a flush after writing to the register.
5218 * LOCKING:
5219 * None.
5221 * RETURNS:
5222 * 0 on success, negative errno on failure.
5224 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5226 if (sata_scr_valid(ap)) {
5227 ap->ops->scr_write(ap, reg, val);
5228 ap->ops->scr_read(ap, reg);
5229 return 0;
5231 return -EOPNOTSUPP;
5235 * ata_port_online - test whether the given port is online
5236 * @ap: ATA port to test
5238 * Test whether @ap is online. Note that this function returns 0
5239 * if online status of @ap cannot be obtained, so
5240 * ata_port_online(ap) != !ata_port_offline(ap).
5242 * LOCKING:
5243 * None.
5245 * RETURNS:
5246 * 1 if the port online status is available and online.
5248 int ata_port_online(struct ata_port *ap)
5250 u32 sstatus;
5252 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5253 return 1;
5254 return 0;
5258 * ata_port_offline - test whether the given port is offline
5259 * @ap: ATA port to test
5261 * Test whether @ap is offline. Note that this function returns
5262 * 0 if offline status of @ap cannot be obtained, so
5263 * ata_port_online(ap) != !ata_port_offline(ap).
5265 * LOCKING:
5266 * None.
5268 * RETURNS:
5269 * 1 if the port offline status is available and offline.
5271 int ata_port_offline(struct ata_port *ap)
5273 u32 sstatus;
5275 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5276 return 1;
5277 return 0;
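/*
 * Illustrative sketch, not part of the original source: typical use of the
 * SCR helpers above from a driver's error handling or hotplug path.  The
 * example_clear_serror() name is hypothetical.
 */
static void example_clear_serror(struct ata_port *ap)
{
	u32 serror;

	/* SError is write-1-to-clear: read it back and write the same value */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	if (ata_port_online(ap))
		ata_port_printk(ap, KERN_INFO, "SATA link online\n");
}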
5280 int ata_flush_cache(struct ata_device *dev)
5282 unsigned int err_mask;
5283 u8 cmd;
5285 if (!ata_try_flush_cache(dev))
5286 return 0;
5288 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5289 cmd = ATA_CMD_FLUSH_EXT;
5290 else
5291 cmd = ATA_CMD_FLUSH;
5293 err_mask = ata_do_simple_cmd(dev, cmd);
5294 if (err_mask) {
5295 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5296 return -EIO;
5299 return 0;
5302 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5303 unsigned int action, unsigned int ehi_flags,
5304 int wait)
5306 unsigned long flags;
5307 int i, rc;
5309 for (i = 0; i < host->n_ports; i++) {
5310 struct ata_port *ap = host->ports[i];
5312 /* Previous resume operation might still be in
5313 * progress. Wait for PM_PENDING to clear.
5315 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5316 ata_port_wait_eh(ap);
5317 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5320 /* request PM ops to EH */
5321 spin_lock_irqsave(ap->lock, flags);
5323 ap->pm_mesg = mesg;
5324 if (wait) {
5325 rc = 0;
5326 ap->pm_result = &rc;
5329 ap->pflags |= ATA_PFLAG_PM_PENDING;
5330 ap->eh_info.action |= action;
5331 ap->eh_info.flags |= ehi_flags;
5333 ata_port_schedule_eh(ap);
5335 spin_unlock_irqrestore(ap->lock, flags);
5337 /* wait and check result */
5338 if (wait) {
5339 ata_port_wait_eh(ap);
5340 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5341 if (rc)
5342 return rc;
5346 return 0;
5350 * ata_host_suspend - suspend host
5351 * @host: host to suspend
5352 * @mesg: PM message
5354 * Suspend @host. Actual operation is performed by EH. This
5355 * function requests EH to perform PM operations and waits for EH
5356 * to finish.
5358 * LOCKING:
5359 * Kernel thread context (may sleep).
5361 * RETURNS:
5362 * 0 on success, -errno on failure.
5364 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5366 int i, j, rc;
5368 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5369 if (rc)
5370 goto fail;
5372 /* EH is quiescent now. Fail if we have any ready device.
5373 * This happens if hotplug occurs between completion of device
5374 * suspension and here.
5376 for (i = 0; i < host->n_ports; i++) {
5377 struct ata_port *ap = host->ports[i];
5379 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5380 struct ata_device *dev = &ap->device[j];
5382 if (ata_dev_ready(dev)) {
5383 ata_port_printk(ap, KERN_WARNING,
5384 "suspend failed, device %d "
5385 "still active\n", dev->devno);
5386 rc = -EBUSY;
5387 goto fail;
5392 host->dev->power.power_state = mesg;
5393 return 0;
5395 fail:
5396 ata_host_resume(host);
5397 return rc;
5401 * ata_host_resume - resume host
5402 * @host: host to resume
5404 * Resume @host. Actual operation is performed by EH. This
5405 * function requests EH to perform PM operations and returns.
5406 * Note that all resume operations are performed in parallel.
5408 * LOCKING:
5409 * Kernel thread context (may sleep).
5411 void ata_host_resume(struct ata_host *host)
5413 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5414 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5415 host->dev->power.power_state = PMSG_ON;
5419 * ata_port_start - Set port up for dma.
5420 * @ap: Port to initialize
5422 * Called just after data structures for each port are
5423 * initialized. Allocates space for PRD table.
5425 * May be used as the port_start() entry in ata_port_operations.
5427 * LOCKING:
5428 * Inherited from caller.
5430 int ata_port_start(struct ata_port *ap)
5432 struct device *dev = ap->dev;
5433 int rc;
5435 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5436 GFP_KERNEL);
5437 if (!ap->prd)
5438 return -ENOMEM;
5440 rc = ata_pad_alloc(ap, dev);
5441 if (rc)
5442 return rc;
5444 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5445 (unsigned long long)ap->prd_dma);
5446 return 0;
5450 * ata_dev_init - Initialize an ata_device structure
5451 * @dev: Device structure to initialize
5453 * Initialize @dev in preparation for probing.
5455 * LOCKING:
5456 * Inherited from caller.
5458 void ata_dev_init(struct ata_device *dev)
5460 struct ata_port *ap = dev->ap;
5461 unsigned long flags;
5463 /* SATA spd limit is bound to the first device */
5464 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5466 /* High bits of dev->flags are used to record warm plug
5467 * requests which occur asynchronously. Synchronize using
5468 * host lock.
5470 spin_lock_irqsave(ap->lock, flags);
5471 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5472 spin_unlock_irqrestore(ap->lock, flags);
5474 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5475 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5476 dev->pio_mask = UINT_MAX;
5477 dev->mwdma_mask = UINT_MAX;
5478 dev->udma_mask = UINT_MAX;
5482 * ata_port_init - Initialize an ata_port structure
5483 * @ap: Structure to initialize
5484 * @host: Collection of hosts to which @ap belongs
5485 * @ent: Probe information provided by low-level driver
5486 * @port_no: Port number associated with this ata_port
5488 * Initialize a new ata_port structure.
5490 * LOCKING:
5491 * Inherited from caller.
5493 void ata_port_init(struct ata_port *ap, struct ata_host *host,
5494 const struct ata_probe_ent *ent, unsigned int port_no)
5496 unsigned int i;
5498 ap->lock = &host->lock;
5499 ap->flags = ATA_FLAG_DISABLED;
5500 ap->print_id = ata_print_id++;
5501 ap->ctl = ATA_DEVCTL_OBS;
5502 ap->host = host;
5503 ap->dev = ent->dev;
5504 ap->port_no = port_no;
5505 if (port_no == 1 && ent->pinfo2) {
5506 ap->pio_mask = ent->pinfo2->pio_mask;
5507 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5508 ap->udma_mask = ent->pinfo2->udma_mask;
5509 ap->flags |= ent->pinfo2->flags;
5510 ap->ops = ent->pinfo2->port_ops;
5511 } else {
5512 ap->pio_mask = ent->pio_mask;
5513 ap->mwdma_mask = ent->mwdma_mask;
5514 ap->udma_mask = ent->udma_mask;
5515 ap->flags |= ent->port_flags;
5516 ap->ops = ent->port_ops;
5518 ap->hw_sata_spd_limit = UINT_MAX;
5519 ap->active_tag = ATA_TAG_POISON;
5520 ap->last_ctl = 0xFF;
5522 #if defined(ATA_VERBOSE_DEBUG)
5523 /* turn on all debugging levels */
5524 ap->msg_enable = 0x00FF;
5525 #elif defined(ATA_DEBUG)
5526 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5527 #else
5528 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5529 #endif
5531 INIT_DELAYED_WORK(&ap->port_task, NULL);
5532 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5533 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5534 INIT_LIST_HEAD(&ap->eh_done_q);
5535 init_waitqueue_head(&ap->eh_wait_q);
5537 /* set cable type */
5538 ap->cbl = ATA_CBL_NONE;
5539 if (ap->flags & ATA_FLAG_SATA)
5540 ap->cbl = ATA_CBL_SATA;
5542 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5543 struct ata_device *dev = &ap->device[i];
5544 dev->ap = ap;
5545 dev->devno = i;
5546 ata_dev_init(dev);
5549 #ifdef ATA_IRQ_TRAP
5550 ap->stats.unhandled_irq = 1;
5551 ap->stats.idle_irq = 1;
5552 #endif
5554 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5558 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5559 * @ap: ATA port to initialize SCSI host for
5560 * @shost: SCSI host associated with @ap
5562 * Initialize SCSI host @shost associated with ATA port @ap.
5564 * LOCKING:
5565 * Inherited from caller.
5567 static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5569 ap->scsi_host = shost;
5571 shost->unique_id = ap->print_id;
5572 shost->max_id = 16;
5573 shost->max_lun = 1;
5574 shost->max_channel = 1;
5575 shost->max_cmd_len = 12;
5579 * ata_port_add - Attach low-level ATA driver to system
5580 * @ent: Information provided by low-level driver
5581 * @host: Collections of ports to which we add
5582 * @port_no: Port number associated with this host
5584 * Attach low-level ATA driver to system.
5586 * LOCKING:
5587 * PCI/etc. bus probe sem.
5589 * RETURNS:
5590 * New ata_port on success, NULL on error.
5592 static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5593 struct ata_host *host,
5594 unsigned int port_no)
5596 struct Scsi_Host *shost;
5597 struct ata_port *ap;
5599 DPRINTK("ENTER\n");
5601 if (!ent->port_ops->error_handler &&
5602 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5603 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5604 port_no);
5605 return NULL;
5608 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5609 if (!shost)
5610 return NULL;
5612 shost->transportt = &ata_scsi_transport_template;
5614 ap = ata_shost_to_port(shost);
5616 ata_port_init(ap, host, ent, port_no);
5617 ata_port_init_shost(ap, shost);
5619 return ap;
5622 static void ata_host_release(struct device *gendev, void *res)
5624 struct ata_host *host = dev_get_drvdata(gendev);
5625 int i;
5627 for (i = 0; i < host->n_ports; i++) {
5628 struct ata_port *ap = host->ports[i];
5630 if (!ap)
5631 continue;
5633 if (ap->ops->port_stop)
5634 ap->ops->port_stop(ap);
5636 scsi_host_put(ap->scsi_host);
5639 if (host->ops->host_stop)
5640 host->ops->host_stop(host);
5644 * ata_host_init - Initialize a host struct
5645 * @host: host to initialize
5646 * @dev: device host is attached to
5647 * @flags: host flags
5648 * @ops: port_ops
5650 * LOCKING:
5651 * PCI/etc. bus probe sem.
5655 void ata_host_init(struct ata_host *host, struct device *dev,
5656 unsigned long flags, const struct ata_port_operations *ops)
5658 spin_lock_init(&host->lock);
5659 host->dev = dev;
5660 host->flags = flags;
5661 host->ops = ops;
5665 * ata_device_add - Register hardware device with ATA and SCSI layers
5666 * @ent: Probe information describing hardware device to be registered
5668 * This function processes the information provided in the probe
5669 * information struct @ent, allocates the necessary ATA and SCSI
5670 * host information structures, initializes them, and registers
5671 * everything with requisite kernel subsystems.
5673 * This function requests irqs, probes the ATA bus, and probes
5674 * the SCSI bus.
5676 * LOCKING:
5677 * PCI/etc. bus probe sem.
5679 * RETURNS:
5680 * Number of ports registered. Zero on error (no ports registered).
5682 int ata_device_add(const struct ata_probe_ent *ent)
5684 unsigned int i;
5685 struct device *dev = ent->dev;
5686 struct ata_host *host;
5687 int rc;
5689 DPRINTK("ENTER\n");
5691 if (ent->irq == 0) {
5692 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5693 return 0;
5696 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5697 return 0;
5699 /* alloc a container for our list of ATA ports (buses) */
5700 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5701 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5702 if (!host)
5703 goto err_out;
5704 devres_add(dev, host);
5705 dev_set_drvdata(dev, host);
5707 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5708 host->n_ports = ent->n_ports;
5709 host->irq = ent->irq;
5710 host->irq2 = ent->irq2;
5711 host->iomap = ent->iomap;
5712 host->private_data = ent->private_data;
5714 /* register each port bound to this device */
5715 for (i = 0; i < host->n_ports; i++) {
5716 struct ata_port *ap;
5717 unsigned long xfer_mode_mask;
5718 int irq_line = ent->irq;
5720 ap = ata_port_add(ent, host, i);
5721 host->ports[i] = ap;
5722 if (!ap)
5723 goto err_out;
5725 /* dummy? */
5726 if (ent->dummy_port_mask & (1 << i)) {
5727 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5728 ap->ops = &ata_dummy_port_ops;
5729 continue;
5732 /* start port */
5733 rc = ap->ops->port_start(ap);
5734 if (rc) {
5735 host->ports[i] = NULL;
5736 scsi_host_put(ap->scsi_host);
5737 goto err_out;
5740 /* Report the secondary IRQ for second channel legacy */
5741 if (i == 1 && ent->irq2)
5742 irq_line = ent->irq2;
5744 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5745 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5746 (ap->pio_mask << ATA_SHIFT_PIO);
5748 /* print per-port info to dmesg */
5749 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5750 "ctl 0x%p bmdma 0x%p irq %d\n",
5751 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5752 ata_mode_string(xfer_mode_mask),
5753 ap->ioaddr.cmd_addr,
5754 ap->ioaddr.ctl_addr,
5755 ap->ioaddr.bmdma_addr,
5756 irq_line);
5758 /* freeze port before requesting IRQ */
5759 ata_eh_freeze_port(ap);
5762 /* obtain irq, that may be shared between channels */
5763 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5764 ent->irq_flags, DRV_NAME, host);
5765 if (rc) {
5766 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5767 ent->irq, rc);
5768 goto err_out;
5771 /* do we have a second IRQ for the other channel, eg legacy mode */
5772 if (ent->irq2) {
5773 /* We will get weird core code crashes later if this is true
5774 so trap it now */
5775 BUG_ON(ent->irq == ent->irq2);
5777 rc = devm_request_irq(dev, ent->irq2,
5778 ent->port_ops->irq_handler, ent->irq_flags,
5779 DRV_NAME, host);
5780 if (rc) {
5781 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5782 ent->irq2, rc);
5783 goto err_out;
5787 /* resource acquisition complete */
5788 devres_remove_group(dev, ata_device_add);
5790 /* perform each probe synchronously */
5791 DPRINTK("probe begin\n");
5792 for (i = 0; i < host->n_ports; i++) {
5793 struct ata_port *ap = host->ports[i];
5794 u32 scontrol;
5795 int rc;
5797 /* init sata_spd_limit to the current value */
5798 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5799 int spd = (scontrol >> 4) & 0xf;
5800 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5802 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5804 rc = scsi_add_host(ap->scsi_host, dev);
5805 if (rc) {
5806 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5807 /* FIXME: do something useful here */
5808 /* FIXME: handle unconditional calls to
5809 * scsi_scan_host and ata_host_remove, below,
5810 * at the very least
5814 if (ap->ops->error_handler) {
5815 struct ata_eh_info *ehi = &ap->eh_info;
5816 unsigned long flags;
5818 ata_port_probe(ap);
5820 /* kick EH for boot probing */
5821 spin_lock_irqsave(ap->lock, flags);
5823 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5824 ehi->action |= ATA_EH_SOFTRESET;
5825 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5827 ap->pflags |= ATA_PFLAG_LOADING;
5828 ata_port_schedule_eh(ap);
5830 spin_unlock_irqrestore(ap->lock, flags);
5832 /* wait for EH to finish */
5833 ata_port_wait_eh(ap);
5834 } else {
5835 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5836 rc = ata_bus_probe(ap);
5837 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5839 if (rc) {
5840 /* FIXME: do something useful here?
5841 * Current libata behavior will
5842 * tear down everything when
5843 * the module is removed
5844 * or the h/w is unplugged.
5850 /* probes are done, now scan each port's disk(s) */
5851 DPRINTK("host probe begin\n");
5852 for (i = 0; i < host->n_ports; i++) {
5853 struct ata_port *ap = host->ports[i];
5855 ata_scsi_scan_host(ap);
5858 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5859 return ent->n_ports; /* success */
5861 err_out:
5862 devres_release_group(dev, ata_device_add);
5863 dev_set_drvdata(dev, NULL);
5864 VPRINTK("EXIT, returning %d\n", rc);
5865 return 0;
5869 * ata_port_detach - Detach ATA port in preparation for device removal
5870 * @ap: ATA port to be detached
5872 * Detach all ATA devices and the associated SCSI devices of @ap;
5873 * then, remove the associated SCSI host. @ap is guaranteed to
5874 * be quiescent on return from this function.
5876 * LOCKING:
5877 * Kernel thread context (may sleep).
5879 void ata_port_detach(struct ata_port *ap)
5881 unsigned long flags;
5882 int i;
5884 if (!ap->ops->error_handler)
5885 goto skip_eh;
5887 /* tell EH we're leaving & flush EH */
5888 spin_lock_irqsave(ap->lock, flags);
5889 ap->pflags |= ATA_PFLAG_UNLOADING;
5890 spin_unlock_irqrestore(ap->lock, flags);
5892 ata_port_wait_eh(ap);
5894 /* EH is now guaranteed to see UNLOADING, so no new device
5895 * will be attached. Disable all existing devices.
5897 spin_lock_irqsave(ap->lock, flags);
5899 for (i = 0; i < ATA_MAX_DEVICES; i++)
5900 ata_dev_disable(&ap->device[i]);
5902 spin_unlock_irqrestore(ap->lock, flags);
5904 /* Final freeze & EH. All in-flight commands are aborted. EH
5905 * will be skipped and retries will be terminated with bad
5906 * target.
5908 spin_lock_irqsave(ap->lock, flags);
5909 ata_port_freeze(ap); /* won't be thawed */
5910 spin_unlock_irqrestore(ap->lock, flags);
5912 ata_port_wait_eh(ap);
5914 /* Flush hotplug task. The sequence is similar to
5915 * ata_port_flush_task().
5917 flush_workqueue(ata_aux_wq);
5918 cancel_delayed_work(&ap->hotplug_task);
5919 flush_workqueue(ata_aux_wq);
5921 skip_eh:
5922 /* remove the associated SCSI host */
5923 scsi_remove_host(ap->scsi_host);
5927 * ata_host_detach - Detach all ports of an ATA host
5928 * @host: Host to detach
5930 * Detach all ports of @host.
5932 * LOCKING:
5933 * Kernel thread context (may sleep).
5935 void ata_host_detach(struct ata_host *host)
5937 int i;
5939 for (i = 0; i < host->n_ports; i++)
5940 ata_port_detach(host->ports[i]);
5943 struct ata_probe_ent *
5944 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5946 struct ata_probe_ent *probe_ent;
5948 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5949 if (!probe_ent) {
5950 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5951 kobject_name(&(dev->kobj)));
5952 return NULL;
5955 INIT_LIST_HEAD(&probe_ent->node);
5956 probe_ent->dev = dev;
5958 probe_ent->sht = port->sht;
5959 probe_ent->port_flags = port->flags;
5960 probe_ent->pio_mask = port->pio_mask;
5961 probe_ent->mwdma_mask = port->mwdma_mask;
5962 probe_ent->udma_mask = port->udma_mask;
5963 probe_ent->port_ops = port->port_ops;
5964 probe_ent->private_data = port->private_data;
5966 return probe_ent;
5970 * ata_std_ports - initialize ioaddr with standard port offsets.
5971 * @ioaddr: IO address structure to be initialized
5973 * Utility function which initializes data_addr, error_addr,
5974 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5975 * device_addr, status_addr, and command_addr to standard offsets
5976 * relative to cmd_addr.
5978 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5981 void ata_std_ports(struct ata_ioports *ioaddr)
5983 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5984 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5985 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5986 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5987 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5988 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5989 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5990 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5991 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5992 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
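/*
 * Illustrative sketch, not part of the original source: filling in one
 * port's taskfile addresses on a probe entry (e.g. one obtained from
 * ata_probe_ent_alloc() above) and registering it.  The base addresses,
 * the IRQ argument and the example_register_one_port() name are
 * hypothetical.
 */
static int example_register_one_port(struct ata_probe_ent *probe_ent,
				     void __iomem *cmd_base,
				     void __iomem *ctl_base, int irq)
{
	probe_ent->n_ports = 1;
	probe_ent->irq = irq;
	probe_ent->irq_flags = IRQF_SHARED;

	probe_ent->port[0].cmd_addr = cmd_base;
	probe_ent->port[0].ctl_addr = ctl_base;
	probe_ent->port[0].altstatus_addr = ctl_base;

	/* derive data/error/feature/.../command addresses from cmd_addr */
	ata_std_ports(&probe_ent->port[0]);

	/* registers the port with the ATA and SCSI layers; 0 ports means failure */
	return ata_device_add(probe_ent) ? 0 : -ENODEV;
}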
5996 #ifdef CONFIG_PCI
5999 * ata_pci_remove_one - PCI layer callback for device removal
6000 * @pdev: PCI device that was removed
6002 * PCI layer indicates to libata via this hook that hot-unplug or
6003 * module unload event has occurred. Detach all ports. Resource
6004 * release is handled via devres.
6006 * LOCKING:
6007 * Inherited from PCI layer (may sleep).
6009 void ata_pci_remove_one(struct pci_dev *pdev)
6011 struct device *dev = pci_dev_to_dev(pdev);
6012 struct ata_host *host = dev_get_drvdata(dev);
6014 ata_host_detach(host);
6017 /* move to PCI subsystem */
6018 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6020 unsigned long tmp = 0;
6022 switch (bits->width) {
6023 case 1: {
6024 u8 tmp8 = 0;
6025 pci_read_config_byte(pdev, bits->reg, &tmp8);
6026 tmp = tmp8;
6027 break;
6029 case 2: {
6030 u16 tmp16 = 0;
6031 pci_read_config_word(pdev, bits->reg, &tmp16);
6032 tmp = tmp16;
6033 break;
6035 case 4: {
6036 u32 tmp32 = 0;
6037 pci_read_config_dword(pdev, bits->reg, &tmp32);
6038 tmp = tmp32;
6039 break;
6042 default:
6043 return -EINVAL;
6046 tmp &= bits->mask;
6048 return (tmp == bits->val) ? 1 : 0;
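/*
 * Illustrative sketch, not part of the original source: checking an enable
 * bit in PCI config space with pci_test_config_bits().  The 0x41 register
 * offset and the 0x80 enable bit are made up for the example.
 */
static int example_channel_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits enable_bit = {
		.reg	= 0x41,	/* hypothetical config register */
		.width	= 1,	/* read one byte */
		.mask	= 0x80,
		.val	= 0x80,
	};

	/* 1 if (reg & mask) == val, 0 otherwise, -EINVAL on a bad width */
	return pci_test_config_bits(pdev, &enable_bit);
}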
6051 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6053 pci_save_state(pdev);
6054 pci_disable_device(pdev);
6056 if (mesg.event == PM_EVENT_SUSPEND)
6057 pci_set_power_state(pdev, PCI_D3hot);
6060 int ata_pci_device_do_resume(struct pci_dev *pdev)
6062 int rc;
6064 pci_set_power_state(pdev, PCI_D0);
6065 pci_restore_state(pdev);
6067 rc = pcim_enable_device(pdev);
6068 if (rc) {
6069 dev_printk(KERN_ERR, &pdev->dev,
6070 "failed to enable device after resume (%d)\n", rc);
6071 return rc;
6074 pci_set_master(pdev);
6075 return 0;
6078 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6080 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6081 int rc = 0;
6083 rc = ata_host_suspend(host, mesg);
6084 if (rc)
6085 return rc;
6087 ata_pci_device_do_suspend(pdev, mesg);
6089 return 0;
6092 int ata_pci_device_resume(struct pci_dev *pdev)
6094 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6095 int rc;
6097 rc = ata_pci_device_do_resume(pdev);
6098 if (rc == 0)
6099 ata_host_resume(host);
6100 return rc;
6102 #endif /* CONFIG_PCI */
6105 static int __init ata_init(void)
6107 ata_probe_timeout *= HZ;
6108 ata_wq = create_workqueue("ata");
6109 if (!ata_wq)
6110 return -ENOMEM;
6112 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6113 if (!ata_aux_wq) {
6114 destroy_workqueue(ata_wq);
6115 return -ENOMEM;
6118 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6119 return 0;
6122 static void __exit ata_exit(void)
6124 destroy_workqueue(ata_wq);
6125 destroy_workqueue(ata_aux_wq);
6128 subsys_initcall(ata_init);
6129 module_exit(ata_exit);
6131 static unsigned long ratelimit_time;
6132 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6134 int ata_ratelimit(void)
6136 int rc;
6137 unsigned long flags;
6139 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6141 if (time_after(jiffies, ratelimit_time)) {
6142 rc = 1;
6143 ratelimit_time = jiffies + (HZ/5);
6144 } else
6145 rc = 0;
6147 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6149 return rc;
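/*
 * Illustrative sketch, not part of the original source: typical use of
 * ata_ratelimit() to keep a noisy interrupt path from flooding the log.
 * The example_report_spurious() name is hypothetical.
 */
static void example_report_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}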
6153 * ata_wait_register - wait until register value changes
6154 * @reg: IO-mapped register
6155 * @mask: Mask to apply to read register value
6156 * @val: Wait condition
6157 * @interval_msec: polling interval in milliseconds
6158 * @timeout_msec: timeout in milliseconds
6160 * Waiting for some bits of register to change is a common
6161 * operation for ATA controllers. This function reads 32bit LE
6162 * IO-mapped register @reg and tests for the following condition.
6164 * (*@reg & mask) != val
6166 * If the condition is met, it returns; otherwise, the process is
6167 * repeated after @interval_msec until timeout.
6169 * LOCKING:
6170 * Kernel thread context (may sleep)
6172 * RETURNS:
6173 * The final register value.
6175 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6176 unsigned long interval_msec,
6177 unsigned long timeout_msec)
6179 unsigned long timeout;
6180 u32 tmp;
6182 tmp = ioread32(reg);
6184 /* Calculate timeout _after_ the first read to make sure
6185 * preceding writes reach the controller before starting to
6186 * eat away the timeout.
6188 timeout = jiffies + (timeout_msec * HZ) / 1000;
6190 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6191 msleep(interval_msec);
6192 tmp = ioread32(reg);
6195 return tmp;
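/*
 * Illustrative sketch, not part of the original source: waiting for a
 * hypothetical "engine busy" bit (bit 0 at offset 0x20) to clear before
 * issuing a command.  Register layout and the helper name are made up.
 */
static int example_wait_engine_idle(void __iomem *port_mmio)
{
	u32 tmp;

	/* poll every 10 ms, give up after 1000 ms; returns the final value */
	tmp = ata_wait_register(port_mmio + 0x20, 0x1, 0x1, 10, 1000);

	return (tmp & 0x1) ? -EBUSY : 0;
}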
6199 * Dummy port_ops
6201 static void ata_dummy_noret(struct ata_port *ap) { }
6202 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6203 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6205 static u8 ata_dummy_check_status(struct ata_port *ap)
6207 return ATA_DRDY;
6210 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6212 return AC_ERR_SYSTEM;
6215 const struct ata_port_operations ata_dummy_port_ops = {
6216 .port_disable = ata_port_disable,
6217 .check_status = ata_dummy_check_status,
6218 .check_altstatus = ata_dummy_check_status,
6219 .dev_select = ata_noop_dev_select,
6220 .qc_prep = ata_noop_qc_prep,
6221 .qc_issue = ata_dummy_qc_issue,
6222 .freeze = ata_dummy_noret,
6223 .thaw = ata_dummy_noret,
6224 .error_handler = ata_dummy_noret,
6225 .post_internal_cmd = ata_dummy_qc_noret,
6226 .irq_clear = ata_dummy_noret,
6227 .port_start = ata_dummy_ret0,
6228 .port_stop = ata_dummy_noret,
6232 * libata is essentially a library of internal helper functions for
6233 * low-level ATA host controller drivers. As such, the API/ABI is
6234 * likely to change as new drivers are added and updated.
6235 * Do not depend on ABI/API stability.
6238 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6239 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6240 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6241 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6242 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6243 EXPORT_SYMBOL_GPL(ata_std_ports);
6244 EXPORT_SYMBOL_GPL(ata_host_init);
6245 EXPORT_SYMBOL_GPL(ata_device_add);
6246 EXPORT_SYMBOL_GPL(ata_host_detach);
6247 EXPORT_SYMBOL_GPL(ata_sg_init);
6248 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6249 EXPORT_SYMBOL_GPL(ata_hsm_move);
6250 EXPORT_SYMBOL_GPL(ata_qc_complete);
6251 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6252 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6253 EXPORT_SYMBOL_GPL(ata_tf_load);
6254 EXPORT_SYMBOL_GPL(ata_tf_read);
6255 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6256 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6257 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6258 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6259 EXPORT_SYMBOL_GPL(ata_check_status);
6260 EXPORT_SYMBOL_GPL(ata_altstatus);
6261 EXPORT_SYMBOL_GPL(ata_exec_command);
6262 EXPORT_SYMBOL_GPL(ata_port_start);
6263 EXPORT_SYMBOL_GPL(ata_interrupt);
6264 EXPORT_SYMBOL_GPL(ata_data_xfer);
6265 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6266 EXPORT_SYMBOL_GPL(ata_qc_prep);
6267 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6268 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6269 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6270 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6271 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6272 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6273 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6274 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6275 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6276 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6277 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6278 EXPORT_SYMBOL_GPL(ata_port_probe);
6279 EXPORT_SYMBOL_GPL(sata_set_spd);
6280 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6281 EXPORT_SYMBOL_GPL(sata_phy_resume);
6282 EXPORT_SYMBOL_GPL(sata_phy_reset);
6283 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6284 EXPORT_SYMBOL_GPL(ata_bus_reset);
6285 EXPORT_SYMBOL_GPL(ata_std_prereset);
6286 EXPORT_SYMBOL_GPL(ata_std_softreset);
6287 EXPORT_SYMBOL_GPL(sata_port_hardreset);
6288 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6289 EXPORT_SYMBOL_GPL(ata_std_postreset);
6290 EXPORT_SYMBOL_GPL(ata_dev_classify);
6291 EXPORT_SYMBOL_GPL(ata_dev_pair);
6292 EXPORT_SYMBOL_GPL(ata_port_disable);
6293 EXPORT_SYMBOL_GPL(ata_ratelimit);
6294 EXPORT_SYMBOL_GPL(ata_wait_register);
6295 EXPORT_SYMBOL_GPL(ata_busy_sleep);
6296 EXPORT_SYMBOL_GPL(ata_port_queue_task);
6297 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6298 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6299 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6300 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6301 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6302 EXPORT_SYMBOL_GPL(ata_host_intr);
6303 EXPORT_SYMBOL_GPL(sata_scr_valid);
6304 EXPORT_SYMBOL_GPL(sata_scr_read);
6305 EXPORT_SYMBOL_GPL(sata_scr_write);
6306 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6307 EXPORT_SYMBOL_GPL(ata_port_online);
6308 EXPORT_SYMBOL_GPL(ata_port_offline);
6309 EXPORT_SYMBOL_GPL(ata_host_suspend);
6310 EXPORT_SYMBOL_GPL(ata_host_resume);
6311 EXPORT_SYMBOL_GPL(ata_id_string);
6312 EXPORT_SYMBOL_GPL(ata_id_c_string);
6313 EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6314 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6316 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6317 EXPORT_SYMBOL_GPL(ata_timing_compute);
6318 EXPORT_SYMBOL_GPL(ata_timing_merge);
6320 #ifdef CONFIG_PCI
6321 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6322 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6323 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6324 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6325 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6326 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6327 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6328 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6329 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6330 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6331 #endif /* CONFIG_PCI */
6333 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6334 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6336 EXPORT_SYMBOL_GPL(ata_eng_timeout);
6337 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6338 EXPORT_SYMBOL_GPL(ata_port_abort);
6339 EXPORT_SYMBOL_GPL(ata_port_freeze);
6340 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6341 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6342 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6343 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6344 EXPORT_SYMBOL_GPL(ata_do_eh);
6345 EXPORT_SYMBOL_GPL(ata_irq_on);
6346 EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6347 EXPORT_SYMBOL_GPL(ata_irq_ack);
6348 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6349 EXPORT_SYMBOL_GPL(ata_dev_try_classify);