/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#include <asm/semaphore.h>
#include <asm/byteorder.h>
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };
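/*
 * Illustrative note (not from the original source): each table is read
 * as { interval, duration, timeout } in milliseconds.  For example the
 * "normal" entry asks the link debounce loop to sample status every
 * 5 ms, accept the link once readings have been stable for 100 ms, and
 * give up after 2000 ms.
 */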
static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *  ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *  @tf: Taskfile to convert
 *  @fis: Buffer into which data will output
 *  @pmp: Port multiplier port
 *
 *  Converts a standard ATA taskfile to a Serial ATA
 *  FIS structure (Register - Host to Device).
 *
 *  LOCKING:
 *  Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
    fis[0] = 0x27;                    /* Register - Host to Device FIS */
    fis[1] = (pmp & 0xf) | (1 << 7);  /* Port multiplier number,
                                         bit 7 indicates Command FIS */
    fis[2] = tf->command;
    fis[3] = tf->feature;

    fis[8] = tf->hob_lbal;
    fis[9] = tf->hob_lbam;
    fis[10] = tf->hob_lbah;
    fis[11] = tf->hob_feature;

    fis[13] = tf->hob_nsect;
}
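/*
 * Illustrative example (not from the original source): filling a 20-byte
 * H2D Register FIS buffer from a taskfile and reading it back with the
 * mirror conversion below.  The names "my_tf" and "h2d" are hypothetical.
 *
 *     u8 h2d[20];
 *     ata_tf_to_fis(&my_tf, h2d, 0);   // pmp 0; h2d[0] ends up 0x27
 *     ata_tf_from_fis(h2d, &my_tf);    // reverse direction, see below
 */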
/**
 *  ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *  @fis: Buffer from which data will be input
 *  @tf: Taskfile to output
 *
 *  Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
    tf->command = fis[2];   /* status */
    tf->feature = fis[3];   /* error */

    tf->hob_lbal = fis[8];
    tf->hob_lbam = fis[9];
    tf->hob_lbah = fis[10];

    tf->hob_nsect = fis[13];
}
static const u8 ata_rw_cmds[] = {
    ATA_CMD_READ_MULTI_EXT,
    ATA_CMD_WRITE_MULTI_EXT,

    ATA_CMD_WRITE_MULTI_FUA_EXT,

    ATA_CMD_PIO_READ_EXT,
    ATA_CMD_PIO_WRITE_EXT,

    ATA_CMD_WRITE_FUA_EXT
};
/**
 *  ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *  @tf: command to examine and configure
 *  @dev: device tf belongs to
 *
 *  Examine the device configuration and tf->flags to calculate
 *  the proper read/write commands and protocol to use.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
    int index, fua, lba48, write;

    fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
    lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
    write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

    if (dev->flags & ATA_DFLAG_PIO) {
        tf->protocol = ATA_PROT_PIO;
        index = dev->multi_count ? 0 : 8;
    } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
        /* Unable to use DMA due to host limitation */
        tf->protocol = ATA_PROT_PIO;
        index = dev->multi_count ? 0 : 8;
    } else {
        tf->protocol = ATA_PROT_DMA;
    }

    cmd = ata_rw_cmds[index + fua + lba48 + write];
}
/**
 *  ata_tf_read_block - Read block address from ATA taskfile
 *  @tf: ATA taskfile of interest
 *  @dev: ATA device @tf belongs to
 *
 *  Read block address from @tf.  This function can handle all
 *  three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *  flags select the address format to use.
 *
 *  RETURNS:
 *  Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
    if (tf->flags & ATA_TFLAG_LBA) {
        if (tf->flags & ATA_TFLAG_LBA48) {
            block |= (u64)tf->hob_lbah << 40;
            block |= (u64)tf->hob_lbam << 32;
            block |= tf->hob_lbal << 24;
        } else
            block |= (tf->device & 0xf) << 24;

        block |= tf->lbah << 16;
        block |= tf->lbam << 8;
    } else {
        cyl = tf->lbam | (tf->lbah << 8);
        head = tf->device & 0xf;

        block = (cyl * dev->heads + head) * dev->sectors + sect;
    }
}
/**
 *  ata_build_rw_tf - Build ATA taskfile for given read/write request
 *  @tf: Target ATA taskfile
 *  @dev: ATA device @tf belongs to
 *  @block: Block address
 *  @n_block: Number of blocks
 *  @tf_flags: RW/FUA etc...
 *
 *  Build ATA taskfile @tf for read/write request described by
 *  @block, @n_block, @tf_flags and @tag on @dev.
 *
 *  RETURNS:
 *  0 on success, -ERANGE if the request is too large for @dev,
 *  -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
    tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
    tf->flags |= tf_flags;

    if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
                       ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
        likely(tag != ATA_TAG_INTERNAL)) {
        if (!lba_48_ok(block, n_block))
            return -ERANGE;

        tf->protocol = ATA_PROT_NCQ;
        tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

        if (tf->flags & ATA_TFLAG_WRITE)
            tf->command = ATA_CMD_FPDMA_WRITE;
        else
            tf->command = ATA_CMD_FPDMA_READ;

        tf->nsect = tag << 3;
        tf->hob_feature = (n_block >> 8) & 0xff;
        tf->feature = n_block & 0xff;

        tf->hob_lbah = (block >> 40) & 0xff;
        tf->hob_lbam = (block >> 32) & 0xff;
        tf->hob_lbal = (block >> 24) & 0xff;
        tf->lbah = (block >> 16) & 0xff;
        tf->lbam = (block >> 8) & 0xff;
        tf->lbal = block & 0xff;

        if (tf->flags & ATA_TFLAG_FUA)
            tf->device |= 1 << 7;
    } else if (dev->flags & ATA_DFLAG_LBA) {
        tf->flags |= ATA_TFLAG_LBA;

        if (lba_28_ok(block, n_block)) {
            tf->device |= (block >> 24) & 0xf;
        } else if (lba_48_ok(block, n_block)) {
            if (!(dev->flags & ATA_DFLAG_LBA48))
                return -ERANGE;

            tf->flags |= ATA_TFLAG_LBA48;

            tf->hob_nsect = (n_block >> 8) & 0xff;

            tf->hob_lbah = (block >> 40) & 0xff;
            tf->hob_lbam = (block >> 32) & 0xff;
            tf->hob_lbal = (block >> 24) & 0xff;
        } else
            /* request too large even for LBA48 */
            return -ERANGE;

        if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
            return -EINVAL;

        tf->nsect = n_block & 0xff;

        tf->lbah = (block >> 16) & 0xff;
        tf->lbam = (block >> 8) & 0xff;
        tf->lbal = block & 0xff;

        tf->device |= ATA_LBA;
    } else {
        u32 sect, head, cyl, track;

        /* The request -may- be too large for CHS addressing. */
        if (!lba_28_ok(block, n_block))
            return -ERANGE;

        if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
            return -EINVAL;

        /* Convert LBA to CHS */
        track = (u32)block / dev->sectors;
        cyl   = track / dev->heads;
        head  = track % dev->heads;
        sect  = (u32)block % dev->sectors + 1;

        DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                (u32)block, track, cyl, head, sect);
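        /*
         * Worked example (illustrative, not part of the original file):
         * with dev->heads == 16 and dev->sectors == 63, block 1000 maps to
         *
         *     track = 1000 / 63     = 15
         *     cyl   = 15 / 16       = 0
         *     head  = 15 % 16       = 15
         *     sect  = 1000 % 63 + 1 = 56
         *
         * i.e. cylinder 0, head 15, sector 56 (sectors are 1-based).
         */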
        /* Check whether the converted CHS can fit */
        if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
            return -ERANGE;

        tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
    }

    return 0;
}
/**
 *  ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *  @pio_mask: pio_mask
 *  @mwdma_mask: mwdma_mask
 *  @udma_mask: udma_mask
 *
 *  Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *  unsigned int xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
    return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
           ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
           ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *  ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *  @xfer_mask: xfer_mask to unpack
 *  @pio_mask: resulting pio_mask
 *  @mwdma_mask: resulting mwdma_mask
 *  @udma_mask: resulting udma_mask
 *
 *  Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *  Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
    if (pio_mask)
        *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
    if (mwdma_mask)
        *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
    if (udma_mask)
        *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
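/*
 * Illustrative example (not from the original source): a device that
 * supports PIO modes 0-4, MWDMA modes 0-2 and UDMA modes 0-5 would be
 * described by
 *
 *     xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 * i.e. each per-protocol bitmap is shifted into its own field of the
 * single xfer_mask word; ata_unpack_xfermask() reverses the packing.
 */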
static const struct ata_xfer_ent {
    int shift, bits;
    u8 base;
} ata_xfer_tbl[] = {
    { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
    { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
    { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
    { -1, },
};
/**
 *  ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *  @xfer_mask: xfer_mask of interest
 *
 *  Return matching XFER_* value for @xfer_mask.  Only the highest
 *  bit of @xfer_mask is considered.
 *
 *  RETURNS:
 *  Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
    int highbit = fls(xfer_mask) - 1;
    const struct ata_xfer_ent *ent;

    for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
        if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
            return ent->base + highbit - ent->shift;
    return 0;
}
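/*
 * Illustrative example (not from the original source): if @xfer_mask has
 * only bit (ATA_SHIFT_UDMA + 5) set, the UDMA row of ata_xfer_tbl matches
 * and the function returns XFER_UDMA_0 + 5, i.e. XFER_UDMA_5.  The
 * ata_xfer_mode2mask() helper below maps that value back to the same bit.
 */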
/**
 *  ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *  @xfer_mode: XFER_* of interest
 *
 *  Return matching xfer_mask for @xfer_mode.
 *
 *  RETURNS:
 *  Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
    const struct ata_xfer_ent *ent;

    for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
        if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
            return 1 << (ent->shift + xfer_mode - ent->base);
    return 0;
}
/**
 *  ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *  @xfer_mode: XFER_* of interest
 *
 *  Return matching xfer_shift for @xfer_mode.
 *
 *  RETURNS:
 *  Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
    const struct ata_xfer_ent *ent;

    for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
        if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
            return ent->shift;
    return -1;
}
/**
 *  ata_mode_string - convert xfer_mask to string
 *  @xfer_mask: mask of bits supported; only highest bit counts.
 *
 *  Determine string which represents the highest speed
 *  (highest bit in @modemask).
 *
 *  RETURNS:
 *  Constant C string representing highest speed listed in
 *  @mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
    static const char * const xfer_mode_str[] = {

    int highbit;

    highbit = fls(xfer_mask) - 1;
    if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
        return xfer_mode_str[highbit];
    return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
    static const char * const spd_str[] = {

    if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))

    return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
    if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
        ata_dev_printk(dev, KERN_WARNING, "disabled\n");
    }
}
/**
 *  ata_pio_devchk - PATA device presence detection
 *  @ap: ATA channel to examine
 *  @device: Device to examine (starting at zero)
 *
 *  This technique was originally described in
 *  Hale Landis's ATADRVR (www.ata-atapi.com), and
 *  later found its way into the ATA/ATAPI spec.
 *
 *  Write a pattern to the ATA shadow registers,
 *  and if a device is present, it will respond by
 *  correctly storing and echoing back the
 *  ATA shadow register contents.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
                                   unsigned int device)
{
    struct ata_ioports *ioaddr = &ap->ioaddr;
    u8 nsect, lbal;

    ap->ops->dev_select(ap, device);

    outb(0x55, ioaddr->nsect_addr);
    outb(0xaa, ioaddr->lbal_addr);

    outb(0xaa, ioaddr->nsect_addr);
    outb(0x55, ioaddr->lbal_addr);

    outb(0x55, ioaddr->nsect_addr);
    outb(0xaa, ioaddr->lbal_addr);

    nsect = inb(ioaddr->nsect_addr);
    lbal = inb(ioaddr->lbal_addr);

    if ((nsect == 0x55) && (lbal == 0xaa))
        return 1;   /* we found a device */

    return 0;       /* nothing found */
}
/**
 *  ata_mmio_devchk - PATA device presence detection
 *  @ap: ATA channel to examine
 *  @device: Device to examine (starting at zero)
 *
 *  This technique was originally described in
 *  Hale Landis's ATADRVR (www.ata-atapi.com), and
 *  later found its way into the ATA/ATAPI spec.
 *
 *  Write a pattern to the ATA shadow registers,
 *  and if a device is present, it will respond by
 *  correctly storing and echoing back the
 *  ATA shadow register contents.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
                                    unsigned int device)
{
    struct ata_ioports *ioaddr = &ap->ioaddr;
    u8 nsect, lbal;

    ap->ops->dev_select(ap, device);

    writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
    writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

    writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
    writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

    writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
    writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

    nsect = readb((void __iomem *) ioaddr->nsect_addr);
    lbal = readb((void __iomem *) ioaddr->lbal_addr);

    if ((nsect == 0x55) && (lbal == 0xaa))
        return 1;   /* we found a device */

    return 0;       /* nothing found */
}
/**
 *  ata_devchk - PATA device presence detection
 *  @ap: ATA channel to examine
 *  @device: Device to examine (starting at zero)
 *
 *  Dispatch ATA device presence detection, depending
 *  on whether we are using PIO or MMIO to talk to the
 *  ATA shadow registers.
 */
static unsigned int ata_devchk(struct ata_port *ap,
                               unsigned int device)
{
    if (ap->flags & ATA_FLAG_MMIO)
        return ata_mmio_devchk(ap, device);
    return ata_pio_devchk(ap, device);
}
/**
 *  ata_dev_classify - determine device type based on ATA-spec signature
 *  @tf: ATA taskfile register set for device to be identified
 *
 *  Determine from taskfile register contents whether a device is
 *  ATA or ATAPI, as per "Signature and persistence" section
 *  of ATA/PI spec (volume 1, sect 5.14).
 *
 *  RETURNS:
 *  Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *  in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
    /* Apple's open source Darwin code hints that some devices only
     * put a proper signature into the LBA mid/high registers,
     * So, we only check those.  It's sufficient for uniqueness.
     */

    if (((tf->lbam == 0) && (tf->lbah == 0)) ||
        ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
        DPRINTK("found ATA device by sig\n");
        return ATA_DEV_ATA;
    }

    if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
        ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
        DPRINTK("found ATAPI device by sig\n");
        return ATA_DEV_ATAPI;
    }

    DPRINTK("unknown device\n");
    return ATA_DEV_UNKNOWN;
}
/**
 *  ata_dev_try_classify - Parse returned ATA device signature
 *  @ap: ATA channel to examine
 *  @device: Device to examine (starting at zero)
 *  @r_err: Value of error register on completion
 *
 *  After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *  an ATA/ATAPI-defined set of values is placed in the ATA
 *  shadow registers, indicating the results of device detection.
 *
 *  Select the ATA device, and read the values from the ATA shadow
 *  registers.  Then parse according to the Error register value,
 *  and the spec-defined values examined by ata_dev_classify().
 *
 *  RETURNS:
 *  Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
    struct ata_taskfile tf;

    ap->ops->dev_select(ap, device);

    memset(&tf, 0, sizeof(tf));

    ap->ops->tf_read(ap, &tf);

    /* see if device passed diags: if master then continue and warn later */
    if (err == 0 && device == 0)
        /* diagnostic fail : do nothing _YET_ */
        ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
    else if ((device == 0) && (err == 0x81))

    /* determine if device is ATA or ATAPI */
    class = ata_dev_classify(&tf);

    if (class == ATA_DEV_UNKNOWN)
        return ATA_DEV_NONE;
    if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
        return ATA_DEV_NONE;
    return class;
}
/**
 *  ata_id_string - Convert IDENTIFY DEVICE page into string
 *  @id: IDENTIFY DEVICE results we will examine
 *  @s: string into which data is output
 *  @ofs: offset into identify device page
 *  @len: length of string to return. must be an even number.
 *
 *  The strings in the IDENTIFY DEVICE page are broken up into
 *  16-bit chunks.  Run through the string, and output each
 *  8-bit chunk linearly, regardless of platform.
 */
void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{

/**
 *  ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *  @id: IDENTIFY DEVICE results we will examine
 *  @s: string into which data is output
 *  @ofs: offset into identify device page
 *  @len: length of string to return. must be an odd number.
 *
 *  This function is identical to ata_id_string except that it
 *  trims trailing spaces and terminates the resulting string with
 *  null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
    unsigned char *p;

    ata_id_string(id, s, ofs, len - 1);

    p = s + strnlen(s, len - 1);
    while (p > s && p[-1] == ' ')
        p--;
    *p = '\0';
}
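/*
 * Illustrative example (not from the original source): extracting the
 * model string from IDENTIFY data.  In the IDENTIFY DEVICE layout the
 * model number starts at word 27 and is 40 bytes long, so a 41-byte
 * buffer holds the trimmed, NUL-terminated result:
 *
 *     unsigned char model[41];
 *     ata_id_c_string(dev->id, model, 27, sizeof(model));
 */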
static u64 ata_id_n_sectors(const u16 *id)
{
    if (ata_id_has_lba(id)) {
        if (ata_id_has_lba48(id))
            return ata_id_u64(id, 100);
        else
            return ata_id_u32(id, 60);
    } else {
        if (ata_id_current_chs_valid(id))
            return ata_id_u32(id, 57);
        else
            return id[1] * id[3] * id[6];
    }
}
/**
 *  ata_noop_dev_select - Select device 0/1 on ATA bus
 *  @ap: ATA channel to manipulate
 *  @device: ATA device (numbered from zero) to select
 *
 *  This function performs no actual function.
 *
 *  May be used as the dev_select() entry in ata_port_operations.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 *  ata_std_dev_select - Select device 0/1 on ATA bus
 *  @ap: ATA channel to manipulate
 *  @device: ATA device (numbered from zero) to select
 *
 *  Use the method defined in the ATA specification to
 *  make either device 0, or device 1, active on the
 *  ATA channel.  Works with both PIO and MMIO.
 *
 *  May be used as the dev_select() entry in ata_port_operations.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
    u8 tmp;

    if (device == 0)
        tmp = ATA_DEVICE_OBS;
    else
        tmp = ATA_DEVICE_OBS | ATA_DEV1;

    if (ap->flags & ATA_FLAG_MMIO) {
        writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
    } else {
        outb(tmp, ap->ioaddr.device_addr);
    }

    ata_pause(ap);  /* needed; also flushes, for mmio */
}
/**
 *  ata_dev_select - Select device 0/1 on ATA bus
 *  @ap: ATA channel to manipulate
 *  @device: ATA device (numbered from zero) to select
 *  @wait: non-zero to wait for Status register BSY bit to clear
 *  @can_sleep: non-zero if context allows sleeping
 *
 *  Use the method defined in the ATA specification to
 *  make either device 0, or device 1, active on the
 *  ATA channel.
 *
 *  This is a high-level version of ata_std_dev_select(),
 *  which additionally provides the services of inserting
 *  the proper pauses and status polling, where needed.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
    if (ata_msg_probe(ap))
        ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
                        "device %u, wait %u\n", ap->id, device, wait);

    ap->ops->dev_select(ap, device);

    if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
/**
 *  ata_dump_id - IDENTIFY DEVICE info debugging output
 *  @id: IDENTIFY DEVICE page to dump
 *
 *  Dump selected 16-bit words from the given IDENTIFY DEVICE
 *  page.
 */
static inline void ata_dump_id(const u16 *id)
{
    DPRINTK("49==0x%04x "
    DPRINTK("80==0x%04x "
    DPRINTK("88==0x%04x "
/**
 *  ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *  @id: IDENTIFY data to compute xfer mask from
 *
 *  Compute the xfermask for this device.  This is not as trivial
 *  as it seems if we must consider early devices correctly.
 *
 *  FIXME: pre IDE drive timing (do we care ?).
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
    unsigned int pio_mask, mwdma_mask, udma_mask;

    /* Usual case. Word 53 indicates word 64 is valid */
    if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
        pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
    } else {
        /* If word 64 isn't valid then Word 51 high byte holds
         * the PIO timing number for the maximum. Turn it into
         * a mask.
         */
        u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
        if (mode < 5)   /* Valid PIO range */
            pio_mask = (2 << mode) - 1;

        /* But wait.. there's more. Design your standards by
         * committee and you too can get a free iordy field to
         * process. However its the speeds not the modes that
         * are supported... Note drivers using the timing API
         * will get this right anyway
         */
    }

    mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

    if (ata_id_is_cfa(id)) {
        /*
         * Process compact flash extended modes
         */
        int pio = id[163] & 0x7;
        int dma = (id[163] >> 3) & 7;

        if (pio)
            pio_mask |= (1 << 5);
        if (pio > 1)
            pio_mask |= (1 << 6);
        if (dma)
            mwdma_mask |= (1 << 3);
        if (dma > 1)
            mwdma_mask |= (1 << 4);
    }

    if (id[ATA_ID_FIELD_VALID] & (1 << 2))
        udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

    return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 *  ata_port_queue_task - Queue port_task
 *  @ap: The ata_port to queue port_task for
 *  @fn: workqueue function to be scheduled
 *  @data: data value to pass to workqueue function
 *  @delay: delay time for workqueue function
 *
 *  Schedule @fn(@data) for execution after @delay jiffies using
 *  port_task.  There is one port_task per port and it's the
 *  user(low level driver)'s responsibility to make sure that only
 *  one task is active at any given time.
 *
 *  libata core layer takes care of synchronization between
 *  port_task and EH.  ata_port_queue_task() may be ignored for EH
 *  synchronization.
 *
 *  LOCKING:
 *  Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
                         unsigned long delay)
{
    int rc;

    if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
        return;

    PREPARE_WORK(&ap->port_task, fn, data);

    if (!delay)
        rc = queue_work(ata_wq, &ap->port_task);
    else
        rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

    /* rc == 0 means that another user is using port task */
}
/**
 *  ata_port_flush_task - Flush port_task
 *  @ap: The ata_port to flush port_task for
 *
 *  After this function completes, port_task is guaranteed not to
 *  be running or scheduled.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
    unsigned long flags;

    spin_lock_irqsave(ap->lock, flags);
    ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
    spin_unlock_irqrestore(ap->lock, flags);

    DPRINTK("flush #1\n");
    flush_workqueue(ata_wq);

    /*
     * At this point, if a task is running, it's guaranteed to see
     * the FLUSH flag; thus, it will never queue pio tasks again.
     */
    if (!cancel_delayed_work(&ap->port_task)) {
        if (ata_msg_ctl(ap))
            ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
                            __FUNCTION__);
        flush_workqueue(ata_wq);
    }

    spin_lock_irqsave(ap->lock, flags);
    ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
    spin_unlock_irqrestore(ap->lock, flags);

    if (ata_msg_ctl(ap))
        ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
    struct completion *waiting = qc->private_data;

    complete(waiting);
}
/**
 *  ata_exec_internal_sg - execute libata internal command
 *  @dev: Device to which the command is sent
 *  @tf: Taskfile registers for the command and the result
 *  @cdb: CDB for packet command
 *  @dma_dir: Data transfer direction of the command
 *  @sg: sg list for the data buffer of the command
 *  @n_elem: Number of sg entries
 *
 *  Executes libata internal command with timeout.  @tf contains
 *  command on entry and result on return.  Timeout and error
 *  conditions are reported via return value.  No recovery action
 *  is taken after a command times out.  It's caller's duty to
 *  clean up after timeout.
 *
 *  LOCKING:
 *  None.  Should be called with kernel context, might sleep.
 *
 *  RETURNS:
 *  Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
                              int dma_dir, struct scatterlist *sg,
                              unsigned int n_elem)
{
    struct ata_port *ap = dev->ap;
    u8 command = tf->command;
    struct ata_queued_cmd *qc;
    unsigned int tag, preempted_tag;
    u32 preempted_sactive, preempted_qc_active;
    DECLARE_COMPLETION_ONSTACK(wait);
    unsigned long flags;
    unsigned int err_mask;
    int rc;

    spin_lock_irqsave(ap->lock, flags);

    /* no internal command while frozen */
    if (ap->pflags & ATA_PFLAG_FROZEN) {
        spin_unlock_irqrestore(ap->lock, flags);
        return AC_ERR_SYSTEM;
    }

    /* initialize internal qc */

    /* XXX: Tag 0 is used for drivers with legacy EH as some
     * drivers choke if any other tag is given.  This breaks
     * ata_tag_internal() test for those drivers.  Don't use new
     * EH stuff without converting to it.
     */
    if (ap->ops->error_handler)
        tag = ATA_TAG_INTERNAL;

    if (test_and_set_bit(tag, &ap->qc_allocated))

    qc = __ata_qc_from_tag(ap, tag);

    preempted_tag = ap->active_tag;
    preempted_sactive = ap->sactive;
    preempted_qc_active = ap->qc_active;
    ap->active_tag = ATA_TAG_POISON;

    /* prepare & issue qc */
    memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
    qc->flags |= ATA_QCFLAG_RESULT_TF;
    qc->dma_dir = dma_dir;
    if (dma_dir != DMA_NONE) {
        unsigned int i, buflen = 0;

        for (i = 0; i < n_elem; i++)
            buflen += sg[i].length;

        ata_sg_init(qc, sg, n_elem);
        qc->nsect = buflen / ATA_SECT_SIZE;
    }

    qc->private_data = &wait;
    qc->complete_fn = ata_qc_complete_internal;

    spin_unlock_irqrestore(ap->lock, flags);

    rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

    ata_port_flush_task(ap);

    if (!rc) {
        spin_lock_irqsave(ap->lock, flags);

        /* We're racing with irq here.  If we lose, the
         * following test prevents us from completing the qc
         * twice.  If we win, the port is frozen and will be
         * cleaned up by ->post_internal_cmd().
         */
        if (qc->flags & ATA_QCFLAG_ACTIVE) {
            qc->err_mask |= AC_ERR_TIMEOUT;

            if (ap->ops->error_handler)
                ata_port_freeze(ap);
            else
                ata_qc_complete(qc);

            if (ata_msg_warn(ap))
                ata_dev_printk(dev, KERN_WARNING,
                               "qc timeout (cmd 0x%x)\n", command);
        }

        spin_unlock_irqrestore(ap->lock, flags);
    }

    /* do post_internal_cmd */
    if (ap->ops->post_internal_cmd)
        ap->ops->post_internal_cmd(qc);

    if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
        if (ata_msg_warn(ap))
            ata_dev_printk(dev, KERN_WARNING,
                           "zero err_mask for failed "
                           "internal command, assuming AC_ERR_OTHER\n");
        qc->err_mask |= AC_ERR_OTHER;
    }

    spin_lock_irqsave(ap->lock, flags);

    *tf = qc->result_tf;
    err_mask = qc->err_mask;

    ap->active_tag = preempted_tag;
    ap->sactive = preempted_sactive;
    ap->qc_active = preempted_qc_active;

    /* XXX - Some LLDDs (sata_mv) disable port on command failure.
     * Until those drivers are fixed, we detect the condition
     * here, fail the command with AC_ERR_SYSTEM and reenable the
     * port.
     *
     * Note that this doesn't change any behavior as internal
     * command failure results in disabling the device in the
     * higher layer for LLDDs without new reset/EH callbacks.
     *
     * Kill the following code as soon as those drivers are fixed.
     */
    if (ap->flags & ATA_FLAG_DISABLED) {
        err_mask |= AC_ERR_SYSTEM;
    }

    spin_unlock_irqrestore(ap->lock, flags);

    return err_mask;
}
/**
 *  ata_exec_internal - execute libata internal command
 *  @dev: Device to which the command is sent
 *  @tf: Taskfile registers for the command and the result
 *  @cdb: CDB for packet command
 *  @dma_dir: Data transfer direction of the command
 *  @buf: Data buffer of the command
 *  @buflen: Length of data buffer
 *
 *  Wrapper around ata_exec_internal_sg() which takes simple
 *  buffer instead of sg list.
 *
 *  LOCKING:
 *  None.  Should be called with kernel context, might sleep.
 *
 *  RETURNS:
 *  Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen)
{
    struct scatterlist sg;

    sg_init_one(&sg, buf, buflen);

    return ata_exec_internal_sg(dev, tf, cdb, dma_dir, &sg, 1);
}
/**
 *  ata_do_simple_cmd - execute simple internal command
 *  @dev: Device to which the command is sent
 *  @cmd: Opcode to execute
 *
 *  Execute a 'simple' command, that only consists of the opcode
 *  'cmd' itself, without filling any other registers.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep).
 *
 *  RETURNS:
 *  Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
    struct ata_taskfile tf;

    ata_tf_init(dev, &tf);

    tf.command = cmd;
    tf.flags |= ATA_TFLAG_DEVICE;
    tf.protocol = ATA_PROT_NODATA;

    return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
/**
 *  ata_pio_need_iordy - check if iordy needed
 *  @adev: ATA device
 *
 *  Check if the current speed of the device requires IORDY.  Used
 *  by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
    int pio;
    int speed = adev->pio_mode - XFER_PIO_0;

    /* If we have no drive specific rule, then PIO 2 is non IORDY */
    if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
        pio = adev->id[ATA_ID_EIDE_PIO];
        /* Is the speed faster than the drive allows non IORDY ? */
        /* This is cycle times not frequency - watch the logic! */
        if (pio > 240)  /* PIO2 is 240nS per cycle */
            return 1;
    }
    return 0;
}
/**
 *  ata_dev_read_id - Read ID data from the specified device
 *  @dev: target device
 *  @p_class: pointer to class of the target device (may be changed)
 *  @flags: ATA_READID_* flags
 *  @id: buffer to read IDENTIFY data into
 *
 *  Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *  performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *  devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *  for pre-ATA4 drives.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                    unsigned int flags, u16 *id)
{
    struct ata_port *ap = dev->ap;
    unsigned int class = *p_class;
    struct ata_taskfile tf;
    unsigned int err_mask = 0;
    const char *reason;

    if (ata_msg_ctl(ap))
        ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
                       __FUNCTION__, ap->id, dev->devno);

    ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

    ata_tf_init(dev, &tf);

    switch (class) {
    case ATA_DEV_ATA:
        tf.command = ATA_CMD_ID_ATA;
        break;
    case ATA_DEV_ATAPI:
        tf.command = ATA_CMD_ID_ATAPI;
        break;
    default:
        reason = "unsupported class";
    }

    tf.protocol = ATA_PROT_PIO;
    tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */

    err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                 id, sizeof(id[0]) * ATA_ID_WORDS);
    if (err_mask) {
        if (err_mask & AC_ERR_NODEV_HINT) {
            DPRINTK("ata%u.%d: NODEV after polling detection\n",
                    ap->id, dev->devno);
        }

        reason = "I/O error";
    }

    swap_buf_le16(id, ATA_ID_WORDS);

    /* sanity check */
    reason = "device reports illegal type";

    if (class == ATA_DEV_ATA) {
        if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
            goto err_out;
    } else {
        if (ata_id_is_ata(id))
            goto err_out;
    }

    if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
        /*
         * The exact sequence expected by certain pre-ATA4 drives is:
         * SRST RESET
         * IDENTIFY
         * INITIALIZE DEVICE PARAMETERS
         * anything else..
         * Some drives were very specific about that exact sequence.
         */
        if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
            err_mask = ata_dev_init_params(dev, id[3], id[6]);
            if (err_mask) {
                reason = "INIT_DEV_PARAMS failed";
                goto err_out;
            }

            /* current CHS translation info (id[53-58]) might be
             * changed. reread the identify device info.
             */
            flags &= ~ATA_READID_POSTRESET;
        }
    }

    return 0;

 err_out:
    if (ata_msg_warn(ap))
        ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
                       "(%s, err_mask=0x%x)\n", reason, err_mask);
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
    return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
                               char *desc, size_t desc_sz)
{
    struct ata_port *ap = dev->ap;
    int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

    if (!ata_id_has_ncq(dev->id)) {
        desc[0] = '\0';
        return;
    }

    if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
        snprintf(desc, desc_sz, "NCQ (not used)");
        return;
    }

    if (ap->flags & ATA_FLAG_NCQ) {
        hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
        dev->flags |= ATA_DFLAG_NCQ;
    }

    if (hdepth >= ddepth)
        snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
    else
        snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
    int i;

    if (ap->scsi_host) {
        unsigned int len = 0;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
            len = max(len, ap->device[i].cdb_len);

        ap->scsi_host->max_cmd_len = len;
    }
}
/**
 *  ata_dev_configure - Configure the specified ATA/ATAPI device
 *  @dev: Target device to configure
 *
 *  Configure @dev according to @dev->id.  Generic and low-level
 *  driver specific fixups are also applied.
 *
 *  LOCKING:
 *  Kernel thread context (may sleep)
 *
 *  RETURNS:
 *  0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
    struct ata_port *ap = dev->ap;
    int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
    const u16 *id = dev->id;
    unsigned int xfer_mask;
    char revbuf[7];     /* XYZ-99\0 */
    int rc;

    if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
        ata_dev_printk(dev, KERN_INFO,
                       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
                       __FUNCTION__, ap->id, dev->devno);
        return 0;
    }

    if (ata_msg_probe(ap))
        ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
                       __FUNCTION__, ap->id, dev->devno);

    /* print device capabilities */
    if (ata_msg_probe(ap))
        ata_dev_printk(dev, KERN_DEBUG,
                       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
                       "85:%04x 86:%04x 87:%04x 88:%04x\n",
                       id[49], id[82], id[83], id[84],
                       id[85], id[86], id[87], id[88]);

    /* initialize to-be-configured parameters */
    dev->flags &= ~ATA_DFLAG_CFG_MASK;
    dev->max_sectors = 0;

    /*
     * common ATA, ATAPI feature tests
     */

    /* find max transfer mode; for printk only */
    xfer_mask = ata_id_xfermask(id);

    if (ata_msg_probe(ap))
        ata_dump_id(id);

    /* ATA-specific feature tests */
    if (dev->class == ATA_DEV_ATA) {
        if (ata_id_is_cfa(id)) {
            if (id[162] & 1) /* CPRM may make this media unusable */
                ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
                               ap->id, dev->devno);
            snprintf(revbuf, 7, "CFA");
        } else
            snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

        dev->n_sectors = ata_id_n_sectors(id);

        if (ata_id_has_lba(id)) {
            const char *lba_desc;

            dev->flags |= ATA_DFLAG_LBA;
            if (ata_id_has_lba48(id)) {
                dev->flags |= ATA_DFLAG_LBA48;

                if (dev->n_sectors >= (1UL << 28) &&
                    ata_id_has_flush_ext(id))
                    dev->flags |= ATA_DFLAG_FLUSH_EXT;
            }

            ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

            /* print device info to dmesg */
            if (ata_msg_drv(ap) && print_info)
                ata_dev_printk(dev, KERN_INFO, "%s, "
                               "max %s, %Lu sectors: %s %s\n",
                               revbuf,
                               ata_mode_string(xfer_mask),
                               (unsigned long long)dev->n_sectors,
                               lba_desc, ncq_desc);
        } else {
            /* Default translation */
            dev->cylinders = id[1];
            dev->heads     = id[3];
            dev->sectors   = id[6];

            if (ata_id_current_chs_valid(id)) {
                /* Current CHS translation is valid. */
                dev->cylinders = id[54];
                dev->heads     = id[55];
                dev->sectors   = id[56];
            }

            /* print device info to dmesg */
            if (ata_msg_drv(ap) && print_info)
                ata_dev_printk(dev, KERN_INFO, "%s, "
                               "max %s, %Lu sectors: CHS %u/%u/%u\n",
                               revbuf,
                               ata_mode_string(xfer_mask),
                               (unsigned long long)dev->n_sectors,
                               dev->cylinders, dev->heads,
                               dev->sectors);
        }

        if (dev->id[59] & 0x100) {
            dev->multi_count = dev->id[59] & 0xff;
            if (ata_msg_drv(ap) && print_info)
                ata_dev_printk(dev, KERN_INFO,
                               "ata%u: dev %u multi count %u\n",
                               ap->id, dev->devno, dev->multi_count);
        }
    }

    /* ATAPI-specific feature tests */
    else if (dev->class == ATA_DEV_ATAPI) {
        char *cdb_intr_string = "";

        rc = atapi_cdb_len(id);
        if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
            if (ata_msg_warn(ap))
                ata_dev_printk(dev, KERN_WARNING,
                               "unsupported CDB len\n");
        }
        dev->cdb_len = (unsigned int) rc;

        if (ata_id_cdb_intr(dev->id)) {
            dev->flags |= ATA_DFLAG_CDB_INTR;
            cdb_intr_string = ", CDB intr";
        }

        /* print device info to dmesg */
        if (ata_msg_drv(ap) && print_info)
            ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
                           ata_mode_string(xfer_mask),
                           cdb_intr_string);
    }

    /* determine max_sectors */
    dev->max_sectors = ATA_MAX_SECTORS;
    if (dev->flags & ATA_DFLAG_LBA48)
        dev->max_sectors = ATA_MAX_SECTORS_LBA48;

    if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
        /* Let the user know. We don't want to disallow opens for
           rescue purposes, or in case the vendor is just a blithering
           idiot */
        ata_dev_printk(dev, KERN_WARNING,
                       "Drive reports diagnostics failure. This may indicate a drive\n");
        ata_dev_printk(dev, KERN_WARNING,
                       "fault or invalid emulation. Contact drive vendor for information.\n");
    }

    ata_set_port_max_cmd_len(ap);

    /* limit bridge transfers to udma5, 200 sectors */
    if (ata_dev_knobble(dev)) {
        if (ata_msg_drv(ap) && print_info)
            ata_dev_printk(dev, KERN_INFO,
                           "applying bridge limits\n");
        dev->udma_mask &= ATA_UDMA5;
        dev->max_sectors = ATA_MAX_SECTORS;
    }

    if (ap->ops->dev_config)
        ap->ops->dev_config(ap, dev);

    if (ata_msg_probe(ap))
        ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
                       __FUNCTION__, ata_chk_status(ap));
    return 0;

    if (ata_msg_probe(ap))
        ata_dev_printk(dev, KERN_DEBUG,
                       "%s: EXIT, err\n", __FUNCTION__);
}
/**
 *  ata_bus_probe - Reset and probe ATA bus
 *  @ap: Bus to reset and probe
 *
 *  Master ATA bus probing function.  Initiates a hardware-dependent
 *  bus reset, then attempts to identify any devices found on
 *  the bus.
 *
 *  LOCKING:
 *  PCI/etc. bus probe sem.
 *
 *  RETURNS:
 *  Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
    unsigned int classes[ATA_MAX_DEVICES];
    int tries[ATA_MAX_DEVICES];
    int i, rc, down_xfermask;
    struct ata_device *dev;

    for (i = 0; i < ATA_MAX_DEVICES; i++)
        tries[i] = ATA_PROBE_MAX_TRIES;

    /* reset and determine device classes */
    ap->ops->phy_reset(ap);

    for (i = 0; i < ATA_MAX_DEVICES; i++) {
        dev = &ap->device[i];

        if (!(ap->flags & ATA_FLAG_DISABLED) &&
            dev->class != ATA_DEV_UNKNOWN)
            classes[dev->devno] = dev->class;
        else
            classes[dev->devno] = ATA_DEV_NONE;

        dev->class = ATA_DEV_UNKNOWN;
    }

    /* after the reset the device state is PIO 0 and the controller
       state is undefined. Record the mode */

    for (i = 0; i < ATA_MAX_DEVICES; i++)
        ap->device[i].pio_mode = XFER_PIO_0;

    /* read IDENTIFY page and configure devices */
    for (i = 0; i < ATA_MAX_DEVICES; i++) {
        dev = &ap->device[i];

        dev->class = classes[i];

        if (!ata_dev_enabled(dev))
            continue;

        rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
                             dev->id);

        ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
        rc = ata_dev_configure(dev);
        ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
    }

    /* configure transfer mode */
    rc = ata_set_mode(ap, &dev);

    for (i = 0; i < ATA_MAX_DEVICES; i++)
        if (ata_dev_enabled(&ap->device[i]))
            return 0;

    /* no device present, disable port */
    ata_port_disable(ap);
    ap->ops->port_disable(ap);

    tries[dev->devno] = 0;

    sata_down_spd_limit(ap);

    tries[dev->devno]--;
    if (down_xfermask &&
        ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
        tries[dev->devno] = 0;

    if (!tries[dev->devno]) {
        ata_down_xfermask_limit(dev, 1);
        ata_dev_disable(dev);
    }
/**
 *  ata_port_probe - Mark port as enabled
 *  @ap: Port for which we indicate enablement
 *
 *  Modify @ap data structure such that the system
 *  thinks that the entire port is enabled.
 *
 *  LOCKING: host lock, or some other form of
 *  serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
    ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *  sata_print_link_status - Print SATA link status
 *  @ap: SATA port to printk link status about
 *
 *  This function prints link speed and status of a SATA link.
 *
 *  LOCKING:
 *  None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
    u32 sstatus, scontrol, tmp;

    if (sata_scr_read(ap, SCR_STATUS, &sstatus))
        return;
    sata_scr_read(ap, SCR_CONTROL, &scontrol);

    if (ata_port_online(ap)) {
        tmp = (sstatus >> 4) & 0xf;
        ata_port_printk(ap, KERN_INFO,
                        "SATA link up %s (SStatus %X SControl %X)\n",
                        sata_spd_string(tmp), sstatus, scontrol);
    } else {
        ata_port_printk(ap, KERN_INFO,
                        "SATA link down (SStatus %X SControl %X)\n",
                        sstatus, scontrol);
    }
}
/**
 *  __sata_phy_reset - Wake/reset a low-level SATA PHY
 *  @ap: SATA port associated with target SATA PHY.
 *
 *  This function issues commands to standard SATA Sxxx
 *  PHY registers, to wake up the phy (and device), and
 *  clear any reset condition.
 *
 *  LOCKING:
 *  PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
    u32 sstatus;
    unsigned long timeout = jiffies + (HZ * 5);

    if (ap->flags & ATA_FLAG_SATA_RESET) {
        /* issue phy wake/reset */
        sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
        /* Couldn't find anything in SATA I/II specs, but
         * AHCI-1.1 10.4.2 says at least 1 ms. */
    }
    /* phy wake/clear reset */
    sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

    /* wait for phy to become ready, if necessary */
    do {
        sata_scr_read(ap, SCR_STATUS, &sstatus);
        if ((sstatus & 0xf) != 1)
            break;
    } while (time_before(jiffies, timeout));

    /* print link status */
    sata_print_link_status(ap);

    /* TODO: phy layer with polling, timeouts, etc. */
    if (!ata_port_offline(ap))
        ata_port_probe(ap);
    else
        ata_port_disable(ap);

    if (ap->flags & ATA_FLAG_DISABLED)
        return;

    if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
        ata_port_disable(ap);
        return;
    }

    ap->cbl = ATA_CBL_SATA;
}
/**
 *  sata_phy_reset - Reset SATA bus.
 *  @ap: SATA port associated with target SATA PHY.
 *
 *  This function resets the SATA bus, and then probes
 *  the bus for devices.
 *
 *  LOCKING:
 *  PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
    __sata_phy_reset(ap);
    if (ap->flags & ATA_FLAG_DISABLED)
        return;
}
/**
 *  ata_dev_pair - return other device on cable
 *
 *  Obtain the other device on the same cable, or if none is
 *  present NULL is returned
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
    struct ata_port *ap = adev->ap;
    struct ata_device *pair = &ap->device[1 - adev->devno];
    if (!ata_dev_enabled(pair))
        return NULL;
    return pair;
}
/**
 *  ata_port_disable - Disable port.
 *  @ap: Port to be disabled.
 *
 *  Modify @ap data structure such that the system
 *  thinks that the entire port is disabled, and should
 *  never attempt to probe or communicate with devices
 *  on this port.
 *
 *  LOCKING: host lock, or some other form of
 *  serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
    ap->device[0].class = ATA_DEV_NONE;
    ap->device[1].class = ATA_DEV_NONE;
    ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *  sata_down_spd_limit - adjust SATA spd limit downward
 *  @ap: Port to adjust SATA spd limit for
 *
 *  Adjust SATA spd limit of @ap downward.  Note that this
 *  function only adjusts the limit.  The change must be applied
 *  using sata_set_spd().
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
    u32 sstatus, spd, mask;
    int rc, highbit;

    rc = sata_scr_read(ap, SCR_STATUS, &sstatus);

    mask = ap->sata_spd_limit;

    highbit = fls(mask) - 1;
    mask &= ~(1 << highbit);

    spd = (sstatus >> 4) & 0xf;

    mask &= (1 << spd) - 1;

    ap->sata_spd_limit = mask;

    ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
                    sata_spd_string(fls(mask)));

    return 0;
}
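/*
 * Illustrative example (not from the original source): sata_spd_limit is
 * a bitmap of allowed speeds with bit 0 = 1.5 Gbps and bit 1 = 3.0 Gbps.
 * If the limit is 0x3 (both speeds) and the link is currently at 3.0 Gbps
 * (spd == 2), then
 *
 *     mask = 0x3 & ~(1 << 1) = 0x1;   // drop the current highest bit
 *     mask &= (1 << 2) - 1;           // keep speeds below the current one
 *
 * leaving only the 1.5 Gbps bit set as the new limit.
 */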
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
    u32 spd, limit;

    if (ap->sata_spd_limit == UINT_MAX)
        limit = 0;
    else
        limit = fls(ap->sata_spd_limit);

    spd = (*scontrol >> 4) & 0xf;
    *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

    return spd != limit;
}
/**
 *  sata_set_spd_needed - is SATA spd configuration needed
 *  @ap: Port in question
 *
 *  Test whether the spd limit in SControl matches
 *  @ap->sata_spd_limit.  This function is used to determine
 *  whether hardreset is necessary to apply SATA spd
 *  configuration.
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
    u32 scontrol;

    if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
        return 0;

    return __sata_set_spd_needed(ap, &scontrol);
}
/**
 *  sata_set_spd - set SATA spd according to spd limit
 *  @ap: Port to set SATA spd for
 *
 *  Set SATA spd of @ap according to sata_spd_limit.
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  0 if spd doesn't need to be changed, 1 if spd has been
 *  changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
    u32 scontrol;
    int rc;

    if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
        return rc;

    if (!__sata_set_spd_needed(ap, &scontrol))
        return 0;

    if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
        return rc;

    return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

    { XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
    { XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
    { XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
    { XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

    { XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
    { XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },

    { XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
    { XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
    { XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*  { XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

    { XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
    { XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
    { XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

    { XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
    { XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
    { XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

    { XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
    { XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
    { XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
    { XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

    { XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
    { XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
    { XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*  { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
#define ENOUGH(v,unit)  (((v)-1)/(unit)+1)
#define EZ(v,unit)      ((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
    q->setup   = EZ(t->setup   * 1000,  T);
    q->act8b   = EZ(t->act8b   * 1000,  T);
    q->rec8b   = EZ(t->rec8b   * 1000,  T);
    q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
    q->active  = EZ(t->active  * 1000,  T);
    q->recover = EZ(t->recover * 1000,  T);
    q->cycle   = EZ(t->cycle   * 1000,  T);
    q->udma    = EZ(t->udma    * 1000, UT);
}
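/*
 * Worked example (illustrative, not part of the original file):
 * ENOUGH() is a ceiling division, so ENOUGH(70000, 30000) =
 * (70000 - 1) / 30000 + 1 = 3, i.e. a 70000-unit requirement needs 3
 * whole 30000-unit clock periods.  EZ() additionally maps a zero entry
 * in the timing table to zero instead of rounding it up to one cycle.
 */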
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
                      struct ata_timing *m, unsigned int what)
{
    if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
    if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
    if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
    if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
    if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
    if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
    if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
    if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
    const struct ata_timing *t;

    for (t = ata_timing; t->mode != speed; t++)
        if (t->mode == 0xFF)
            return NULL;
    return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
                       struct ata_timing *t, int T, int UT)
{
    const struct ata_timing *s;
    struct ata_timing p;

    if (!(s = ata_timing_find_mode(speed)))
        return -EINVAL;

    memcpy(t, s, sizeof(*s));

    /*
     * If the drive is an EIDE drive, it can tell us it needs extended
     * PIO/MW_DMA cycle timing.
     */

    if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
        memset(&p, 0, sizeof(p));
        if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
            if (speed <= XFER_PIO_2)
                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
            else
                p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
        } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
            p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
        }
        ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
    }

    /*
     * Convert the timing to bus clock counts.
     */

    ata_timing_quantize(t, t, T, UT);

    /*
     * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
     * S.M.A.R.T * and some other commands. We have to ensure that the
     * DMA cycle timing is slower/equal than the fastest PIO timing.
     */

    if (speed > XFER_PIO_4) {
        ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
        ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
    }

    /*
     * Lengthen active & recovery time so that cycle time is correct.
     */

    if (t->act8b + t->rec8b < t->cyc8b) {
        t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
        t->rec8b = t->cyc8b - t->act8b;
    }

    if (t->active + t->recover < t->cycle) {
        t->active += (t->cycle - (t->active + t->recover)) / 2;
        t->recover = t->cycle - t->active;
    }

    return 0;
}
/**
 *  ata_down_xfermask_limit - adjust dev xfer masks downward
 *  @dev: Device to adjust xfer masks
 *  @force_pio0: Force PIO0
 *
 *  Adjust xfer masks of @dev downward.  Note that this function
 *  does not apply the change.  Invoking ata_set_mode() afterwards
 *  will apply the limit.
 *
 *  LOCKING:
 *  Inherited from caller.
 *
 *  RETURNS:
 *  0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
{
    unsigned long xfer_mask;
    int highbit;

    xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
                                  dev->udma_mask);

    /* don't gear down to MWDMA from UDMA, go directly to PIO */
    if (xfer_mask & ATA_MASK_UDMA)
        xfer_mask &= ~ATA_MASK_MWDMA;

    highbit = fls(xfer_mask) - 1;
    xfer_mask &= ~(1 << highbit);
    if (force_pio0)
        xfer_mask &= 1 << ATA_SHIFT_PIO;

    ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
                        &dev->udma_mask);

    ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
                   ata_mode_string(xfer_mask));

    return 0;
}
static int ata_dev_set_mode(struct ata_device *dev)
{
    struct ata_eh_context *ehc = &dev->ap->eh_context;
    unsigned int err_mask;
    int rc;

    dev->flags &= ~ATA_DFLAG_PIO;
    if (dev->xfer_shift == ATA_SHIFT_PIO)
        dev->flags |= ATA_DFLAG_PIO;

    err_mask = ata_dev_set_xfermode(dev);
    if (err_mask) {
        ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
                       "(err_mask=0x%x)\n", err_mask);
    }

    ehc->i.flags |= ATA_EHI_POST_SETMODE;
    rc = ata_dev_revalidate(dev, 0);
    ehc->i.flags &= ~ATA_EHI_POST_SETMODE;

    DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
            dev->xfer_shift, (int)dev->xfer_mode);

    ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
                   ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));

    return 0;
}
/**
 *  ata_set_mode - Program timings and issue SET FEATURES - XFER
 *  @ap: port on which timings will be programmed
 *  @r_failed_dev: out parameter for failed device
 *
 *  Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *  ata_set_mode() fails, pointer to the failing device is
 *  returned in @r_failed_dev.
 *
 *  LOCKING:
 *  PCI/etc. bus probe sem.
 *
 *  RETURNS:
 *  0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
    struct ata_device *dev;
    int i, rc = 0, used_dma = 0, found = 0;

    /* has private set_mode? */
    if (ap->ops->set_mode) {
        /* FIXME: make ->set_mode handle no device case and
         * return error code and failing device on failure.
         */
        for (i = 0; i < ATA_MAX_DEVICES; i++) {
            if (ata_dev_ready(&ap->device[i])) {
                ap->ops->set_mode(ap);
                break;
            }
        }
        return 0;
    }

    /* step 1: calculate xfer_mask */
    for (i = 0; i < ATA_MAX_DEVICES; i++) {
        unsigned int pio_mask, dma_mask;

        dev = &ap->device[i];

        if (!ata_dev_enabled(dev))
            continue;

        ata_dev_xfermask(dev);

        pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
        dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
        dev->pio_mode = ata_xfer_mask2mode(pio_mask);
        dev->dma_mode = ata_xfer_mask2mode(dma_mask);
    }

    /* step 2: always set host PIO timings */
    for (i = 0; i < ATA_MAX_DEVICES; i++) {
        dev = &ap->device[i];
        if (!ata_dev_enabled(dev))
            continue;

        if (!dev->pio_mode) {
            ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
        }

        dev->xfer_mode = dev->pio_mode;
        dev->xfer_shift = ATA_SHIFT_PIO;
        if (ap->ops->set_piomode)
            ap->ops->set_piomode(ap, dev);
    }

    /* step 3: set host DMA timings */
    for (i = 0; i < ATA_MAX_DEVICES; i++) {
        dev = &ap->device[i];

        if (!ata_dev_enabled(dev) || !dev->dma_mode)
            continue;

        dev->xfer_mode = dev->dma_mode;
        dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
        if (ap->ops->set_dmamode)
            ap->ops->set_dmamode(ap, dev);
    }

    /* step 4: update devices' xfer mode */
    for (i = 0; i < ATA_MAX_DEVICES; i++) {
        dev = &ap->device[i];

        /* don't update suspended devices' xfer mode */
        if (!ata_dev_ready(dev))
            continue;

        rc = ata_dev_set_mode(dev);
    }

    /* Record simplex status. If we selected DMA then the other
     * host channels are not permitted to do so.
     */
    if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
        ap->host->simplex_claimed = 1;

    /* step5: chip specific finalisation */
    if (ap->ops->post_set_mode)
        ap->ops->post_set_mode(ap);

    if (rc)
        *r_failed_dev = dev;
    return rc;
}
2527 * ata_tf_to_host - issue ATA taskfile to host controller
2528 * @ap: port to which command is being issued
2529 * @tf: ATA taskfile register set
2531 * Issues ATA taskfile register set to ATA host controller,
2532 * with proper synchronization with interrupt handler and
2536 * spin_lock_irqsave(host lock)
2539 static inline void ata_tf_to_host(struct ata_port
*ap
,
2540 const struct ata_taskfile
*tf
)
2542 ap
->ops
->tf_load(ap
, tf
);
2543 ap
->ops
->exec_command(ap
, tf
);
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
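/*
 * Illustrative usage (not part of libata): reset paths typically give the
 * device a short "impatience" window before the hard limit, e.g.
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		ata_port_printk(ap, KERN_ERR, "device not ready\n");
 *
 * which warns after ATA_TMOUT_BOOT_QUICK but keeps polling until
 * ATA_TMOUT_BOOT expires.
 */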
2601 static void ata_bus_post_reset(struct ata_port
*ap
, unsigned int devmask
)
2603 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
2604 unsigned int dev0
= devmask
& (1 << 0);
2605 unsigned int dev1
= devmask
& (1 << 1);
2606 unsigned long timeout
;
2608 /* if device 0 was found in ata_devchk, wait for its
2612 ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
);
2614 /* if device 1 was found in ata_devchk, wait for
2615 * register access, then wait for BSY to clear
2617 timeout
= jiffies
+ ATA_TMOUT_BOOT
;
2621 ap
->ops
->dev_select(ap
, 1);
2622 if (ap
->flags
& ATA_FLAG_MMIO
) {
2623 nsect
= readb((void __iomem
*) ioaddr
->nsect_addr
);
2624 lbal
= readb((void __iomem
*) ioaddr
->lbal_addr
);
2626 nsect
= inb(ioaddr
->nsect_addr
);
2627 lbal
= inb(ioaddr
->lbal_addr
);
2629 if ((nsect
== 1) && (lbal
== 1))
2631 if (time_after(jiffies
, timeout
)) {
2635 msleep(50); /* give drive a breather */
2638 ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
);
2640 /* is all this really necessary? */
2641 ap
->ops
->dev_select(ap
, 0);
2643 ap
->ops
->dev_select(ap
, 1);
2645 ap
->ops
->dev_select(ap
, 0);
2648 static unsigned int ata_bus_softreset(struct ata_port
*ap
,
2649 unsigned int devmask
)
2651 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
2653 DPRINTK("ata%u: bus reset via SRST\n", ap
->id
);
2655 /* software reset. causes dev0 to be selected */
2656 if (ap
->flags
& ATA_FLAG_MMIO
) {
2657 writeb(ap
->ctl
, (void __iomem
*) ioaddr
->ctl_addr
);
2658 udelay(20); /* FIXME: flush */
2659 writeb(ap
->ctl
| ATA_SRST
, (void __iomem
*) ioaddr
->ctl_addr
);
2660 udelay(20); /* FIXME: flush */
2661 writeb(ap
->ctl
, (void __iomem
*) ioaddr
->ctl_addr
);
2663 outb(ap
->ctl
, ioaddr
->ctl_addr
);
2665 outb(ap
->ctl
| ATA_SRST
, ioaddr
->ctl_addr
);
2667 outb(ap
->ctl
, ioaddr
->ctl_addr
);
2670 /* spec mandates ">= 2ms" before checking status.
2671 * We wait 150ms, because that was the magic delay used for
2672 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2673 * between when the ATA command register is written, and then
2674 * status is checked. Because waiting for "a while" before
2675 * checking status is fine, post SRST, we perform this magic
2676 * delay here as well.
2678 * Old drivers/ide uses the 2mS rule and then waits for ready
2682 /* Before we perform post reset processing we want to see if
2683 * the bus shows 0xFF because the odd clown forgets the D7
2684 * pulldown resistor.
2686 if (ata_check_status(ap
) == 0xFF)
2689 ata_bus_post_reset(ap
, devmask
);
2695 * ata_bus_reset - reset host port and associated ATA channel
2696 * @ap: port to reset
2698 * This is typically the first time we actually start issuing
2699 * commands to the ATA channel. We wait for BSY to clear, then
2700 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2701 * result. Determine what devices, if any, are on the channel
2702 * by looking at the device 0/1 error register. Look at the signature
2703 * stored in each device's taskfile registers, to determine if
2704 * the device is ATA or ATAPI.
2707 * PCI/etc. bus probe sem.
2708 * Obtains host lock.
2711 * Sets ATA_FLAG_DISABLED if bus reset fails.
2714 void ata_bus_reset(struct ata_port
*ap
)
2716 struct ata_ioports
*ioaddr
= &ap
->ioaddr
;
2717 unsigned int slave_possible
= ap
->flags
& ATA_FLAG_SLAVE_POSS
;
2719 unsigned int dev0
, dev1
= 0, devmask
= 0;
2721 DPRINTK("ENTER, host %u, port %u\n", ap
->id
, ap
->port_no
);
2723 /* determine if device 0/1 are present */
2724 if (ap
->flags
& ATA_FLAG_SATA_RESET
)
2727 dev0
= ata_devchk(ap
, 0);
2729 dev1
= ata_devchk(ap
, 1);
2733 devmask
|= (1 << 0);
2735 devmask
|= (1 << 1);
2737 /* select device 0 again */
2738 ap
->ops
->dev_select(ap
, 0);
2740 /* issue bus reset */
2741 if (ap
->flags
& ATA_FLAG_SRST
)
2742 if (ata_bus_softreset(ap
, devmask
))
2746 * determine by signature whether we have ATA or ATAPI devices
2748 ap
->device
[0].class = ata_dev_try_classify(ap
, 0, &err
);
2749 if ((slave_possible
) && (err
!= 0x81))
2750 ap
->device
[1].class = ata_dev_try_classify(ap
, 1, &err
);
2752 /* re-enable interrupts */
2753 if (ap
->ioaddr
.ctl_addr
) /* FIXME: hack. create a hook instead */
2756 /* is double-select really necessary? */
2757 if (ap
->device
[1].class != ATA_DEV_NONE
)
2758 ap
->ops
->dev_select(ap
, 1);
2759 if (ap
->device
[0].class != ATA_DEV_NONE
)
2760 ap
->ops
->dev_select(ap
, 0);
2762 /* if no devices were detected, disable this port */
2763 if ((ap
->device
[0].class == ATA_DEV_NONE
) &&
2764 (ap
->device
[1].class == ATA_DEV_NONE
))
2767 if (ap
->flags
& (ATA_FLAG_SATA_RESET
| ATA_FLAG_SRST
)) {
2768 /* set up device control for ATA_FLAG_SATA_RESET */
2769 if (ap
->flags
& ATA_FLAG_MMIO
)
2770 writeb(ap
->ctl
, (void __iomem
*) ioaddr
->ctl_addr
);
2772 outb(ap
->ctl
, ioaddr
->ctl_addr
);
2779 ata_port_printk(ap
, KERN_ERR
, "disabling port\n");
2780 ap
->ops
->port_disable(ap
);
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
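/*
 * Illustrative usage (not part of libata): callers pick a debounce table
 * to match the situation, e.g. the hotplug spin-up path below uses
 *
 *	rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
 *
 * where the table supplies { interval, duration, timeout } in msecs.
 */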
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
2878 static void ata_wait_spinup(struct ata_port
*ap
)
2880 struct ata_eh_context
*ehc
= &ap
->eh_context
;
2881 unsigned long end
, secs
;
2884 /* first, debounce phy if SATA */
2885 if (ap
->cbl
== ATA_CBL_SATA
) {
2886 rc
= sata_phy_debounce(ap
, sata_deb_timing_hotplug
);
2888 /* if debounced successfully and offline, no need to wait */
2889 if ((rc
== 0 || rc
== -EOPNOTSUPP
) && ata_port_offline(ap
))
2893 /* okay, let's give the drive time to spin up */
2894 end
= ehc
->i
.hotplug_timestamp
+ ATA_SPINUP_WAIT
* HZ
/ 1000;
2895 secs
= ((end
- jiffies
) + HZ
- 1) / HZ
;
2897 if (time_after(jiffies
, end
))
2901 ata_port_printk(ap
, KERN_INFO
, "waiting for device to spin up "
2902 "(%lu secs)\n", secs
);
2904 schedule_timeout_uninterruptible(end
- jiffies
);
2908 * ata_std_prereset - prepare for reset
2909 * @ap: ATA port to be reset
2911 * @ap is about to be reset. Initialize it.
2914 * Kernel thread context (may sleep)
2917 * 0 on success, -errno otherwise.
2919 int ata_std_prereset(struct ata_port
*ap
)
2921 struct ata_eh_context
*ehc
= &ap
->eh_context
;
2922 const unsigned long *timing
= sata_ehc_deb_timing(ehc
);
2925 /* handle link resume & hotplug spinup */
2926 if ((ehc
->i
.flags
& ATA_EHI_RESUME_LINK
) &&
2927 (ap
->flags
& ATA_FLAG_HRST_TO_RESUME
))
2928 ehc
->i
.action
|= ATA_EH_HARDRESET
;
2930 if ((ehc
->i
.flags
& ATA_EHI_HOTPLUGGED
) &&
2931 (ap
->flags
& ATA_FLAG_SKIP_D2H_BSY
))
2932 ata_wait_spinup(ap
);
2934 /* if we're about to do hardreset, nothing more to do */
2935 if (ehc
->i
.action
& ATA_EH_HARDRESET
)
2938 /* if SATA, resume phy */
2939 if (ap
->cbl
== ATA_CBL_SATA
) {
2940 rc
= sata_phy_resume(ap
, timing
);
2941 if (rc
&& rc
!= -EOPNOTSUPP
) {
2942 /* phy resume failed */
2943 ata_port_printk(ap
, KERN_WARNING
, "failed to resume "
2944 "link for reset (errno=%d)\n", rc
);
2949 /* Wait for !BSY if the controller can wait for the first D2H
2950 * Reg FIS and we don't know that no device is attached.
2952 if (!(ap
->flags
& ATA_FLAG_SKIP_D2H_BSY
) && !ata_port_offline(ap
))
2953 ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
);
2959 * ata_std_softreset - reset host port via ATA SRST
2960 * @ap: port to reset
2961 * @classes: resulting classes of attached devices
2963 * Reset host port using ATA SRST.
2966 * Kernel thread context (may sleep)
2969 * 0 on success, -errno otherwise.
2971 int ata_std_softreset(struct ata_port
*ap
, unsigned int *classes
)
2973 unsigned int slave_possible
= ap
->flags
& ATA_FLAG_SLAVE_POSS
;
2974 unsigned int devmask
= 0, err_mask
;
2979 if (ata_port_offline(ap
)) {
2980 classes
[0] = ATA_DEV_NONE
;
2984 /* determine if device 0/1 are present */
2985 if (ata_devchk(ap
, 0))
2986 devmask
|= (1 << 0);
2987 if (slave_possible
&& ata_devchk(ap
, 1))
2988 devmask
|= (1 << 1);
2990 /* select device 0 again */
2991 ap
->ops
->dev_select(ap
, 0);
2993 /* issue bus reset */
2994 DPRINTK("about to softreset, devmask=%x\n", devmask
);
2995 err_mask
= ata_bus_softreset(ap
, devmask
);
2997 ata_port_printk(ap
, KERN_ERR
, "SRST failed (err_mask=0x%x)\n",
3002 /* determine by signature whether we have ATA or ATAPI devices */
3003 classes
[0] = ata_dev_try_classify(ap
, 0, &err
);
3004 if (slave_possible
&& err
!= 0x81)
3005 classes
[1] = ata_dev_try_classify(ap
, 1, &err
);
3008 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes
[0], classes
[1]);
/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);

 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
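/*
 * Illustrative sketch (not part of libata): how the SControl values
 * written above decode.  DET occupies bits 3:0, SPD bits 7:4 and IPM
 * bits 11:8; masking with 0x0f0 preserves the configured speed limit,
 * 0x301 requests COMRESET (DET=1) and 0x304 takes the phy offline
 * (DET=4), both with partial/slumber power management disabled (IPM=3).
 * The helper name is hypothetical.
 */
#if 0
static u32 example_scontrol_comreset(u32 scontrol)
{
	return (scontrol & 0x0f0) | 0x301;	/* keep SPD, DET=1, IPM=3 */
}
#endif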
3071 * sata_std_hardreset - reset host port via SATA phy reset
3072 * @ap: port to reset
3073 * @class: resulting class of attached device
3075 * SATA phy-reset host port using DET bits of SControl register,
3076 * wait for !BSY and classify the attached device.
3079 * Kernel thread context (may sleep)
3082 * 0 on success, -errno otherwise.
3084 int sata_std_hardreset(struct ata_port
*ap
, unsigned int *class)
3086 const unsigned long *timing
= sata_ehc_deb_timing(&ap
->eh_context
);
3092 rc
= sata_port_hardreset(ap
, timing
);
3094 ata_port_printk(ap
, KERN_ERR
,
3095 "COMRESET failed (errno=%d)\n", rc
);
3099 /* TODO: phy layer with polling, timeouts, etc. */
3100 if (ata_port_offline(ap
)) {
3101 *class = ATA_DEV_NONE
;
3102 DPRINTK("EXIT, link offline\n");
3106 if (ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
)) {
3107 ata_port_printk(ap
, KERN_ERR
,
3108 "COMRESET failed (device not ready)\n");
3112 ap
->ops
->dev_select(ap
, 0); /* probably unnecessary */
3114 *class = ata_dev_try_classify(ap
, 0, NULL
);
3116 DPRINTK("EXIT, class=%u\n", *class);
3121 * ata_std_postreset - standard postreset callback
3122 * @ap: the target ata_port
3123 * @classes: classes of attached devices
3125 * This function is invoked after a successful reset. Note that
3126 * the device might have been reset more than once using
3127 * different reset methods before postreset is invoked.
3130 * Kernel thread context (may sleep)
3132 void ata_std_postreset(struct ata_port
*ap
, unsigned int *classes
)
3138 /* print link status */
3139 sata_print_link_status(ap
);
3142 if (sata_scr_read(ap
, SCR_ERROR
, &serror
) == 0)
3143 sata_scr_write(ap
, SCR_ERROR
, serror
);
3145 /* re-enable interrupts */
3146 if (!ap
->ops
->error_handler
) {
3147 /* FIXME: hack. create a hook instead */
3148 if (ap
->ioaddr
.ctl_addr
)
3152 /* is double-select really necessary? */
3153 if (classes
[0] != ATA_DEV_NONE
)
3154 ap
->ops
->dev_select(ap
, 1);
3155 if (classes
[1] != ATA_DEV_NONE
)
3156 ap
->ops
->dev_select(ap
, 0);
3158 /* bail out if no device is present */
3159 if (classes
[0] == ATA_DEV_NONE
&& classes
[1] == ATA_DEV_NONE
) {
3160 DPRINTK("EXIT, no device\n");
3164 /* set up device control */
3165 if (ap
->ioaddr
.ctl_addr
) {
3166 if (ap
->flags
& ATA_FLAG_MMIO
)
3167 writeb(ap
->ctl
, (void __iomem
*) ap
->ioaddr
.ctl_addr
);
3169 outb(ap
->ctl
, ap
->ioaddr
.ctl_addr
);
3176 * ata_dev_same_device - Determine whether new ID matches configured device
3177 * @dev: device to compare against
3178 * @new_class: class of the new device
3179 * @new_id: IDENTIFY page of the new device
3181 * Compare @new_class and @new_id against @dev and determine
3182 * whether @dev is the device indicated by @new_class and
3189 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3191 static int ata_dev_same_device(struct ata_device
*dev
, unsigned int new_class
,
3194 const u16
*old_id
= dev
->id
;
3195 unsigned char model
[2][41], serial
[2][21];
3198 if (dev
->class != new_class
) {
3199 ata_dev_printk(dev
, KERN_INFO
, "class mismatch %d != %d\n",
3200 dev
->class, new_class
);
3204 ata_id_c_string(old_id
, model
[0], ATA_ID_PROD_OFS
, sizeof(model
[0]));
3205 ata_id_c_string(new_id
, model
[1], ATA_ID_PROD_OFS
, sizeof(model
[1]));
3206 ata_id_c_string(old_id
, serial
[0], ATA_ID_SERNO_OFS
, sizeof(serial
[0]));
3207 ata_id_c_string(new_id
, serial
[1], ATA_ID_SERNO_OFS
, sizeof(serial
[1]));
3208 new_n_sectors
= ata_id_n_sectors(new_id
);
3210 if (strcmp(model
[0], model
[1])) {
3211 ata_dev_printk(dev
, KERN_INFO
, "model number mismatch "
3212 "'%s' != '%s'\n", model
[0], model
[1]);
3216 if (strcmp(serial
[0], serial
[1])) {
3217 ata_dev_printk(dev
, KERN_INFO
, "serial number mismatch "
3218 "'%s' != '%s'\n", serial
[0], serial
[1]);
3222 if (dev
->class == ATA_DEV_ATA
&& dev
->n_sectors
!= new_n_sectors
) {
3223 ata_dev_printk(dev
, KERN_INFO
, "n_sectors mismatch "
3225 (unsigned long long)dev
->n_sectors
,
3226 (unsigned long long)new_n_sectors
);
3234 * ata_dev_revalidate - Revalidate ATA device
3235 * @dev: device to revalidate
3236 * @readid_flags: read ID flags
3238 * Re-read IDENTIFY page and make sure @dev is still attached to
3242 * Kernel thread context (may sleep)
3245 * 0 on success, negative errno otherwise
3247 int ata_dev_revalidate(struct ata_device
*dev
, unsigned int readid_flags
)
3249 unsigned int class = dev
->class;
3250 u16
*id
= (void *)dev
->ap
->sector_buf
;
3253 if (!ata_dev_enabled(dev
)) {
3259 rc
= ata_dev_read_id(dev
, &class, readid_flags
, id
);
3263 /* is the device still there? */
3264 if (!ata_dev_same_device(dev
, class, id
)) {
3269 memcpy(dev
->id
, id
, sizeof(id
[0]) * ATA_ID_WORDS
);
3271 /* configure device according to the new ID */
3272 rc
= ata_dev_configure(dev
);
3277 ata_dev_printk(dev
, KERN_ERR
, "revalidation failed (errno=%d)\n", rc
);
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840", NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },

	/* Devices with NCQ limits */

	{ }
};
static int ata_strim(char *s, size_t len)
{
	len = strnlen(s, len);

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
		len--;
		s[len] = 0;
	}
	return len;
}

unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[40];
	unsigned char model_rev[16];
	unsigned int nlen, rlen;
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
		      sizeof(model_num));
	ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
		      sizeof(model_rev));
	nlen = ata_strim(model_num, sizeof(model_num));
	rlen = ata_strim(model_rev, sizeof(model_rev));

	while (ad->model_num) {
		if (!strncmp(ad->model_num, model_num, nlen)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strncmp(ad->model_rev, model_rev, rlen))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
}
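/*
 * Illustrative sketch (not part of libata): other limits can be derived
 * from the same blacklist, e.g. masking off NCQ for entries flagged
 * ATA_HORKAGE_NONCQ.  The helper name is hypothetical.
 */
#if 0
static int example_ncq_blacklisted(const struct ata_device *dev)
{
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) ? 1 : 0;
}
#endif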
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
	/* Apply drive side cable rule. Unknown or 80 pin cables reported
	 * host side are checked drive side as well. Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	HBA interface
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
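/*
 * Illustrative sketch (not part of libata): the 40-wire cable rule
 * applied twice above.  UDMA modes occupy eight consecutive bits
 * starting at ATA_SHIFT_UDMA, so clearing 0xF8 << ATA_SHIFT_UDMA
 * removes UDMA3 and above, capping the device at UDMA/33, which is
 * safe on a 40-conductor cable.  The helper name is hypothetical.
 */
#if 0
static unsigned long example_apply_40wire_rule(unsigned long xfer_mask)
{
	return xfer_mask & ~(0xF8UL << ATA_SHIFT_UDMA);
}
#endif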
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
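/*
 * Illustrative example (not part of libata): for a legacy 16-head,
 * 63-sectors-per-track translation, the taskfile built above would carry
 * the sector count in the sector count register and
 * device |= (16 - 1) & 0x0f, i.e. a max head value of 0x0f.
 */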
3525 * ata_sg_clean - Unmap DMA memory associated with command
3526 * @qc: Command containing DMA memory to be released
3528 * Unmap all mapped DMA memory associated with this command.
3531 * spin_lock_irqsave(host lock)
3533 void ata_sg_clean(struct ata_queued_cmd
*qc
)
3535 struct ata_port
*ap
= qc
->ap
;
3536 struct scatterlist
*sg
= qc
->__sg
;
3537 int dir
= qc
->dma_dir
;
3538 void *pad_buf
= NULL
;
3540 WARN_ON(!(qc
->flags
& ATA_QCFLAG_DMAMAP
));
3541 WARN_ON(sg
== NULL
);
3543 if (qc
->flags
& ATA_QCFLAG_SINGLE
)
3544 WARN_ON(qc
->n_elem
> 1);
3546 VPRINTK("unmapping %u sg elements\n", qc
->n_elem
);
3548 /* if we padded the buffer out to 32-bit bound, and data
3549 * xfer direction is from-device, we must copy from the
3550 * pad buffer back into the supplied buffer
3552 if (qc
->pad_len
&& !(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
3553 pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3555 if (qc
->flags
& ATA_QCFLAG_SG
) {
3557 dma_unmap_sg(ap
->dev
, sg
, qc
->n_elem
, dir
);
3558 /* restore last sg */
3559 sg
[qc
->orig_n_elem
- 1].length
+= qc
->pad_len
;
3561 struct scatterlist
*psg
= &qc
->pad_sgent
;
3562 void *addr
= kmap_atomic(psg
->page
, KM_IRQ0
);
3563 memcpy(addr
+ psg
->offset
, pad_buf
, qc
->pad_len
);
3564 kunmap_atomic(addr
, KM_IRQ0
);
3568 dma_unmap_single(ap
->dev
,
3569 sg_dma_address(&sg
[0]), sg_dma_len(&sg
[0]),
3572 sg
->length
+= qc
->pad_len
;
3574 memcpy(qc
->buf_virt
+ sg
->length
- qc
->pad_len
,
3575 pad_buf
, qc
->pad_len
);
3578 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
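/*
 * Illustrative sketch (not part of libata): the 64K boundary split used
 * when filling the PRD table above.  A segment starting at DMA offset
 * 0xfff0 with length 0x40 is emitted as two PRD entries of 0x10 and
 * 0x30 bytes, so no single entry crosses a 64K boundary.  The helper
 * name is hypothetical.
 */
#if 0
static u32 example_prd_chunk_len(u32 addr, u32 sg_len)
{
	u32 offset = addr & 0xffff;

	if (offset + sg_len > 0x10000)
		return 0x10000 - offset;	/* stop at the boundary */
	return sg_len;
}
#endif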
3634 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3635 * @qc: Metadata associated with taskfile to check
3637 * Allow low-level driver to filter ATA PACKET commands, returning
3638 * a status indicating whether or not it is OK to use DMA for the
3639 * supplied PACKET command.
3642 * spin_lock_irqsave(host lock)
3644 * RETURNS: 0 when ATAPI DMA can be used
3647 int ata_check_atapi_dma(struct ata_queued_cmd
*qc
)
3649 struct ata_port
*ap
= qc
->ap
;
3650 int rc
= 0; /* Assume ATAPI DMA is OK by default */
3652 if (ap
->ops
->check_atapi_dma
)
3653 rc
= ap
->ops
->check_atapi_dma(qc
);
3658 * ata_qc_prep - Prepare taskfile for submission
3659 * @qc: Metadata associated with taskfile to be prepared
3661 * Prepare ATA taskfile for submission.
3664 * spin_lock_irqsave(host lock)
3666 void ata_qc_prep(struct ata_queued_cmd
*qc
)
3668 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
3674 void ata_noop_qc_prep(struct ata_queued_cmd
*qc
) { }
3677 * ata_sg_init_one - Associate command with memory buffer
3678 * @qc: Command to be associated
3679 * @buf: Memory buffer
3680 * @buflen: Length of memory buffer, in bytes.
3682 * Initialize the data-related elements of queued_cmd @qc
3683 * to point to a single memory buffer, @buf of byte length @buflen.
3686 * spin_lock_irqsave(host lock)
3689 void ata_sg_init_one(struct ata_queued_cmd
*qc
, void *buf
, unsigned int buflen
)
3691 qc
->flags
|= ATA_QCFLAG_SINGLE
;
3693 qc
->__sg
= &qc
->sgent
;
3695 qc
->orig_n_elem
= 1;
3697 qc
->nbytes
= buflen
;
3699 sg_init_one(&qc
->sgent
, buf
, buflen
);
3703 * ata_sg_init - Associate command with scatter-gather table.
3704 * @qc: Command to be associated
3705 * @sg: Scatter-gather table.
3706 * @n_elem: Number of elements in s/g table.
3708 * Initialize the data-related elements of queued_cmd @qc
3709 * to point to a scatter-gather table @sg, containing @n_elem
3713 * spin_lock_irqsave(host lock)
3716 void ata_sg_init(struct ata_queued_cmd
*qc
, struct scatterlist
*sg
,
3717 unsigned int n_elem
)
3719 qc
->flags
|= ATA_QCFLAG_SG
;
3721 qc
->n_elem
= n_elem
;
3722 qc
->orig_n_elem
= n_elem
;
3726 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3727 * @qc: Command with memory buffer to be mapped.
3729 * DMA-map the memory buffer associated with queued_cmd @qc.
3732 * spin_lock_irqsave(host lock)
3735 * Zero on success, negative on error.
3738 static int ata_sg_setup_one(struct ata_queued_cmd
*qc
)
3740 struct ata_port
*ap
= qc
->ap
;
3741 int dir
= qc
->dma_dir
;
3742 struct scatterlist
*sg
= qc
->__sg
;
3743 dma_addr_t dma_address
;
3746 /* we must lengthen transfers to end on a 32-bit boundary */
3747 qc
->pad_len
= sg
->length
& 3;
3749 void *pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3750 struct scatterlist
*psg
= &qc
->pad_sgent
;
3752 WARN_ON(qc
->dev
->class != ATA_DEV_ATAPI
);
3754 memset(pad_buf
, 0, ATA_DMA_PAD_SZ
);
3756 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
3757 memcpy(pad_buf
, qc
->buf_virt
+ sg
->length
- qc
->pad_len
,
3760 sg_dma_address(psg
) = ap
->pad_dma
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3761 sg_dma_len(psg
) = ATA_DMA_PAD_SZ
;
3763 sg
->length
-= qc
->pad_len
;
3764 if (sg
->length
== 0)
3767 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3768 sg
->length
, qc
->pad_len
);
3776 dma_address
= dma_map_single(ap
->dev
, qc
->buf_virt
,
3778 if (dma_mapping_error(dma_address
)) {
3780 sg
->length
+= qc
->pad_len
;
3784 sg_dma_address(sg
) = dma_address
;
3785 sg_dma_len(sg
) = sg
->length
;
3788 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg
),
3789 qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
3795 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3796 * @qc: Command with scatter-gather table to be mapped.
3798 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3801 * spin_lock_irqsave(host lock)
3804 * Zero on success, negative on error.
3808 static int ata_sg_setup(struct ata_queued_cmd
*qc
)
3810 struct ata_port
*ap
= qc
->ap
;
3811 struct scatterlist
*sg
= qc
->__sg
;
3812 struct scatterlist
*lsg
= &sg
[qc
->n_elem
- 1];
3813 int n_elem
, pre_n_elem
, dir
, trim_sg
= 0;
3815 VPRINTK("ENTER, ata%u\n", ap
->id
);
3816 WARN_ON(!(qc
->flags
& ATA_QCFLAG_SG
));
3818 /* we must lengthen transfers to end on a 32-bit boundary */
3819 qc
->pad_len
= lsg
->length
& 3;
3821 void *pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3822 struct scatterlist
*psg
= &qc
->pad_sgent
;
3823 unsigned int offset
;
3825 WARN_ON(qc
->dev
->class != ATA_DEV_ATAPI
);
3827 memset(pad_buf
, 0, ATA_DMA_PAD_SZ
);
3830 * psg->page/offset are used to copy to-be-written
3831 * data in this function or read data in ata_sg_clean.
3833 offset
= lsg
->offset
+ lsg
->length
- qc
->pad_len
;
3834 psg
->page
= nth_page(lsg
->page
, offset
>> PAGE_SHIFT
);
3835 psg
->offset
= offset_in_page(offset
);
3837 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
) {
3838 void *addr
= kmap_atomic(psg
->page
, KM_IRQ0
);
3839 memcpy(pad_buf
, addr
+ psg
->offset
, qc
->pad_len
);
3840 kunmap_atomic(addr
, KM_IRQ0
);
3843 sg_dma_address(psg
) = ap
->pad_dma
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3844 sg_dma_len(psg
) = ATA_DMA_PAD_SZ
;
3846 lsg
->length
-= qc
->pad_len
;
3847 if (lsg
->length
== 0)
3850 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3851 qc
->n_elem
- 1, lsg
->length
, qc
->pad_len
);
3854 pre_n_elem
= qc
->n_elem
;
3855 if (trim_sg
&& pre_n_elem
)
3864 n_elem
= dma_map_sg(ap
->dev
, sg
, pre_n_elem
, dir
);
3866 /* restore last sg */
3867 lsg
->length
+= qc
->pad_len
;
3871 DPRINTK("%d sg elements mapped\n", n_elem
);
3874 qc
->n_elem
= n_elem
;
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
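/*
 * Illustrative usage (not part of libata): IDENTIFY DEVICE data arrives
 * as little-endian 16-bit words, so callers convert a just-read ID page
 * to CPU byte order before parsing it, e.g.
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian machines this compiles to a no-op.
 */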
3902 * ata_mmio_data_xfer - Transfer data by MMIO
3903 * @adev: device for this I/O
3905 * @buflen: buffer length
3906 * @write_data: read/write
3908 * Transfer data from/to the device data register by MMIO.
3911 * Inherited from caller.
3914 void ata_mmio_data_xfer(struct ata_device
*adev
, unsigned char *buf
,
3915 unsigned int buflen
, int write_data
)
3917 struct ata_port
*ap
= adev
->ap
;
3919 unsigned int words
= buflen
>> 1;
3920 u16
*buf16
= (u16
*) buf
;
3921 void __iomem
*mmio
= (void __iomem
*)ap
->ioaddr
.data_addr
;
3923 /* Transfer multiple of 2 bytes */
3925 for (i
= 0; i
< words
; i
++)
3926 writew(le16_to_cpu(buf16
[i
]), mmio
);
3928 for (i
= 0; i
< words
; i
++)
3929 buf16
[i
] = cpu_to_le16(readw(mmio
));
3932 /* Transfer trailing 1 byte, if any. */
3933 if (unlikely(buflen
& 0x01)) {
3934 u16 align_buf
[1] = { 0 };
3935 unsigned char *trailing_buf
= buf
+ buflen
- 1;
3938 memcpy(align_buf
, trailing_buf
, 1);
3939 writew(le16_to_cpu(align_buf
[0]), mmio
);
3941 align_buf
[0] = cpu_to_le16(readw(mmio
));
3942 memcpy(trailing_buf
, align_buf
, 1);
3948 * ata_pio_data_xfer - Transfer data by PIO
3949 * @adev: device to target
3951 * @buflen: buffer length
3952 * @write_data: read/write
3954 * Transfer data from/to the device data register by PIO.
3957 * Inherited from caller.
3960 void ata_pio_data_xfer(struct ata_device
*adev
, unsigned char *buf
,
3961 unsigned int buflen
, int write_data
)
3963 struct ata_port
*ap
= adev
->ap
;
3964 unsigned int words
= buflen
>> 1;
3966 /* Transfer multiple of 2 bytes */
3968 outsw(ap
->ioaddr
.data_addr
, buf
, words
);
3970 insw(ap
->ioaddr
.data_addr
, buf
, words
);
3972 /* Transfer trailing 1 byte, if any. */
3973 if (unlikely(buflen
& 0x01)) {
3974 u16 align_buf
[1] = { 0 };
3975 unsigned char *trailing_buf
= buf
+ buflen
- 1;
3978 memcpy(align_buf
, trailing_buf
, 1);
3979 outw(le16_to_cpu(align_buf
[0]), ap
->ioaddr
.data_addr
);
3981 align_buf
[0] = cpu_to_le16(inw(ap
->ioaddr
.data_addr
));
3982 memcpy(trailing_buf
, align_buf
, 1);
3988 * ata_pio_data_xfer_noirq - Transfer data by PIO
3989 * @adev: device to target
3991 * @buflen: buffer length
3992 * @write_data: read/write
3994 * Transfer data from/to the device data register by PIO. Do the
3995 * transfer with interrupts disabled.
3998 * Inherited from caller.
4001 void ata_pio_data_xfer_noirq(struct ata_device
*adev
, unsigned char *buf
,
4002 unsigned int buflen
, int write_data
)
4004 unsigned long flags
;
4005 local_irq_save(flags
);
4006 ata_pio_data_xfer(adev
, buf
, buflen
, write_data
);
4007 local_irq_restore(flags
);
4012 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
4013 * @qc: Command on going
4015 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
4018 * Inherited from caller.
4021 static void ata_pio_sector(struct ata_queued_cmd
*qc
)
4023 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
4024 struct scatterlist
*sg
= qc
->__sg
;
4025 struct ata_port
*ap
= qc
->ap
;
4027 unsigned int offset
;
4030 if (qc
->cursect
== (qc
->nsect
- 1))
4031 ap
->hsm_task_state
= HSM_ST_LAST
;
4033 page
= sg
[qc
->cursg
].page
;
4034 offset
= sg
[qc
->cursg
].offset
+ qc
->cursg_ofs
* ATA_SECT_SIZE
;
4036 /* get the current page and offset */
4037 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
4038 offset
%= PAGE_SIZE
;
4040 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
4042 if (PageHighMem(page
)) {
4043 unsigned long flags
;
4045 /* FIXME: use a bounce buffer */
4046 local_irq_save(flags
);
4047 buf
= kmap_atomic(page
, KM_IRQ0
);
4049 /* do the actual data transfer */
4050 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, ATA_SECT_SIZE
, do_write
);
4052 kunmap_atomic(buf
, KM_IRQ0
);
4053 local_irq_restore(flags
);
4055 buf
= page_address(page
);
4056 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, ATA_SECT_SIZE
, do_write
);
4062 if ((qc
->cursg_ofs
* ATA_SECT_SIZE
) == (&sg
[qc
->cursg
])->length
) {
4069 * ata_pio_sectors - Transfer one or many 512-byte sectors.
4070 * @qc: Command on going
4072 * Transfer one or many ATA_SECT_SIZE of data from/to the
4073 * ATA device for the DRQ request.
4076 * Inherited from caller.
4079 static void ata_pio_sectors(struct ata_queued_cmd
*qc
)
4081 if (is_multi_taskfile(&qc
->tf
)) {
4082 /* READ/WRITE MULTIPLE */
4085 WARN_ON(qc
->dev
->multi_count
== 0);
4087 nsect
= min(qc
->nsect
- qc
->cursect
, qc
->dev
->multi_count
);
4095 * atapi_send_cdb - Write CDB bytes to hardware
4096 * @ap: Port to which ATAPI device is attached.
4097 * @qc: Taskfile currently active
4099 * When device has indicated its readiness to accept
4100 * a CDB, this function is called. Send the CDB.
4106 static void atapi_send_cdb(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
4109 DPRINTK("send cdb\n");
4110 WARN_ON(qc
->dev
->cdb_len
< 12);
4112 ap
->ops
->data_xfer(qc
->dev
, qc
->cdb
, qc
->dev
->cdb_len
, 1);
4113 ata_altstatus(ap
); /* flush */
4115 switch (qc
->tf
.protocol
) {
4116 case ATA_PROT_ATAPI
:
4117 ap
->hsm_task_state
= HSM_ST
;
4119 case ATA_PROT_ATAPI_NODATA
:
4120 ap
->hsm_task_state
= HSM_ST_LAST
;
4122 case ATA_PROT_ATAPI_DMA
:
4123 ap
->hsm_task_state
= HSM_ST_LAST
;
4124 /* initiate bmdma */
4125 ap
->ops
->bmdma_start(qc
);
4131 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4132 * @qc: Command on going
4133 * @bytes: number of bytes
* Transfer data from/to the ATAPI device.
4138 * Inherited from caller.
4142 static void __atapi_pio_bytes(struct ata_queued_cmd
*qc
, unsigned int bytes
)
4144 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
4145 struct scatterlist
*sg
= qc
->__sg
;
4146 struct ata_port
*ap
= qc
->ap
;
4149 unsigned int offset
, count
;
4151 if (qc
->curbytes
+ bytes
>= qc
->nbytes
)
4152 ap
->hsm_task_state
= HSM_ST_LAST
;
4155 if (unlikely(qc
->cursg
>= qc
->n_elem
)) {
4157 * The end of qc->sg is reached and the device expects
4158 * more data to transfer. In order not to overrun qc->sg
4159 * and fulfill length specified in the byte count register,
4160 * - for read case, discard trailing data from the device
4161 * - for write case, padding zero data to the device
4163 u16 pad_buf
[1] = { 0 };
4164 unsigned int words
= bytes
>> 1;
4167 if (words
) /* warning if bytes > 1 */
4168 ata_dev_printk(qc
->dev
, KERN_WARNING
,
4169 "%u bytes trailing data\n", bytes
);
4171 for (i
= 0; i
< words
; i
++)
4172 ap
->ops
->data_xfer(qc
->dev
, (unsigned char*)pad_buf
, 2, do_write
);
4174 ap
->hsm_task_state
= HSM_ST_LAST
;
4178 sg
= &qc
->__sg
[qc
->cursg
];
4181 offset
= sg
->offset
+ qc
->cursg_ofs
;
4183 /* get the current page and offset */
4184 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
4185 offset
%= PAGE_SIZE
;
4187 /* don't overrun current sg */
4188 count
= min(sg
->length
- qc
->cursg_ofs
, bytes
);
4190 /* don't cross page boundaries */
4191 count
= min(count
, (unsigned int)PAGE_SIZE
- offset
);
4193 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
4195 if (PageHighMem(page
)) {
4196 unsigned long flags
;
4198 /* FIXME: use bounce buffer */
4199 local_irq_save(flags
);
4200 buf
= kmap_atomic(page
, KM_IRQ0
);
4202 /* do the actual data transfer */
4203 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, count
, do_write
);
4205 kunmap_atomic(buf
, KM_IRQ0
);
4206 local_irq_restore(flags
);
4208 buf
= page_address(page
);
4209 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, count
, do_write
);
4213 qc
->curbytes
+= count
;
4214 qc
->cursg_ofs
+= count
;
4216 if (qc
->cursg_ofs
== sg
->length
) {
4226 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4227 * @qc: Command on going
* Transfer data from/to the ATAPI device.
4232 * Inherited from caller.
4235 static void atapi_pio_bytes(struct ata_queued_cmd
*qc
)
4237 struct ata_port
*ap
= qc
->ap
;
4238 struct ata_device
*dev
= qc
->dev
;
4239 unsigned int ireason
, bc_lo
, bc_hi
, bytes
;
4240 int i_write
, do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
) ? 1 : 0;
4242 /* Abuse qc->result_tf for temp storage of intermediate TF
4243 * here to save some kernel stack usage.
4244 * For normal completion, qc->result_tf is not relevant. For
4245 * error, qc->result_tf is later overwritten by ata_qc_complete().
4246 * So, the correctness of qc->result_tf is not affected.
4248 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
4249 ireason
= qc
->result_tf
.nsect
;
4250 bc_lo
= qc
->result_tf
.lbam
;
4251 bc_hi
= qc
->result_tf
.lbah
;
4252 bytes
= (bc_hi
<< 8) | bc_lo
;
4254 /* shall be cleared to zero, indicating xfer of data */
4255 if (ireason
& (1 << 0))
4258 /* make sure transfer direction matches expected */
4259 i_write
= ((ireason
& (1 << 1)) == 0) ? 1 : 0;
4260 if (do_write
!= i_write
)
4263 VPRINTK("ata%u: xfering %d bytes\n", ap
->id
, bytes
);
4265 __atapi_pio_bytes(qc
, bytes
);
4270 ata_dev_printk(dev
, KERN_INFO
, "ATAPI check failed\n");
4271 qc
->err_mask
|= AC_ERR_HSM
;
4272 ap
->hsm_task_state
= HSM_ST_ERR
;
4276 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4277 * @ap: the target ata_port
4281 * 1 if ok in workqueue, 0 otherwise.
4284 static inline int ata_hsm_ok_in_wq(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
4286 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
4289 if (ap
->hsm_task_state
== HSM_ST_FIRST
) {
4290 if (qc
->tf
.protocol
== ATA_PROT_PIO
&&
4291 (qc
->tf
.flags
& ATA_TFLAG_WRITE
))
4294 if (is_atapi_taskfile(&qc
->tf
) &&
4295 !(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
4303 * ata_hsm_qc_complete - finish a qc running on standard HSM
4304 * @qc: Command to complete
4305 * @in_wq: 1 if called from workqueue, 0 otherwise
4307 * Finish @qc which is running on standard HSM.
4310 * If @in_wq is zero, spin_lock_irqsave(host lock).
4311 * Otherwise, none on entry and grabs host lock.
4313 static void ata_hsm_qc_complete(struct ata_queued_cmd
*qc
, int in_wq
)
4315 struct ata_port
*ap
= qc
->ap
;
4316 unsigned long flags
;
4318 if (ap
->ops
->error_handler
) {
4320 spin_lock_irqsave(ap
->lock
, flags
);
4322 /* EH might have kicked in while host lock is
4325 qc
= ata_qc_from_tag(ap
, qc
->tag
);
4327 if (likely(!(qc
->err_mask
& AC_ERR_HSM
))) {
4329 ata_qc_complete(qc
);
4331 ata_port_freeze(ap
);
4334 spin_unlock_irqrestore(ap
->lock
, flags
);
4336 if (likely(!(qc
->err_mask
& AC_ERR_HSM
)))
4337 ata_qc_complete(qc
);
4339 ata_port_freeze(ap
);
4343 spin_lock_irqsave(ap
->lock
, flags
);
4345 ata_qc_complete(qc
);
4346 spin_unlock_irqrestore(ap
->lock
, flags
);
4348 ata_qc_complete(qc
);
4351 ata_altstatus(ap
); /* flush */
4355 * ata_hsm_move - move the HSM to the next state.
4356 * @ap: the target ata_port
4358 * @status: current device status
4359 * @in_wq: 1 if called from workqueue, 0 otherwise
4362 * 1 when poll next status needed, 0 otherwise.
4364 int ata_hsm_move(struct ata_port
*ap
, struct ata_queued_cmd
*qc
,
4365 u8 status
, int in_wq
)
4367 unsigned long flags
= 0;
4370 WARN_ON((qc
->flags
& ATA_QCFLAG_ACTIVE
) == 0);
4372 /* Make sure ata_qc_issue_prot() does not throw things
4373 * like DMA polling into the workqueue. Notice that
4374 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4376 WARN_ON(in_wq
!= ata_hsm_ok_in_wq(ap
, qc
));
4379 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4380 ap
->id
, qc
->tf
.protocol
, ap
->hsm_task_state
, status
);
4382 switch (ap
->hsm_task_state
) {
4384 /* Send first data block or PACKET CDB */
4386 /* If polling, we will stay in the work queue after
4387 * sending the data. Otherwise, interrupt handler
4388 * takes over after sending the data.
4390 poll_next
= (qc
->tf
.flags
& ATA_TFLAG_POLLING
);
4392 /* check device status */
4393 if (unlikely((status
& ATA_DRQ
) == 0)) {
4394 /* handle BSY=0, DRQ=0 as error */
4395 if (likely(status
& (ATA_ERR
| ATA_DF
)))
4396 /* device stops HSM for abort/error */
4397 qc
->err_mask
|= AC_ERR_DEV
;
4399 /* HSM violation. Let EH handle this */
4400 qc
->err_mask
|= AC_ERR_HSM
;
4402 ap
->hsm_task_state
= HSM_ST_ERR
;
4406 /* Device should not ask for data transfer (DRQ=1)
4407 * when it finds something wrong.
4408 * We ignore DRQ here and stop the HSM by
4409 * changing hsm_task_state to HSM_ST_ERR and
4410 * let the EH abort the command or reset the device.
4412 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
4413 printk(KERN_WARNING
"ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4415 qc
->err_mask
|= AC_ERR_HSM
;
4416 ap
->hsm_task_state
= HSM_ST_ERR
;
4420 /* Send the CDB (atapi) or the first data block (ata pio out).
4421 * During the state transition, interrupt handler shouldn't
4422 * be invoked before the data transfer is complete and
4423 * hsm_task_state is changed. Hence, the following locking.
4426 spin_lock_irqsave(ap
->lock
, flags
);
4428 if (qc
->tf
.protocol
== ATA_PROT_PIO
) {
4429 /* PIO data out protocol.
4430 * send first data block.
4433 /* ata_pio_sectors() might change the state
4434 * to HSM_ST_LAST. so, the state is changed here
4435 * before ata_pio_sectors().
4437 ap
->hsm_task_state
= HSM_ST
;
4438 ata_pio_sectors(qc
);
4439 ata_altstatus(ap
); /* flush */
4442 atapi_send_cdb(ap
, qc
);
4445 spin_unlock_irqrestore(ap
->lock
, flags
);
4447 /* if polling, ata_pio_task() handles the rest.
4448 * otherwise, interrupt handler takes over from here.
4453 /* complete command or read/write the data register */
4454 if (qc
->tf
.protocol
== ATA_PROT_ATAPI
) {
4455 /* ATAPI PIO protocol */
4456 if ((status
& ATA_DRQ
) == 0) {
4457 /* No more data to transfer or device error.
4458 * Device error will be tagged in HSM_ST_LAST.
4460 ap
->hsm_task_state
= HSM_ST_LAST
;
4464 /* Device should not ask for data transfer (DRQ=1)
4465 * when it finds something wrong.
4466 * We ignore DRQ here and stop the HSM by
4467 * changing hsm_task_state to HSM_ST_ERR and
4468 * let the EH abort the command or reset the device.
4470 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
4471 printk(KERN_WARNING
"ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4473 qc
->err_mask
|= AC_ERR_HSM
;
4474 ap
->hsm_task_state
= HSM_ST_ERR
;
4478 atapi_pio_bytes(qc
);
4480 if (unlikely(ap
->hsm_task_state
== HSM_ST_ERR
))
4481 /* bad ireason reported by device */
4485 /* ATA PIO protocol */
4486 if (unlikely((status
& ATA_DRQ
) == 0)) {
4487 /* handle BSY=0, DRQ=0 as error */
4488 if (likely(status
& (ATA_ERR
| ATA_DF
)))
4489 /* device stops HSM for abort/error */
4490 qc
->err_mask
|= AC_ERR_DEV
;
4492 /* HSM violation. Let EH handle this.
4493 * Phantom devices also trigger this
4494 * condition. Mark hint.
4496 qc
->err_mask
|= AC_ERR_HSM
|
4499 ap
->hsm_task_state
= HSM_ST_ERR
;
4503 /* For PIO reads, some devices may ask for
4504 * data transfer (DRQ=1) alone with ERR=1.
4505 * We respect DRQ here and transfer one
4506 * block of junk data before changing the
4507 * hsm_task_state to HSM_ST_ERR.
4509 * For PIO writes, ERR=1 DRQ=1 doesn't make
4510 * sense since the data block has been
4511 * transferred to the device.
4513 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
/* data might be corrupted */
4515 qc
->err_mask
|= AC_ERR_DEV
;
4517 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
)) {
4518 ata_pio_sectors(qc
);
4520 status
= ata_wait_idle(ap
);
4523 if (status
& (ATA_BUSY
| ATA_DRQ
))
4524 qc
->err_mask
|= AC_ERR_HSM
;
4526 /* ata_pio_sectors() might change the
4527 * state to HSM_ST_LAST. so, the state
4528 * is changed after ata_pio_sectors().
4530 ap
->hsm_task_state
= HSM_ST_ERR
;
4534 ata_pio_sectors(qc
);
4536 if (ap
->hsm_task_state
== HSM_ST_LAST
&&
4537 (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))) {
4540 status
= ata_wait_idle(ap
);
4545 ata_altstatus(ap
); /* flush */
4550 if (unlikely(!ata_ok(status
))) {
4551 qc
->err_mask
|= __ac_err_mask(status
);
4552 ap
->hsm_task_state
= HSM_ST_ERR
;
4556 /* no more data to transfer */
4557 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4558 ap
->id
, qc
->dev
->devno
, status
);
4560 WARN_ON(qc
->err_mask
);
4562 ap
->hsm_task_state
= HSM_ST_IDLE
;
4564 /* complete taskfile transaction */
4565 ata_hsm_qc_complete(qc
, in_wq
);
4571 /* make sure qc->err_mask is available to
4572 * know what's wrong and recover
4574 WARN_ON(qc
->err_mask
== 0);
4576 ap
->hsm_task_state
= HSM_ST_IDLE
;
4578 /* complete taskfile transaction */
4579 ata_hsm_qc_complete(qc
, in_wq
);
4591 static void ata_pio_task(void *_data
)
4593 struct ata_queued_cmd
*qc
= _data
;
4594 struct ata_port
*ap
= qc
->ap
;
4599 WARN_ON(ap
->hsm_task_state
== HSM_ST_IDLE
);
4602 * This is purely heuristic. This is a fast path.
4603 * Sometimes when we enter, BSY will be cleared in
4604 * a chk-status or two. If not, the drive is probably seeking
4605 * or something. Snooze for a couple msecs, then
4606 * chk-status again. If still busy, queue delayed work.
4608 status
= ata_busy_wait(ap
, ATA_BUSY
, 5);
4609 if (status
& ATA_BUSY
) {
4611 status
= ata_busy_wait(ap
, ATA_BUSY
, 10);
4612 if (status
& ATA_BUSY
) {
4613 ata_port_queue_task(ap
, ata_pio_task
, qc
, ATA_SHORT_PAUSE
);
4619 poll_next
= ata_hsm_move(ap
, qc
, status
, 1);
4621 /* another command or interrupt handler
4622 * may be running at this point.
4629 * ata_qc_new - Request an available ATA command, for queueing
4630 * @ap: Port associated with device @dev
4631 * @dev: Device from whom we request an available command structure
4637 static struct ata_queued_cmd
*ata_qc_new(struct ata_port
*ap
)
4639 struct ata_queued_cmd
*qc
= NULL
;
4642 /* no command while frozen */
4643 if (unlikely(ap
->pflags
& ATA_PFLAG_FROZEN
))
4646 /* the last tag is reserved for internal command. */
4647 for (i
= 0; i
< ATA_MAX_QUEUE
- 1; i
++)
4648 if (!test_and_set_bit(i
, &ap
->qc_allocated
)) {
4649 qc
= __ata_qc_from_tag(ap
, i
);
4660 * ata_qc_new_init - Request an available ATA command, and initialize it
4661 * @dev: Device from whom we request an available command structure
4667 struct ata_queued_cmd
*ata_qc_new_init(struct ata_device
*dev
)
4669 struct ata_port
*ap
= dev
->ap
;
4670 struct ata_queued_cmd
*qc
;
4672 qc
= ata_qc_new(ap
);
4685 * ata_qc_free - free unused ata_queued_cmd
4686 * @qc: Command to complete
4688 * Designed to free unused ata_queued_cmd object
4689 * in case something prevents using it.
4692 * spin_lock_irqsave(host lock)
4694 void ata_qc_free(struct ata_queued_cmd
*qc
)
4696 struct ata_port
*ap
= qc
->ap
;
4699 WARN_ON(qc
== NULL
); /* ata_qc_from_tag _might_ return NULL */
4703 if (likely(ata_tag_valid(tag
))) {
4704 qc
->tag
= ATA_TAG_POISON
;
4705 clear_bit(tag
, &ap
->qc_allocated
);
4709 void __ata_qc_complete(struct ata_queued_cmd
*qc
)
4711 struct ata_port
*ap
= qc
->ap
;
4713 WARN_ON(qc
== NULL
); /* ata_qc_from_tag _might_ return NULL */
4714 WARN_ON(!(qc
->flags
& ATA_QCFLAG_ACTIVE
));
4716 if (likely(qc
->flags
& ATA_QCFLAG_DMAMAP
))
4719 /* command should be marked inactive atomically with qc completion */
4720 if (qc
->tf
.protocol
== ATA_PROT_NCQ
)
4721 ap
->sactive
&= ~(1 << qc
->tag
);
4723 ap
->active_tag
= ATA_TAG_POISON
;
4725 /* atapi: mark qc as inactive to prevent the interrupt handler
4726 * from completing the command twice later, before the error handler
4727 * is called. (when rc != 0 and atapi request sense is needed)
4729 qc
->flags
&= ~ATA_QCFLAG_ACTIVE
;
4730 ap
->qc_active
&= ~(1 << qc
->tag
);
4732 /* call completion callback */
4733 qc
->complete_fn(qc
);
4736 static void fill_result_tf(struct ata_queued_cmd
*qc
)
4738 struct ata_port
*ap
= qc
->ap
;
4740 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
4741 qc
->result_tf
.flags
= qc
->tf
.flags
;
4745 * ata_qc_complete - Complete an active ATA command
4746 * @qc: Command to complete
4747 * @err_mask: ATA Status register contents
4749 * Indicate to the mid and upper layers that an ATA
4750 * command has completed, with either an ok or not-ok status.
4753 * spin_lock_irqsave(host lock)
4755 void ata_qc_complete(struct ata_queued_cmd
*qc
)
4757 struct ata_port
*ap
= qc
->ap
;
4759 /* XXX: New EH and old EH use different mechanisms to
4760 * synchronize EH with regular execution path.
4762 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4763 * Normal execution path is responsible for not accessing a
4764 * failed qc. libata core enforces the rule by returning NULL
4765 * from ata_qc_from_tag() for failed qcs.
4767 * Old EH depends on ata_qc_complete() nullifying completion
4768 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4769 * not synchronize with interrupt handler. Only PIO task is
4772 if (ap
->ops
->error_handler
) {
4773 WARN_ON(ap
->pflags
& ATA_PFLAG_FROZEN
);
4775 if (unlikely(qc
->err_mask
))
4776 qc
->flags
|= ATA_QCFLAG_FAILED
;
4778 if (unlikely(qc
->flags
& ATA_QCFLAG_FAILED
)) {
4779 if (!ata_tag_internal(qc
->tag
)) {
4780 /* always fill result TF for failed qc */
4782 ata_qc_schedule_eh(qc
);
4787 /* read result TF if requested */
4788 if (qc
->flags
& ATA_QCFLAG_RESULT_TF
)
4791 __ata_qc_complete(qc
);
4793 if (qc
->flags
& ATA_QCFLAG_EH_SCHEDULED
)
4796 /* read result TF if failed or requested */
4797 if (qc
->err_mask
|| qc
->flags
& ATA_QCFLAG_RESULT_TF
)
4800 __ata_qc_complete(qc
);
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_DMA:
		return 1;

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO:
		if (ap->flags & ATA_FLAG_PIO_DMA)
			return 1;

		/* fall through */

	default:
		return 0;
	}
}
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to the device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode.  Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
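/*
 * Example (illustrative sketch, not part of libata): a conventional
 * BMDMA driver can usually point its irq_handler hook straight at
 * ata_interrupt and let the helpers above do the per-port dispatch.
 * The foo_port_ops name is made up.
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		// ... taskfile and bmdma hooks ...
 *		.qc_issue	= ata_qc_issue_prot,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *	};
 */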
/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@ap: ATA port to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @ap.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_port *ap)
{
	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
}
/**
 *	sata_scr_read - read SCR register of the specified port
 *	@ap: ATA port to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @ap into *@val.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
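/*
 * Example (illustrative sketch, not part of libata): reading SStatus
 * through sata_scr_read() and decoding the SPD field.  Purely
 * illustrative; error handling is reduced to an early return and the
 * foo_link_speed name is made up.
 *
 *	static int foo_link_speed(struct ata_port *ap)
 *	{
 *		u32 sstatus;
 *
 *		if (sata_scr_read(ap, SCR_STATUS, &sstatus))
 *			return 0;		// SCRs not accessible
 *		return (sstatus >> 4) & 0xf;	// 1 = 1.5Gbps, 2 = 3.0Gbps
 *	}
 */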
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @ap.  This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
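/*
 * Example (illustrative sketch, not part of libata): clearing SError by
 * writing back the bits that are currently set, using the flushing
 * variant so the update reaches the PHY before the caller proceeds.
 * The foo_clear_serror name is made up.
 *
 *	static void foo_clear_serror(struct ata_port *ap)
 *	{
 *		u32 serror;
 *
 *		if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
 *			sata_scr_write_flush(ap, SCR_ERROR, serror);
 *	}
 */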
/**
 *	ata_port_online - test whether the given port is online
 *	@ap: ATA port to test
 *
 *	Test whether @ap is online.  Note that this function returns 0
 *	if online status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}
/**
 *	ata_port_offline - test whether the given port is offline
 *	@ap: ATA port to test
 *
 *	Test whether @ap is offline.  Note that this function returns
 *	0 if offline status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}
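/*
 * Example (illustrative sketch, not part of libata): because both
 * helpers return 0 when the SCRs cannot be read, "not online" is not
 * the same as "offline".  A caller that wants to skip work only when
 * the link is known to be empty therefore tests the offline side
 * explicitly, e.g.:
 *
 *	if (ata_port_offline(ap))
 *		return 0;	// definitely nothing attached, skip reset
 *	// else: either online or unknown; proceed with the reset
 */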
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_resume(host);
	return rc;
}
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}
/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
void ata_host_stop (struct ata_host *host)
{
	if (host->mmio_base)
		iounmap(host->mmio_base);
}
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 *	ata_port_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 *	ata_port_init_shost - Initialize SCSI host associated with ATA port
 *	@ap: ATA port to initialize SCSI host for
 *	@shost: SCSI host associated with @ap
 *
 *	Initialize SCSI host @shost associated with ATA port @ap.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;
}
/**
 *	ata_port_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, NULL on error.
 */
static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
				      struct ata_host *host,
				      unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}

	/* alloc a container for our list of ATA ports (buses) */
	host = kzalloc(sizeof(struct ata_host) +
		       (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		return 0;

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->mmio_base = ent->mmio_base;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
				 DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out_free_irq;
		}
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out_free_irq:
	free_irq(ent->irq, host);
err_out:
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		if (ap) {
			ap->ops->port_stop(ap);
			scsi_host_put(ap->scsi_host);
		}
	}

	kfree(host);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
/**
 *	ata_host_remove - PCI layer callback for device removal
 *	@host: ATA host set that was removed
 *
 *	Unregister all objects associated with this host set. Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
void ata_host_remove(struct ata_host *host)
{
	unsigned int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	free_irq(host->irq, host);
	if (host->irq2)
		free_irq(host->irq2, host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_release(ap->scsi_host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			/* FIXME: Add -ac IDE pci mods to remove these special cases */
			if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
				release_region(ATA_PRIMARY_CMD, 8);
			else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
				release_region(ATA_SECONDARY_CMD, 8);
		}

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);

	kfree(host);
}
/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@shost: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */
int ata_scsi_release(struct Scsi_Host *shost)
{
	struct ata_port *ap = ata_shost_to_port(shost);

	ap->ops->port_disable(ap);
	ap->ops->port_stop(ap);

	return 1;
}
struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */
void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
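/*
 * Example (illustrative sketch, not part of libata): a minimal LLDD
 * init path built on ata_probe_ent_alloc(), ata_std_ports() and
 * ata_device_add().  The foo_init_one name, the port addresses and the
 * IRQ number are all made up; error handling is reduced to the minimum.
 *
 *	static int foo_init_one(struct device *dev,
 *				const struct ata_port_info *pinfo)
 *	{
 *		struct ata_probe_ent *probe_ent;
 *
 *		probe_ent = ata_probe_ent_alloc(dev, pinfo);
 *		if (!probe_ent)
 *			return -ENOMEM;
 *
 *		probe_ent->n_ports = 1;
 *		probe_ent->irq = 14;			// made-up legacy IRQ
 *		probe_ent->port[0].cmd_addr = 0x1f0;	// made-up legacy base
 *		probe_ent->port[0].ctl_addr = 0x3f6;
 *		probe_ent->port[0].altstatus_addr = 0x3f6;
 *		ata_std_ports(&probe_ent->port[0]);
 *
 *		if (!ata_device_add(probe_ent)) {
 *			kfree(probe_ent);
 *			return -ENODEV;
 *		}
 *		kfree(probe_ent);
 *		return 0;
 *	}
 */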
#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	pci_iounmap(pdev, host->mmio_base);
}
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that
 *	hot-unplug or module unload event has occurred.
 *	Handle this by unregistering all objects associated
 *	with this PCI device.  Free those objects.  Then finally
 *	release PCI resources and disable device.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_remove(host);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}
	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
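/*
 * Example (illustrative sketch, not part of libata): using
 * pci_test_config_bits() to check whether a channel is enabled before
 * probing it.  The config register offset, mask and value below are
 * made up; a real driver uses the enable bits documented for its
 * controller.
 *
 *	static const struct pci_bits foo_enable_bits = {
 *		0x41,	// reg: config space offset (made up)
 *		1,	// width: 1 byte
 *		0x80,	// mask
 *		0x80,	// val: channel enabled when bit 7 is set
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENODEV;	// port disabled in firmware, skip it
 */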
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	ata_host_resume(host);

	return 0;
}

#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
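/*
 * Example (illustrative sketch, not part of libata): ata_ratelimit()
 * returns non-zero at most once per HZ/5, so it can gate messages that
 * an interrupt path might otherwise emit in a flood:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt, status 0x%x\n", status);
 */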
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
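/*
 * Example (illustrative sketch, not part of libata): polling a
 * memory-mapped status register until BSY clears, checking every 10ms
 * and giving up after one second.  foo_status_reg is a made-up iomem
 * pointer supplied by the caller.
 *
 *	u32 status = ata_wait_register(foo_status_reg, ATA_BUSY, ATA_BUSY,
 *				       10, 1000);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;	// still busy after the timeout
 */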
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return 0;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
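/*
 * Example (illustrative sketch, not part of libata): ata_dummy_port_ops
 * is what ata_device_add() installs for ports named in
 * probe_ent->dummy_port_mask.  A driver for a controller whose second
 * channel is not wired up could mark it like this before registering:
 *
 *	probe_ent->n_ports = 2;
 *	probe_ent->dummy_port_mask = 1 << 1;	// port 1 is absent
 *	ata_device_add(probe_ent);
 */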
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);