/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[13] = tf->hob_nsect;
}
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 */
int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
}
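/*
 * Worked example (illustration only, assuming the eight-entry grouping of
 * ata_rw_cmds above): a PIO multi-sector LBA48 FUA write computes
 * index 0 + fua 4 + lba48 2 + write 1 = 7, selecting
 * ATA_CMD_WRITE_MULTI_FUA_EXT from the first group of eight entries.
 */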
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
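/*
 * Example (illustration only, assuming ATA_SHIFT_PIO == 0,
 * ATA_SHIFT_MWDMA == 5 and ATA_SHIFT_UDMA == 8): packing pio_mask 0x1f,
 * mwdma_mask 0x07 and udma_mask 0x3f gives 0x1f | (0x07 << 5) |
 * (0x3f << 8) == 0x3fff, which ata_unpack_xfermask() below splits
 * back into the three per-class masks.
 */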
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}
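/*
 * Example (illustration only): with only the UDMA/5 bit set in @xfer_mask,
 * fls() - 1 yields that bit's position; it falls inside the UDMA row of
 * ata_xfer_tbl, so the function returns
 * XFER_UDMA_0 + (bit - ATA_SHIFT_UDMA) == XFER_UDMA_5.
 */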
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	}
}
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 */
static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 */
static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 */
static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
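/*
 * Example (illustration only): after a reset an ATAPI device leaves the
 * ATAPI signature 0x14/0xeb in the LBA mid/high registers, so the checks
 * above classify it as ATA_DEV_ATAPI; an ATA device leaves 0x00/0x00 (or
 * the SEMB signature 0x3c/0xc3) and is classified as ATA_DEV_ATA.
 */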
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
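/*
 * Example (illustration only): an LBA48 drive reports its capacity as a
 * 64-bit sector count in IDENTIFY words 100-103 (ata_id_u64(id, 100));
 * a plain LBA drive uses the 32-bit count starting at word 60; a CHS-only
 * drive falls back to cylinders * heads * sectors from words 1, 3 and 6.
 */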
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}

	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	ap->ops->dev_select(ap, device);

	if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
		msleep(150);
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x\n", id[49]);
	DPRINTK("80==0x%04x\n", id[80]);
	DPRINTK("88==0x%04x\n", id[88]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
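/*
 * Example (illustration only): an old drive reporting PIO timing number 2
 * in word 51 yields pio_mask = (2 << 2) - 1 = 0x7, i.e. PIO modes 0-2
 * supported; the mwdma and udma masks then come straight from the
 * corresponding IDENTIFY words.
 */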
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data value to pass to workqueue function
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user's (low level driver's) responsibility to make sure that
 *	only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	and SCSI requests.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_WORK(&ap->port_task, fn, data);

	if (!delay)
		rc = queue_work(ata_wq, &ap->port_task);
	else
		rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
				__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}
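/*
 * Example (illustration only): a drive advertising a 383 ns non-IORDY
 * PIO cycle time in its EIDE PIO word cannot keep up with the 240 ns
 * PIO2 cycle, so IORDY is reported as required; a drive reporting 240 ns
 * or less can run PIO2 without IORDY.
 */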
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@post_reset: is this read ID post-reset?
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    int post_reset, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (post_reset && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			post_reset = 0;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}

	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	ap->host->max_cmd_len = 0;
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->host->max_cmd_len = max_t(unsigned int,
					      ap->host->max_cmd_len,
					      ap->device[i].cdb_len);
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *	@print_info: Enable device info printout
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev, int print_info)
{
	struct ata_port *ap = dev->ap;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					"max %s, %Lu sectors: %s %s\n",
					ata_id_major_version(id),
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					lba_desc, ncq_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "ATA-%d, "
					"max %s, %Lu sectors: CHS %u/%u/%u\n",
					ata_id_major_version(id),
					ata_mode_string(xfer_mask),
					(unsigned long long)dev->n_sectors,
					dev->cylinders, dev->heads,
					dev->sectors);
		}

		if (dev->id[59] & 0x100) {
			dev->multi_count = dev->id[59] & 0xff;
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO,
					"ata%u: dev %u multi count %u\n",
					ap->id, dev->devno, dev->multi_count);
		}
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc, down_xfermask;
	struct ata_device *dev;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
		if (rc)
			goto fail;

		rc = ata_dev_configure(dev, 1);
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		down_xfermask = 1;
		goto fail;
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;

	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;

	mask &= (1 << spd) - 1;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
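/*
 * Example (illustration only): with ap->sata_spd_limit == 0x7 (Gen1-Gen3
 * allowed), fls(0x7) - 1 == 2, so the highest (Gen3) bit is cleared and
 * the new limit becomes 0x3; the printk then reports the highest speed
 * still allowed.
 */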
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@force_pio0: Force PIO0
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
{
	unsigned long xfer_mask;
	int highbit;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	/* don't gear down to MWDMA from UDMA, go directly to PIO */
	if (xfer_mask & ATA_MASK_UDMA)
		xfer_mask &= ~ATA_MASK_MWDMA;

	highbit = fls(xfer_mask) - 1;
	xfer_mask &= ~(1 << highbit);
	if (force_pio0)
		xfer_mask &= 1 << ATA_SHIFT_PIO;

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
		       ata_mode_string(xfer_mask));

	return 0;
}
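/*
 * Example (illustration only): a device currently allowed up to UDMA/100
 * first has its MWDMA bits masked off (UDMA devices drop straight to PIO),
 * then loses its highest remaining UDMA bit; with @force_pio0 set, only
 * the PIO0 bit survives, so the next ata_set_mode() call runs the device
 * at PIO0.
 */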
static int ata_dev_set_mode(struct ata_device *dev)
{
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	rc = ata_dev_revalidate(dev, 0);
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));

	return 0;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode) {
		/* FIXME: make ->set_mode handle no device case and
		 * return error code and failing device on failure.
		 */
		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			if (ata_dev_ready(&ap->device[i])) {
				ap->ops->set_mode(ap);
				break;
			}
		}
		return 0;
	}

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
		ap->host_set->simplex_claimed = 1;

	/* step5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	None.
 */
unsigned int ata_busy_sleep (struct ata_port *ap,
			     unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient\n");

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs)\n", tmout / HZ);
		return 1;
	}

	return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout))
			break;
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (status 0xFF)\n");
		return AC_ERR_OTHER;
	}

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);
}
/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constrains the
 *	beginning of the stable state.  Because, after hot unplugging,
 *	DET gets stuck at 1 on some controllers, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}
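/*
 * Illustrative sketch (not part of the original file): one way a reset
 * path could bring a link back up using the debounce timing tables
 * declared near the top of this file.  The helper name and the fixed
 * choice of sata_deb_timing_hotplug are assumptions for illustration;
 * real callers normally pick the table from the EH context, as
 * ata_std_prereset() below does via sata_ehc_deb_timing().
 */
#if 0
static int example_resume_link(struct ata_port *ap)
{
	int rc;

	/* hotplug timings: poll every 25ms, need 500ms stable, 2s timeout */
	rc = sata_phy_resume(ap, sata_deb_timing_hotplug);
	if (rc && rc != -EOPNOTSUPP)
		ata_port_printk(ap, KERN_WARNING,
				"failed to resume link (errno=%d)\n", rc);
	return rc;
}
#endif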
2589 static void ata_wait_spinup(struct ata_port
*ap
)
2591 struct ata_eh_context
*ehc
= &ap
->eh_context
;
2592 unsigned long end
, secs
;
2595 /* first, debounce phy if SATA */
2596 if (ap
->cbl
== ATA_CBL_SATA
) {
2597 rc
= sata_phy_debounce(ap
, sata_deb_timing_hotplug
);
2599 /* if debounced successfully and offline, no need to wait */
2600 if ((rc
== 0 || rc
== -EOPNOTSUPP
) && ata_port_offline(ap
))
2604 /* okay, let's give the drive time to spin up */
2605 end
= ehc
->i
.hotplug_timestamp
+ ATA_SPINUP_WAIT
* HZ
/ 1000;
2606 secs
= ((end
- jiffies
) + HZ
- 1) / HZ
;
2608 if (time_after(jiffies
, end
))
2612 ata_port_printk(ap
, KERN_INFO
, "waiting for device to spin up "
2613 "(%lu secs)\n", secs
);
2615 schedule_timeout_uninterruptible(end
- jiffies
);
2619 * ata_std_prereset - prepare for reset
2620 * @ap: ATA port to be reset
2622 * @ap is about to be reset. Initialize it.
2625 * Kernel thread context (may sleep)
2628 * 0 on success, -errno otherwise.
2630 int ata_std_prereset(struct ata_port
*ap
)
2632 struct ata_eh_context
*ehc
= &ap
->eh_context
;
2633 const unsigned long *timing
= sata_ehc_deb_timing(ehc
);
2636 /* handle link resume & hotplug spinup */
2637 if ((ehc
->i
.flags
& ATA_EHI_RESUME_LINK
) &&
2638 (ap
->flags
& ATA_FLAG_HRST_TO_RESUME
))
2639 ehc
->i
.action
|= ATA_EH_HARDRESET
;
2641 if ((ehc
->i
.flags
& ATA_EHI_HOTPLUGGED
) &&
2642 (ap
->flags
& ATA_FLAG_SKIP_D2H_BSY
))
2643 ata_wait_spinup(ap
);
2645 /* if we're about to do hardreset, nothing more to do */
2646 if (ehc
->i
.action
& ATA_EH_HARDRESET
)
2649 /* if SATA, resume phy */
2650 if (ap
->cbl
== ATA_CBL_SATA
) {
2651 rc
= sata_phy_resume(ap
, timing
);
2652 if (rc
&& rc
!= -EOPNOTSUPP
) {
2653 /* phy resume failed */
2654 ata_port_printk(ap
, KERN_WARNING
, "failed to resume "
2655 "link for reset (errno=%d)\n", rc
);
2660 /* Wait for !BSY if the controller can wait for the first D2H
2661 * Reg FIS and we don't know that no device is attached.
2663 if (!(ap
->flags
& ATA_FLAG_SKIP_D2H_BSY
) && !ata_port_offline(ap
))
2664 ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
);
2670 * ata_std_softreset - reset host port via ATA SRST
2671 * @ap: port to reset
2672 * @classes: resulting classes of attached devices
2674 * Reset host port using ATA SRST.
2677 * Kernel thread context (may sleep)
2680 * 0 on success, -errno otherwise.
2682 int ata_std_softreset(struct ata_port
*ap
, unsigned int *classes
)
2684 unsigned int slave_possible
= ap
->flags
& ATA_FLAG_SLAVE_POSS
;
2685 unsigned int devmask
= 0, err_mask
;
2690 if (ata_port_offline(ap
)) {
2691 classes
[0] = ATA_DEV_NONE
;
2695 /* determine if device 0/1 are present */
2696 if (ata_devchk(ap
, 0))
2697 devmask
|= (1 << 0);
2698 if (slave_possible
&& ata_devchk(ap
, 1))
2699 devmask
|= (1 << 1);
2701 /* select device 0 again */
2702 ap
->ops
->dev_select(ap
, 0);
2704 /* issue bus reset */
2705 DPRINTK("about to softreset, devmask=%x\n", devmask
);
2706 err_mask
= ata_bus_softreset(ap
, devmask
);
2708 ata_port_printk(ap
, KERN_ERR
, "SRST failed (err_mask=0x%x)\n",
2713 /* determine by signature whether we have ATA or ATAPI devices */
2714 classes
[0] = ata_dev_try_classify(ap
, 0, &err
);
2715 if (slave_possible
&& err
!= 0x81)
2716 classes
[1] = ata_dev_try_classify(ap
, 1, &err
);
2719 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes
[0], classes
[1]);
2724 * sata_std_hardreset - reset host port via SATA phy reset
2725 * @ap: port to reset
2726 * @class: resulting class of attached device
2728 * SATA phy-reset host port using DET bits of SControl register.
2731 * Kernel thread context (may sleep)
2734 * 0 on success, -errno otherwise.
2736 int sata_std_hardreset(struct ata_port
*ap
, unsigned int *class)
2738 struct ata_eh_context
*ehc
= &ap
->eh_context
;
2739 const unsigned long *timing
= sata_ehc_deb_timing(ehc
);
2745 if (sata_set_spd_needed(ap
)) {
2746 /* SATA spec says nothing about how to reconfigure
2747 * spd. To be on the safe side, turn off phy during
2748 * reconfiguration. This works for at least ICH7 AHCI
2751 if ((rc
= sata_scr_read(ap
, SCR_CONTROL
, &scontrol
)))
2754 scontrol
= (scontrol
& 0x0f0) | 0x304;
2756 if ((rc
= sata_scr_write(ap
, SCR_CONTROL
, scontrol
)))
2762 /* issue phy wake/reset */
2763 if ((rc
= sata_scr_read(ap
, SCR_CONTROL
, &scontrol
)))
2766 scontrol
= (scontrol
& 0x0f0) | 0x301;
2768 if ((rc
= sata_scr_write_flush(ap
, SCR_CONTROL
, scontrol
)))
2771 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
2772 * 10.4.2 says at least 1 ms.
2776 /* bring phy back */
2777 sata_phy_resume(ap
, timing
);
2779 /* TODO: phy layer with polling, timeouts, etc. */
2780 if (ata_port_offline(ap
)) {
2781 *class = ATA_DEV_NONE
;
2782 DPRINTK("EXIT, link offline\n");
2786 if (ata_busy_sleep(ap
, ATA_TMOUT_BOOT_QUICK
, ATA_TMOUT_BOOT
)) {
2787 ata_port_printk(ap
, KERN_ERR
,
2788 "COMRESET failed (device not ready)\n");
2792 ap
->ops
->dev_select(ap
, 0); /* probably unnecessary */
2794 *class = ata_dev_try_classify(ap
, 0, NULL
);
2796 DPRINTK("EXIT, class=%u\n", *class);
2801 * ata_std_postreset - standard postreset callback
2802 * @ap: the target ata_port
2803 * @classes: classes of attached devices
2805 * This function is invoked after a successful reset. Note that
2806 * the device might have been reset more than once using
2807 * different reset methods before postreset is invoked.
2810 * Kernel thread context (may sleep)
2812 void ata_std_postreset(struct ata_port
*ap
, unsigned int *classes
)
2818 /* print link status */
2819 sata_print_link_status(ap
);
2822 if (sata_scr_read(ap
, SCR_ERROR
, &serror
) == 0)
2823 sata_scr_write(ap
, SCR_ERROR
, serror
);
2825 /* re-enable interrupts */
2826 if (!ap
->ops
->error_handler
) {
2827 /* FIXME: hack. create a hook instead */
2828 if (ap
->ioaddr
.ctl_addr
)
2832 /* is double-select really necessary? */
2833 if (classes
[0] != ATA_DEV_NONE
)
2834 ap
->ops
->dev_select(ap
, 1);
2835 if (classes
[1] != ATA_DEV_NONE
)
2836 ap
->ops
->dev_select(ap
, 0);
2838 /* bail out if no device is present */
2839 if (classes
[0] == ATA_DEV_NONE
&& classes
[1] == ATA_DEV_NONE
) {
2840 DPRINTK("EXIT, no device\n");
2844 /* set up device control */
2845 if (ap
->ioaddr
.ctl_addr
) {
2846 if (ap
->flags
& ATA_FLAG_MMIO
)
2847 writeb(ap
->ctl
, (void __iomem
*) ap
->ioaddr
.ctl_addr
);
2849 outb(ap
->ctl
, ap
->ioaddr
.ctl_addr
);
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
2871 static int ata_dev_same_device(struct ata_device
*dev
, unsigned int new_class
,
2874 const u16
*old_id
= dev
->id
;
2875 unsigned char model
[2][41], serial
[2][21];
2878 if (dev
->class != new_class
) {
2879 ata_dev_printk(dev
, KERN_INFO
, "class mismatch %d != %d\n",
2880 dev
->class, new_class
);
2884 ata_id_c_string(old_id
, model
[0], ATA_ID_PROD_OFS
, sizeof(model
[0]));
2885 ata_id_c_string(new_id
, model
[1], ATA_ID_PROD_OFS
, sizeof(model
[1]));
2886 ata_id_c_string(old_id
, serial
[0], ATA_ID_SERNO_OFS
, sizeof(serial
[0]));
2887 ata_id_c_string(new_id
, serial
[1], ATA_ID_SERNO_OFS
, sizeof(serial
[1]));
2888 new_n_sectors
= ata_id_n_sectors(new_id
);
2890 if (strcmp(model
[0], model
[1])) {
2891 ata_dev_printk(dev
, KERN_INFO
, "model number mismatch "
2892 "'%s' != '%s'\n", model
[0], model
[1]);
2896 if (strcmp(serial
[0], serial
[1])) {
2897 ata_dev_printk(dev
, KERN_INFO
, "serial number mismatch "
2898 "'%s' != '%s'\n", serial
[0], serial
[1]);
2902 if (dev
->class == ATA_DEV_ATA
&& dev
->n_sectors
!= new_n_sectors
) {
2903 ata_dev_printk(dev
, KERN_INFO
, "n_sectors mismatch "
2905 (unsigned long long)dev
->n_sectors
,
2906 (unsigned long long)new_n_sectors
);
2914 * ata_dev_revalidate - Revalidate ATA device
2915 * @dev: device to revalidate
2916 * @post_reset: is this revalidation after reset?
2918 * Re-read IDENTIFY page and make sure @dev is still attached to
2922 * Kernel thread context (may sleep)
2925 * 0 on success, negative errno otherwise
2927 int ata_dev_revalidate(struct ata_device
*dev
, int post_reset
)
2929 unsigned int class = dev
->class;
2930 u16
*id
= (void *)dev
->ap
->sector_buf
;
2933 if (!ata_dev_enabled(dev
)) {
2939 rc
= ata_dev_read_id(dev
, &class, post_reset
, id
);
2943 /* is the device still there? */
2944 if (!ata_dev_same_device(dev
, class, id
)) {
2949 memcpy(dev
->id
, id
, sizeof(id
[0]) * ATA_ID_WORDS
);
2951 /* configure device according to the new ID */
2952 rc
= ata_dev_configure(dev
, 0);
2957 ata_dev_printk(dev
, KERN_ERR
, "revalidation failed (errno=%d)\n", rc
);
static const char * const ata_dma_blacklist [] = {
	"WDC AC11000H", NULL,
	"WDC AC22100H", NULL,
	"WDC AC32500H", NULL,
	"WDC AC33100H", NULL,
	"WDC AC31600H", NULL,
	"WDC AC32100H", "24.09P07",
	"WDC AC23200L", "21.10N21",
	"Compaq CRD-8241B",  NULL,
	"SanDisk SDP3B", NULL,
	"SanDisk SDP3B-64", NULL,
	"SANYO CD-ROM CRD", NULL,
	"HITACHI CDR-8", NULL,
	"HITACHI CDR-8335", NULL,
	"HITACHI CDR-8435", NULL,
	"Toshiba CD-ROM XM-6202B", NULL,
	"TOSHIBA CD-ROM XM-1702BC", NULL,
	"E-IDE CD-ROM CR-840", NULL,
	"CD-ROM Drive/F5A", NULL,
	"WPI CDD-820", NULL,
	"SAMSUNG CD-ROM SC-148C", NULL,
	"SAMSUNG CD-ROM SC", NULL,
	"SanDisk SDP3B-64", NULL,
	"ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL,
	"_NEC DV5800A", NULL,
	"SAMSUNG CD-ROM SN-124", "N001"
};
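/*
 * Illustrative sketch (not in the original source): the table above is a
 * flat array of { model, firmware-revision } pairs, where a NULL revision
 * blacklists every revision of that model.  A lookup therefore steps by
 * two entries at a time, mirroring ata_dma_blacklisted() below; the
 * helper name here is hypothetical.
 */
#if 0
static int example_model_blacklisted(const char *model, const char *rev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
		if (strcmp(ata_dma_blacklist[i], model))
			continue;
		/* a NULL revision entry matches any firmware revision */
		if (ata_dma_blacklist[i + 1] == NULL)
			return 1;
		if (!strcmp(ata_dma_blacklist[i + 1], rev))
			return 1;
	}
	return 0;
}
#endif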
static int ata_strim(char *s, size_t len)
{
	len = strnlen(s, len);

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
		len--;
		s[len] = 0;
	}
	return len;
}
static int ata_dma_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[40];
	unsigned char model_rev[16];
	unsigned int nlen, rlen;
	int i;

	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;

	ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
		      sizeof(model_num));
	ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
		      sizeof(model_rev));
	nlen = ata_strim(model_num, sizeof(model_num));
	rlen = ata_strim(model_rev, sizeof(model_rev));

	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
		if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
			if (ata_dma_blacklist[i+1] == NULL)
				return 1;
			/* compare firmware revision for model-specific entries */
			if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
				return 1;
		}
	}
	return 0;
}
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	FIXME: The current implementation limits all transfer modes to
 *	the fastest of the lowest device on the port.  This is not
 *	required on most controllers.
 *
 *	LOCKING:
 *	None.
 */
3055 static void ata_dev_xfermask(struct ata_device
*dev
)
3057 struct ata_port
*ap
= dev
->ap
;
3058 struct ata_host_set
*hs
= ap
->host_set
;
3059 unsigned long xfer_mask
;
3062 xfer_mask
= ata_pack_xfermask(ap
->pio_mask
,
3063 ap
->mwdma_mask
, ap
->udma_mask
);
3065 /* Apply cable rule here. Don't apply it early because when
3066 * we handle hot plug the cable type can itself change.
3068 if (ap
->cbl
== ATA_CBL_PATA40
)
3069 xfer_mask
&= ~(0xF8 << ATA_SHIFT_UDMA
);
3071 /* FIXME: Use port-wide xfermask for now */
3072 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++) {
3073 struct ata_device
*d
= &ap
->device
[i
];
3075 if (ata_dev_absent(d
))
3078 if (ata_dev_disabled(d
)) {
3079 /* to avoid violating device selection timing */
3080 xfer_mask
&= ata_pack_xfermask(d
->pio_mask
,
3081 UINT_MAX
, UINT_MAX
);
3085 xfer_mask
&= ata_pack_xfermask(d
->pio_mask
,
3086 d
->mwdma_mask
, d
->udma_mask
);
3087 xfer_mask
&= ata_id_xfermask(d
->id
);
3088 if (ata_dma_blacklisted(d
))
3089 xfer_mask
&= ~(ATA_MASK_MWDMA
| ATA_MASK_UDMA
);
3092 if (ata_dma_blacklisted(dev
))
3093 ata_dev_printk(dev
, KERN_WARNING
,
3094 "device is on DMA blacklist, disabling DMA\n");
3096 if (hs
->flags
& ATA_HOST_SIMPLEX
) {
3097 if (hs
->simplex_claimed
)
3098 xfer_mask
&= ~(ATA_MASK_MWDMA
| ATA_MASK_UDMA
);
3101 if (ap
->ops
->mode_filter
)
3102 xfer_mask
= ap
->ops
->mode_filter(ap
, dev
, xfer_mask
);
3104 ata_unpack_xfermask(xfer_mask
, &dev
->pio_mask
,
3105 &dev
->mwdma_mask
, &dev
->udma_mask
);
3109 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3110 * @dev: Device to which command will be sent
3112 * Issue SET FEATURES - XFER MODE command to device @dev
3116 * PCI/etc. bus probe sem.
3119 * 0 on success, AC_ERR_* mask otherwise.
3122 static unsigned int ata_dev_set_xfermode(struct ata_device
*dev
)
3124 struct ata_taskfile tf
;
3125 unsigned int err_mask
;
3127 /* set up set-features taskfile */
3128 DPRINTK("set features - xfer mode\n");
3130 ata_tf_init(dev
, &tf
);
3131 tf
.command
= ATA_CMD_SET_FEATURES
;
3132 tf
.feature
= SETFEATURES_XFER
;
3133 tf
.flags
|= ATA_TFLAG_ISADDR
| ATA_TFLAG_DEVICE
;
3134 tf
.protocol
= ATA_PROT_NODATA
;
3135 tf
.nsect
= dev
->xfer_mode
;
3137 err_mask
= ata_exec_internal(dev
, &tf
, NULL
, DMA_NONE
, NULL
, 0);
3139 DPRINTK("EXIT, err_mask=%x\n", err_mask
);
3144 * ata_dev_init_params - Issue INIT DEV PARAMS command
3145 * @dev: Device to which command will be sent
3146 * @heads: Number of heads (taskfile parameter)
3147 * @sectors: Number of sectors (taskfile parameter)
3150 * Kernel thread context (may sleep)
3153 * 0 on success, AC_ERR_* mask otherwise.
3155 static unsigned int ata_dev_init_params(struct ata_device
*dev
,
3156 u16 heads
, u16 sectors
)
3158 struct ata_taskfile tf
;
3159 unsigned int err_mask
;
3161 /* Number of sectors per track 1-255. Number of heads 1-16 */
3162 if (sectors
< 1 || sectors
> 255 || heads
< 1 || heads
> 16)
3163 return AC_ERR_INVALID
;
3165 /* set up init dev params taskfile */
3166 DPRINTK("init dev params \n");
3168 ata_tf_init(dev
, &tf
);
3169 tf
.command
= ATA_CMD_INIT_DEV_PARAMS
;
3170 tf
.flags
|= ATA_TFLAG_ISADDR
| ATA_TFLAG_DEVICE
;
3171 tf
.protocol
= ATA_PROT_NODATA
;
3173 tf
.device
|= (heads
- 1) & 0x0f; /* max head = num. of heads - 1 */
3175 err_mask
= ata_exec_internal(dev
, &tf
, NULL
, DMA_NONE
, NULL
, 0);
3177 DPRINTK("EXIT, err_mask=%x\n", err_mask
);
3182 * ata_sg_clean - Unmap DMA memory associated with command
3183 * @qc: Command containing DMA memory to be released
3185 * Unmap all mapped DMA memory associated with this command.
3188 * spin_lock_irqsave(host_set lock)
3191 static void ata_sg_clean(struct ata_queued_cmd
*qc
)
3193 struct ata_port
*ap
= qc
->ap
;
3194 struct scatterlist
*sg
= qc
->__sg
;
3195 int dir
= qc
->dma_dir
;
3196 void *pad_buf
= NULL
;
3198 WARN_ON(!(qc
->flags
& ATA_QCFLAG_DMAMAP
));
3199 WARN_ON(sg
== NULL
);
3201 if (qc
->flags
& ATA_QCFLAG_SINGLE
)
3202 WARN_ON(qc
->n_elem
> 1);
3204 VPRINTK("unmapping %u sg elements\n", qc
->n_elem
);
3206 /* if we padded the buffer out to 32-bit bound, and data
3207 * xfer direction is from-device, we must copy from the
3208 * pad buffer back into the supplied buffer
3210 if (qc
->pad_len
&& !(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
3211 pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3213 if (qc
->flags
& ATA_QCFLAG_SG
) {
3215 dma_unmap_sg(ap
->dev
, sg
, qc
->n_elem
, dir
);
3216 /* restore last sg */
3217 sg
[qc
->orig_n_elem
- 1].length
+= qc
->pad_len
;
3219 struct scatterlist
*psg
= &qc
->pad_sgent
;
3220 void *addr
= kmap_atomic(psg
->page
, KM_IRQ0
);
3221 memcpy(addr
+ psg
->offset
, pad_buf
, qc
->pad_len
);
3222 kunmap_atomic(addr
, KM_IRQ0
);
3226 dma_unmap_single(ap
->dev
,
3227 sg_dma_address(&sg
[0]), sg_dma_len(&sg
[0]),
3230 sg
->length
+= qc
->pad_len
;
3232 memcpy(qc
->buf_virt
+ sg
->length
- qc
->pad_len
,
3233 pad_buf
, qc
->pad_len
);
3236 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
3241 * ata_fill_sg - Fill PCI IDE PRD table
3242 * @qc: Metadata associated with taskfile to be transferred
3244 * Fill PCI IDE PRD (scatter-gather) table with segments
3245 * associated with the current disk command.
3248 * spin_lock_irqsave(host_set lock)
3251 static void ata_fill_sg(struct ata_queued_cmd
*qc
)
3253 struct ata_port
*ap
= qc
->ap
;
3254 struct scatterlist
*sg
;
3257 WARN_ON(qc
->__sg
== NULL
);
3258 WARN_ON(qc
->n_elem
== 0 && qc
->pad_len
== 0);
3261 ata_for_each_sg(sg
, qc
) {
3265 /* determine if physical DMA addr spans 64K boundary.
3266 * Note h/w doesn't support 64-bit, so we unconditionally
3267 * truncate dma_addr_t to u32.
3269 addr
= (u32
) sg_dma_address(sg
);
3270 sg_len
= sg_dma_len(sg
);
3273 offset
= addr
& 0xffff;
3275 if ((offset
+ sg_len
) > 0x10000)
3276 len
= 0x10000 - offset
;
3278 ap
->prd
[idx
].addr
= cpu_to_le32(addr
);
3279 ap
->prd
[idx
].flags_len
= cpu_to_le32(len
& 0xffff);
3280 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx
, addr
, len
);
3289 ap
->prd
[idx
- 1].flags_len
|= cpu_to_le32(ATA_PRD_EOT
);
3292 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3293 * @qc: Metadata associated with taskfile to check
3295 * Allow low-level driver to filter ATA PACKET commands, returning
3296 * a status indicating whether or not it is OK to use DMA for the
3297 * supplied PACKET command.
3300 * spin_lock_irqsave(host_set lock)
3302 * RETURNS: 0 when ATAPI DMA can be used
3305 int ata_check_atapi_dma(struct ata_queued_cmd
*qc
)
3307 struct ata_port
*ap
= qc
->ap
;
3308 int rc
= 0; /* Assume ATAPI DMA is OK by default */
3310 if (ap
->ops
->check_atapi_dma
)
3311 rc
= ap
->ops
->check_atapi_dma(qc
);
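/*
 * Illustrative sketch (not in the original source): a low-level driver
 * that cannot DMA certain PACKET commands can supply a check_atapi_dma
 * hook like the hypothetical one below; per the comment above, returning
 * nonzero tells libata to fall back to PIO for that command.
 */
#if 0
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* force PIO when the total transfer length is odd */
	return qc->nbytes & 1;
}
#endif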
3316 * ata_qc_prep - Prepare taskfile for submission
3317 * @qc: Metadata associated with taskfile to be prepared
3319 * Prepare ATA taskfile for submission.
3322 * spin_lock_irqsave(host_set lock)
3324 void ata_qc_prep(struct ata_queued_cmd
*qc
)
3326 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
3332 void ata_noop_qc_prep(struct ata_queued_cmd
*qc
) { }
3335 * ata_sg_init_one - Associate command with memory buffer
3336 * @qc: Command to be associated
3337 * @buf: Memory buffer
3338 * @buflen: Length of memory buffer, in bytes.
3340 * Initialize the data-related elements of queued_cmd @qc
3341 * to point to a single memory buffer, @buf of byte length @buflen.
3344 * spin_lock_irqsave(host_set lock)
3347 void ata_sg_init_one(struct ata_queued_cmd
*qc
, void *buf
, unsigned int buflen
)
3349 struct scatterlist
*sg
;
3351 qc
->flags
|= ATA_QCFLAG_SINGLE
;
3353 memset(&qc
->sgent
, 0, sizeof(qc
->sgent
));
3354 qc
->__sg
= &qc
->sgent
;
3356 qc
->orig_n_elem
= 1;
3358 qc
->nbytes
= buflen
;
3361 sg_init_one(sg
, buf
, buflen
);
3365 * ata_sg_init - Associate command with scatter-gather table.
3366 * @qc: Command to be associated
3367 * @sg: Scatter-gather table.
3368 * @n_elem: Number of elements in s/g table.
3370 * Initialize the data-related elements of queued_cmd @qc
3371 * to point to a scatter-gather table @sg, containing @n_elem
3375 * spin_lock_irqsave(host_set lock)
3378 void ata_sg_init(struct ata_queued_cmd
*qc
, struct scatterlist
*sg
,
3379 unsigned int n_elem
)
3381 qc
->flags
|= ATA_QCFLAG_SG
;
3383 qc
->n_elem
= n_elem
;
3384 qc
->orig_n_elem
= n_elem
;
3388 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3389 * @qc: Command with memory buffer to be mapped.
3391 * DMA-map the memory buffer associated with queued_cmd @qc.
3394 * spin_lock_irqsave(host_set lock)
3397 * Zero on success, negative on error.
3400 static int ata_sg_setup_one(struct ata_queued_cmd
*qc
)
3402 struct ata_port
*ap
= qc
->ap
;
3403 int dir
= qc
->dma_dir
;
3404 struct scatterlist
*sg
= qc
->__sg
;
3405 dma_addr_t dma_address
;
3408 /* we must lengthen transfers to end on a 32-bit boundary */
3409 qc
->pad_len
= sg
->length
& 3;
3411 void *pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3412 struct scatterlist
*psg
= &qc
->pad_sgent
;
3414 WARN_ON(qc
->dev
->class != ATA_DEV_ATAPI
);
3416 memset(pad_buf
, 0, ATA_DMA_PAD_SZ
);
3418 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
3419 memcpy(pad_buf
, qc
->buf_virt
+ sg
->length
- qc
->pad_len
,
3422 sg_dma_address(psg
) = ap
->pad_dma
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3423 sg_dma_len(psg
) = ATA_DMA_PAD_SZ
;
3425 sg
->length
-= qc
->pad_len
;
3426 if (sg
->length
== 0)
3429 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3430 sg
->length
, qc
->pad_len
);
3438 dma_address
= dma_map_single(ap
->dev
, qc
->buf_virt
,
3440 if (dma_mapping_error(dma_address
)) {
3442 sg
->length
+= qc
->pad_len
;
3446 sg_dma_address(sg
) = dma_address
;
3447 sg_dma_len(sg
) = sg
->length
;
3450 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg
),
3451 qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
3457 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3458 * @qc: Command with scatter-gather table to be mapped.
3460 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3463 * spin_lock_irqsave(host_set lock)
3466 * Zero on success, negative on error.
3470 static int ata_sg_setup(struct ata_queued_cmd
*qc
)
3472 struct ata_port
*ap
= qc
->ap
;
3473 struct scatterlist
*sg
= qc
->__sg
;
3474 struct scatterlist
*lsg
= &sg
[qc
->n_elem
- 1];
3475 int n_elem
, pre_n_elem
, dir
, trim_sg
= 0;
3477 VPRINTK("ENTER, ata%u\n", ap
->id
);
3478 WARN_ON(!(qc
->flags
& ATA_QCFLAG_SG
));
3480 /* we must lengthen transfers to end on a 32-bit boundary */
3481 qc
->pad_len
= lsg
->length
& 3;
3483 void *pad_buf
= ap
->pad
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3484 struct scatterlist
*psg
= &qc
->pad_sgent
;
3485 unsigned int offset
;
3487 WARN_ON(qc
->dev
->class != ATA_DEV_ATAPI
);
3489 memset(pad_buf
, 0, ATA_DMA_PAD_SZ
);
3492 * psg->page/offset are used to copy to-be-written
3493 * data in this function or read data in ata_sg_clean.
3495 offset
= lsg
->offset
+ lsg
->length
- qc
->pad_len
;
3496 psg
->page
= nth_page(lsg
->page
, offset
>> PAGE_SHIFT
);
3497 psg
->offset
= offset_in_page(offset
);
3499 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
) {
3500 void *addr
= kmap_atomic(psg
->page
, KM_IRQ0
);
3501 memcpy(pad_buf
, addr
+ psg
->offset
, qc
->pad_len
);
3502 kunmap_atomic(addr
, KM_IRQ0
);
3505 sg_dma_address(psg
) = ap
->pad_dma
+ (qc
->tag
* ATA_DMA_PAD_SZ
);
3506 sg_dma_len(psg
) = ATA_DMA_PAD_SZ
;
3508 lsg
->length
-= qc
->pad_len
;
3509 if (lsg
->length
== 0)
3512 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3513 qc
->n_elem
- 1, lsg
->length
, qc
->pad_len
);
3516 pre_n_elem
= qc
->n_elem
;
3517 if (trim_sg
&& pre_n_elem
)
3526 n_elem
= dma_map_sg(ap
->dev
, sg
, pre_n_elem
, dir
);
3528 /* restore last sg */
3529 lsg
->length
+= qc
->pad_len
;
3533 DPRINTK("%d sg elements mapped\n", n_elem
);
3536 qc
->n_elem
= n_elem
;
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
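/*
 * Illustrative sketch (not part of the original file): IDENTIFY DEVICE
 * data is a block of ATA_ID_WORDS little-endian 16-bit words, so a caller
 * that fills such a buffer by raw PIO would convert it to CPU byte order
 * before parsing it.  The helper name below is hypothetical.
 */
#if 0
static void example_fixup_id(u16 *id)
{
	/* convert 256 little-endian IDENTIFY words to CPU order (no-op on LE) */
	swap_buf_le16(id, ATA_ID_WORDS);
}
#endif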
3564 * ata_mmio_data_xfer - Transfer data by MMIO
3565 * @adev: device for this I/O
3567 * @buflen: buffer length
3568 * @write_data: read/write
3570 * Transfer data from/to the device data register by MMIO.
3573 * Inherited from caller.
3576 void ata_mmio_data_xfer(struct ata_device
*adev
, unsigned char *buf
,
3577 unsigned int buflen
, int write_data
)
3579 struct ata_port
*ap
= adev
->ap
;
3581 unsigned int words
= buflen
>> 1;
3582 u16
*buf16
= (u16
*) buf
;
3583 void __iomem
*mmio
= (void __iomem
*)ap
->ioaddr
.data_addr
;
3585 /* Transfer multiple of 2 bytes */
3587 for (i
= 0; i
< words
; i
++)
3588 writew(le16_to_cpu(buf16
[i
]), mmio
);
3590 for (i
= 0; i
< words
; i
++)
3591 buf16
[i
] = cpu_to_le16(readw(mmio
));
3594 /* Transfer trailing 1 byte, if any. */
3595 if (unlikely(buflen
& 0x01)) {
3596 u16 align_buf
[1] = { 0 };
3597 unsigned char *trailing_buf
= buf
+ buflen
- 1;
3600 memcpy(align_buf
, trailing_buf
, 1);
3601 writew(le16_to_cpu(align_buf
[0]), mmio
);
3603 align_buf
[0] = cpu_to_le16(readw(mmio
));
3604 memcpy(trailing_buf
, align_buf
, 1);
3610 * ata_pio_data_xfer - Transfer data by PIO
3611 * @adev: device to target
3613 * @buflen: buffer length
3614 * @write_data: read/write
3616 * Transfer data from/to the device data register by PIO.
3619 * Inherited from caller.
3622 void ata_pio_data_xfer(struct ata_device
*adev
, unsigned char *buf
,
3623 unsigned int buflen
, int write_data
)
3625 struct ata_port
*ap
= adev
->ap
;
3626 unsigned int words
= buflen
>> 1;
3628 /* Transfer multiple of 2 bytes */
3630 outsw(ap
->ioaddr
.data_addr
, buf
, words
);
3632 insw(ap
->ioaddr
.data_addr
, buf
, words
);
3634 /* Transfer trailing 1 byte, if any. */
3635 if (unlikely(buflen
& 0x01)) {
3636 u16 align_buf
[1] = { 0 };
3637 unsigned char *trailing_buf
= buf
+ buflen
- 1;
3640 memcpy(align_buf
, trailing_buf
, 1);
3641 outw(le16_to_cpu(align_buf
[0]), ap
->ioaddr
.data_addr
);
3643 align_buf
[0] = cpu_to_le16(inw(ap
->ioaddr
.data_addr
));
3644 memcpy(trailing_buf
, align_buf
, 1);
/**
 *	ata_pio_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			     unsigned int buflen, int write_data)
{
	unsigned long flags;

	local_irq_save(flags);
	ata_pio_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
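/*
 * Illustrative sketch (not in the original source): the data_xfer helpers
 * above are meant to be plugged into an LLDD's port operations.  The
 * structure below is a hypothetical, trimmed-down example of a driver
 * choosing the no-IRQ PIO variant; the field name comes from struct
 * ata_port_operations, but the ops table itself is invented.
 */
#if 0
static struct ata_port_operations example_port_ops = {
	.data_xfer	= ata_pio_data_xfer_noirq,	/* PIO with IRQs off */
	/* ... remaining mandatory hooks omitted ... */
};
#endif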
3674 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3675 * @qc: Command on going
3677 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3680 * Inherited from caller.
3683 static void ata_pio_sector(struct ata_queued_cmd
*qc
)
3685 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
3686 struct scatterlist
*sg
= qc
->__sg
;
3687 struct ata_port
*ap
= qc
->ap
;
3689 unsigned int offset
;
3692 if (qc
->cursect
== (qc
->nsect
- 1))
3693 ap
->hsm_task_state
= HSM_ST_LAST
;
3695 page
= sg
[qc
->cursg
].page
;
3696 offset
= sg
[qc
->cursg
].offset
+ qc
->cursg_ofs
* ATA_SECT_SIZE
;
3698 /* get the current page and offset */
3699 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
3700 offset
%= PAGE_SIZE
;
3702 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
3704 if (PageHighMem(page
)) {
3705 unsigned long flags
;
3707 /* FIXME: use a bounce buffer */
3708 local_irq_save(flags
);
3709 buf
= kmap_atomic(page
, KM_IRQ0
);
3711 /* do the actual data transfer */
3712 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, ATA_SECT_SIZE
, do_write
);
3714 kunmap_atomic(buf
, KM_IRQ0
);
3715 local_irq_restore(flags
);
3717 buf
= page_address(page
);
3718 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, ATA_SECT_SIZE
, do_write
);
3724 if ((qc
->cursg_ofs
* ATA_SECT_SIZE
) == (&sg
[qc
->cursg
])->length
) {
3731 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3732 * @qc: Command on going
3734 * Transfer one or many ATA_SECT_SIZE of data from/to the
3735 * ATA device for the DRQ request.
3738 * Inherited from caller.
3741 static void ata_pio_sectors(struct ata_queued_cmd
*qc
)
3743 if (is_multi_taskfile(&qc
->tf
)) {
3744 /* READ/WRITE MULTIPLE */
3747 WARN_ON(qc
->dev
->multi_count
== 0);
3749 nsect
= min(qc
->nsect
- qc
->cursect
, qc
->dev
->multi_count
);
3757 * atapi_send_cdb - Write CDB bytes to hardware
3758 * @ap: Port to which ATAPI device is attached.
3759 * @qc: Taskfile currently active
3761 * When device has indicated its readiness to accept
3762 * a CDB, this function is called. Send the CDB.
3768 static void atapi_send_cdb(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
3771 DPRINTK("send cdb\n");
3772 WARN_ON(qc
->dev
->cdb_len
< 12);
3774 ap
->ops
->data_xfer(qc
->dev
, qc
->cdb
, qc
->dev
->cdb_len
, 1);
3775 ata_altstatus(ap
); /* flush */
3777 switch (qc
->tf
.protocol
) {
3778 case ATA_PROT_ATAPI
:
3779 ap
->hsm_task_state
= HSM_ST
;
3781 case ATA_PROT_ATAPI_NODATA
:
3782 ap
->hsm_task_state
= HSM_ST_LAST
;
3784 case ATA_PROT_ATAPI_DMA
:
3785 ap
->hsm_task_state
= HSM_ST_LAST
;
3786 /* initiate bmdma */
3787 ap
->ops
->bmdma_start(qc
);
3793 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3794 * @qc: Command on going
3795 * @bytes: number of bytes
* Transfer data from/to the ATAPI device.
3800 * Inherited from caller.
3804 static void __atapi_pio_bytes(struct ata_queued_cmd
*qc
, unsigned int bytes
)
3806 int do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
3807 struct scatterlist
*sg
= qc
->__sg
;
3808 struct ata_port
*ap
= qc
->ap
;
3811 unsigned int offset
, count
;
3813 if (qc
->curbytes
+ bytes
>= qc
->nbytes
)
3814 ap
->hsm_task_state
= HSM_ST_LAST
;
3817 if (unlikely(qc
->cursg
>= qc
->n_elem
)) {
3819 * The end of qc->sg is reached and the device expects
3820 * more data to transfer. In order not to overrun qc->sg
3821 * and fulfill length specified in the byte count register,
3822 * - for read case, discard trailing data from the device
3823 * - for write case, padding zero data to the device
3825 u16 pad_buf
[1] = { 0 };
3826 unsigned int words
= bytes
>> 1;
3829 if (words
) /* warning if bytes > 1 */
3830 ata_dev_printk(qc
->dev
, KERN_WARNING
,
3831 "%u bytes trailing data\n", bytes
);
3833 for (i
= 0; i
< words
; i
++)
3834 ap
->ops
->data_xfer(qc
->dev
, (unsigned char*)pad_buf
, 2, do_write
);
3836 ap
->hsm_task_state
= HSM_ST_LAST
;
3840 sg
= &qc
->__sg
[qc
->cursg
];
3843 offset
= sg
->offset
+ qc
->cursg_ofs
;
3845 /* get the current page and offset */
3846 page
= nth_page(page
, (offset
>> PAGE_SHIFT
));
3847 offset
%= PAGE_SIZE
;
3849 /* don't overrun current sg */
3850 count
= min(sg
->length
- qc
->cursg_ofs
, bytes
);
3852 /* don't cross page boundaries */
3853 count
= min(count
, (unsigned int)PAGE_SIZE
- offset
);
3855 DPRINTK("data %s\n", qc
->tf
.flags
& ATA_TFLAG_WRITE
? "write" : "read");
3857 if (PageHighMem(page
)) {
3858 unsigned long flags
;
3860 /* FIXME: use bounce buffer */
3861 local_irq_save(flags
);
3862 buf
= kmap_atomic(page
, KM_IRQ0
);
3864 /* do the actual data transfer */
3865 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, count
, do_write
);
3867 kunmap_atomic(buf
, KM_IRQ0
);
3868 local_irq_restore(flags
);
3870 buf
= page_address(page
);
3871 ap
->ops
->data_xfer(qc
->dev
, buf
+ offset
, count
, do_write
);
3875 qc
->curbytes
+= count
;
3876 qc
->cursg_ofs
+= count
;
3878 if (qc
->cursg_ofs
== sg
->length
) {
3888 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3889 * @qc: Command on going
* Transfer data from/to the ATAPI device.
3894 * Inherited from caller.
3897 static void atapi_pio_bytes(struct ata_queued_cmd
*qc
)
3899 struct ata_port
*ap
= qc
->ap
;
3900 struct ata_device
*dev
= qc
->dev
;
3901 unsigned int ireason
, bc_lo
, bc_hi
, bytes
;
3902 int i_write
, do_write
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
) ? 1 : 0;
3904 /* Abuse qc->result_tf for temp storage of intermediate TF
3905 * here to save some kernel stack usage.
3906 * For normal completion, qc->result_tf is not relevant. For
3907 * error, qc->result_tf is later overwritten by ata_qc_complete().
3908 * So, the correctness of qc->result_tf is not affected.
3910 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
3911 ireason
= qc
->result_tf
.nsect
;
3912 bc_lo
= qc
->result_tf
.lbam
;
3913 bc_hi
= qc
->result_tf
.lbah
;
3914 bytes
= (bc_hi
<< 8) | bc_lo
;
3916 /* shall be cleared to zero, indicating xfer of data */
3917 if (ireason
& (1 << 0))
3920 /* make sure transfer direction matches expected */
3921 i_write
= ((ireason
& (1 << 1)) == 0) ? 1 : 0;
3922 if (do_write
!= i_write
)
3925 VPRINTK("ata%u: xfering %d bytes\n", ap
->id
, bytes
);
3927 __atapi_pio_bytes(qc
, bytes
);
3932 ata_dev_printk(dev
, KERN_INFO
, "ATAPI check failed\n");
3933 qc
->err_mask
|= AC_ERR_HSM
;
3934 ap
->hsm_task_state
= HSM_ST_ERR
;
3938 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
3939 * @ap: the target ata_port
3943 * 1 if ok in workqueue, 0 otherwise.
3946 static inline int ata_hsm_ok_in_wq(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
3948 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
3951 if (ap
->hsm_task_state
== HSM_ST_FIRST
) {
3952 if (qc
->tf
.protocol
== ATA_PROT_PIO
&&
3953 (qc
->tf
.flags
& ATA_TFLAG_WRITE
))
3956 if (is_atapi_taskfile(&qc
->tf
) &&
3957 !(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
3965 * ata_hsm_qc_complete - finish a qc running on standard HSM
3966 * @qc: Command to complete
3967 * @in_wq: 1 if called from workqueue, 0 otherwise
3969 * Finish @qc which is running on standard HSM.
3972 * If @in_wq is zero, spin_lock_irqsave(host_set lock).
3973 * Otherwise, none on entry and grabs host lock.
3975 static void ata_hsm_qc_complete(struct ata_queued_cmd
*qc
, int in_wq
)
3977 struct ata_port
*ap
= qc
->ap
;
3978 unsigned long flags
;
3980 if (ap
->ops
->error_handler
) {
3982 spin_lock_irqsave(ap
->lock
, flags
);
3984 /* EH might have kicked in while host_set lock
3987 qc
= ata_qc_from_tag(ap
, qc
->tag
);
3989 if (likely(!(qc
->err_mask
& AC_ERR_HSM
))) {
3991 ata_qc_complete(qc
);
3993 ata_port_freeze(ap
);
3996 spin_unlock_irqrestore(ap
->lock
, flags
);
3998 if (likely(!(qc
->err_mask
& AC_ERR_HSM
)))
3999 ata_qc_complete(qc
);
4001 ata_port_freeze(ap
);
4005 spin_lock_irqsave(ap
->lock
, flags
);
4007 ata_qc_complete(qc
);
4008 spin_unlock_irqrestore(ap
->lock
, flags
);
4010 ata_qc_complete(qc
);
4013 ata_altstatus(ap
); /* flush */
4017 * ata_hsm_move - move the HSM to the next state.
4018 * @ap: the target ata_port
4020 * @status: current device status
4021 * @in_wq: 1 if called from workqueue, 0 otherwise
4024 * 1 when poll next status needed, 0 otherwise.
4026 int ata_hsm_move(struct ata_port
*ap
, struct ata_queued_cmd
*qc
,
4027 u8 status
, int in_wq
)
4029 unsigned long flags
= 0;
4032 WARN_ON((qc
->flags
& ATA_QCFLAG_ACTIVE
) == 0);
4034 /* Make sure ata_qc_issue_prot() does not throw things
4035 * like DMA polling into the workqueue. Notice that
4036 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4038 WARN_ON(in_wq
!= ata_hsm_ok_in_wq(ap
, qc
));
4041 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4042 ap
->id
, qc
->tf
.protocol
, ap
->hsm_task_state
, status
);
4044 switch (ap
->hsm_task_state
) {
4046 /* Send first data block or PACKET CDB */
4048 /* If polling, we will stay in the work queue after
4049 * sending the data. Otherwise, interrupt handler
4050 * takes over after sending the data.
4052 poll_next
= (qc
->tf
.flags
& ATA_TFLAG_POLLING
);
4054 /* check device status */
4055 if (unlikely((status
& ATA_DRQ
) == 0)) {
4056 /* handle BSY=0, DRQ=0 as error */
4057 if (likely(status
& (ATA_ERR
| ATA_DF
)))
4058 /* device stops HSM for abort/error */
4059 qc
->err_mask
|= AC_ERR_DEV
;
4061 /* HSM violation. Let EH handle this */
4062 qc
->err_mask
|= AC_ERR_HSM
;
4064 ap
->hsm_task_state
= HSM_ST_ERR
;
4068 /* Device should not ask for data transfer (DRQ=1)
4069 * when it finds something wrong.
4070 * We ignore DRQ here and stop the HSM by
4071 * changing hsm_task_state to HSM_ST_ERR and
4072 * let the EH abort the command or reset the device.
4074 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
4075 printk(KERN_WARNING
"ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4077 qc
->err_mask
|= AC_ERR_HSM
;
4078 ap
->hsm_task_state
= HSM_ST_ERR
;
4082 /* Send the CDB (atapi) or the first data block (ata pio out).
4083 * During the state transition, interrupt handler shouldn't
4084 * be invoked before the data transfer is complete and
4085 * hsm_task_state is changed. Hence, the following locking.
4088 spin_lock_irqsave(ap
->lock
, flags
);
4090 if (qc
->tf
.protocol
== ATA_PROT_PIO
) {
4091 /* PIO data out protocol.
4092 * send first data block.
4095 /* ata_pio_sectors() might change the state
4096 * to HSM_ST_LAST. so, the state is changed here
4097 * before ata_pio_sectors().
4099 ap
->hsm_task_state
= HSM_ST
;
4100 ata_pio_sectors(qc
);
4101 ata_altstatus(ap
); /* flush */
4104 atapi_send_cdb(ap
, qc
);
4107 spin_unlock_irqrestore(ap
->lock
, flags
);
4109 /* if polling, ata_pio_task() handles the rest.
4110 * otherwise, interrupt handler takes over from here.
4115 /* complete command or read/write the data register */
4116 if (qc
->tf
.protocol
== ATA_PROT_ATAPI
) {
4117 /* ATAPI PIO protocol */
4118 if ((status
& ATA_DRQ
) == 0) {
4119 /* No more data to transfer or device error.
4120 * Device error will be tagged in HSM_ST_LAST.
4122 ap
->hsm_task_state
= HSM_ST_LAST
;
4126 /* Device should not ask for data transfer (DRQ=1)
4127 * when it finds something wrong.
4128 * We ignore DRQ here and stop the HSM by
4129 * changing hsm_task_state to HSM_ST_ERR and
4130 * let the EH abort the command or reset the device.
4132 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
4133 printk(KERN_WARNING
"ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4135 qc
->err_mask
|= AC_ERR_HSM
;
4136 ap
->hsm_task_state
= HSM_ST_ERR
;
4140 atapi_pio_bytes(qc
);
4142 if (unlikely(ap
->hsm_task_state
== HSM_ST_ERR
))
4143 /* bad ireason reported by device */
4147 /* ATA PIO protocol */
4148 if (unlikely((status
& ATA_DRQ
) == 0)) {
4149 /* handle BSY=0, DRQ=0 as error */
4150 if (likely(status
& (ATA_ERR
| ATA_DF
)))
4151 /* device stops HSM for abort/error */
4152 qc
->err_mask
|= AC_ERR_DEV
;
4154 /* HSM violation. Let EH handle this */
4155 qc
->err_mask
|= AC_ERR_HSM
;
4157 ap
->hsm_task_state
= HSM_ST_ERR
;
4161 /* For PIO reads, some devices may ask for
* data transfer (DRQ=1) along with ERR=1.
4163 * We respect DRQ here and transfer one
4164 * block of junk data before changing the
4165 * hsm_task_state to HSM_ST_ERR.
4167 * For PIO writes, ERR=1 DRQ=1 doesn't make
4168 * sense since the data block has been
4169 * transferred to the device.
4171 if (unlikely(status
& (ATA_ERR
| ATA_DF
))) {
/* data might be corrupted */
4173 qc
->err_mask
|= AC_ERR_DEV
;
4175 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
)) {
4176 ata_pio_sectors(qc
);
4178 status
= ata_wait_idle(ap
);
4181 if (status
& (ATA_BUSY
| ATA_DRQ
))
4182 qc
->err_mask
|= AC_ERR_HSM
;
4184 /* ata_pio_sectors() might change the
4185 * state to HSM_ST_LAST. so, the state
4186 * is changed after ata_pio_sectors().
4188 ap
->hsm_task_state
= HSM_ST_ERR
;
4192 ata_pio_sectors(qc
);
4194 if (ap
->hsm_task_state
== HSM_ST_LAST
&&
4195 (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))) {
4198 status
= ata_wait_idle(ap
);
4203 ata_altstatus(ap
); /* flush */
4208 if (unlikely(!ata_ok(status
))) {
4209 qc
->err_mask
|= __ac_err_mask(status
);
4210 ap
->hsm_task_state
= HSM_ST_ERR
;
4214 /* no more data to transfer */
4215 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4216 ap
->id
, qc
->dev
->devno
, status
);
4218 WARN_ON(qc
->err_mask
);
4220 ap
->hsm_task_state
= HSM_ST_IDLE
;
4222 /* complete taskfile transaction */
4223 ata_hsm_qc_complete(qc
, in_wq
);
4229 /* make sure qc->err_mask is available to
4230 * know what's wrong and recover
4232 WARN_ON(qc
->err_mask
== 0);
4234 ap
->hsm_task_state
= HSM_ST_IDLE
;
4236 /* complete taskfile transaction */
4237 ata_hsm_qc_complete(qc
, in_wq
);
4249 static void ata_pio_task(void *_data
)
4251 struct ata_queued_cmd
*qc
= _data
;
4252 struct ata_port
*ap
= qc
->ap
;
4257 WARN_ON(ap
->hsm_task_state
== HSM_ST_IDLE
);
4260 * This is purely heuristic. This is a fast path.
4261 * Sometimes when we enter, BSY will be cleared in
4262 * a chk-status or two. If not, the drive is probably seeking
4263 * or something. Snooze for a couple msecs, then
4264 * chk-status again. If still busy, queue delayed work.
4266 status
= ata_busy_wait(ap
, ATA_BUSY
, 5);
4267 if (status
& ATA_BUSY
) {
4269 status
= ata_busy_wait(ap
, ATA_BUSY
, 10);
4270 if (status
& ATA_BUSY
) {
4271 ata_port_queue_task(ap
, ata_pio_task
, qc
, ATA_SHORT_PAUSE
);
4277 poll_next
= ata_hsm_move(ap
, qc
, status
, 1);
4279 /* another command or interrupt handler
4280 * may be running at this point.
4287 * ata_qc_new - Request an available ATA command, for queueing
4288 * @ap: Port associated with device @dev
4289 * @dev: Device from whom we request an available command structure
4295 static struct ata_queued_cmd
*ata_qc_new(struct ata_port
*ap
)
4297 struct ata_queued_cmd
*qc
= NULL
;
4300 /* no command while frozen */
4301 if (unlikely(ap
->pflags
& ATA_PFLAG_FROZEN
))
4304 /* the last tag is reserved for internal command. */
4305 for (i
= 0; i
< ATA_MAX_QUEUE
- 1; i
++)
4306 if (!test_and_set_bit(i
, &ap
->qc_allocated
)) {
4307 qc
= __ata_qc_from_tag(ap
, i
);
4318 * ata_qc_new_init - Request an available ATA command, and initialize it
4319 * @dev: Device from whom we request an available command structure
4325 struct ata_queued_cmd
*ata_qc_new_init(struct ata_device
*dev
)
4327 struct ata_port
*ap
= dev
->ap
;
4328 struct ata_queued_cmd
*qc
;
4330 qc
= ata_qc_new(ap
);
4343 * ata_qc_free - free unused ata_queued_cmd
4344 * @qc: Command to complete
4346 * Designed to free unused ata_queued_cmd object
4347 * in case something prevents using it.
4350 * spin_lock_irqsave(host_set lock)
4352 void ata_qc_free(struct ata_queued_cmd
*qc
)
4354 struct ata_port
*ap
= qc
->ap
;
4357 WARN_ON(qc
== NULL
); /* ata_qc_from_tag _might_ return NULL */
4361 if (likely(ata_tag_valid(tag
))) {
4362 qc
->tag
= ATA_TAG_POISON
;
4363 clear_bit(tag
, &ap
->qc_allocated
);
4367 void __ata_qc_complete(struct ata_queued_cmd
*qc
)
4369 struct ata_port
*ap
= qc
->ap
;
4371 WARN_ON(qc
== NULL
); /* ata_qc_from_tag _might_ return NULL */
4372 WARN_ON(!(qc
->flags
& ATA_QCFLAG_ACTIVE
));
4374 if (likely(qc
->flags
& ATA_QCFLAG_DMAMAP
))
4377 /* command should be marked inactive atomically with qc completion */
4378 if (qc
->tf
.protocol
== ATA_PROT_NCQ
)
4379 ap
->sactive
&= ~(1 << qc
->tag
);
4381 ap
->active_tag
= ATA_TAG_POISON
;
4383 /* atapi: mark qc as inactive to prevent the interrupt handler
4384 * from completing the command twice later, before the error handler
4385 * is called. (when rc != 0 and atapi request sense is needed)
4387 qc
->flags
&= ~ATA_QCFLAG_ACTIVE
;
4388 ap
->qc_active
&= ~(1 << qc
->tag
);
4390 /* call completion callback */
4391 qc
->complete_fn(qc
);
4395 * ata_qc_complete - Complete an active ATA command
4396 * @qc: Command to complete
4397 * @err_mask: ATA Status register contents
4399 * Indicate to the mid and upper layers that an ATA
4400 * command has completed, with either an ok or not-ok status.
4403 * spin_lock_irqsave(host_set lock)
4405 void ata_qc_complete(struct ata_queued_cmd
*qc
)
4407 struct ata_port
*ap
= qc
->ap
;
4409 /* XXX: New EH and old EH use different mechanisms to
4410 * synchronize EH with regular execution path.
4412 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4413 * Normal execution path is responsible for not accessing a
4414 * failed qc. libata core enforces the rule by returning NULL
4415 * from ata_qc_from_tag() for failed qcs.
4417 * Old EH depends on ata_qc_complete() nullifying completion
4418 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4419 * not synchronize with interrupt handler. Only PIO task is
4422 if (ap
->ops
->error_handler
) {
4423 WARN_ON(ap
->pflags
& ATA_PFLAG_FROZEN
);
4425 if (unlikely(qc
->err_mask
))
4426 qc
->flags
|= ATA_QCFLAG_FAILED
;
4428 if (unlikely(qc
->flags
& ATA_QCFLAG_FAILED
)) {
4429 if (!ata_tag_internal(qc
->tag
)) {
4430 /* always fill result TF for failed qc */
4431 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
4432 ata_qc_schedule_eh(qc
);
4437 /* read result TF if requested */
4438 if (qc
->flags
& ATA_QCFLAG_RESULT_TF
)
4439 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
4441 __ata_qc_complete(qc
);
4443 if (qc
->flags
& ATA_QCFLAG_EH_SCHEDULED
)
4446 /* read result TF if failed or requested */
4447 if (qc
->err_mask
|| qc
->flags
& ATA_QCFLAG_RESULT_TF
)
4448 ap
->ops
->tf_read(ap
, &qc
->result_tf
);
4450 __ata_qc_complete(qc
);
4455 * ata_qc_complete_multiple - Complete multiple qcs successfully
4456 * @ap: port in question
4457 * @qc_active: new qc_active mask
4458 * @finish_qc: LLDD callback invoked before completing a qc
* Complete in-flight commands. This function is meant to be
4461 * called from low-level driver's interrupt routine to complete
* requests normally. ap->qc_active and @qc_active are compared
4463 * and commands are completed accordingly.
4466 * spin_lock_irqsave(host_set lock)
4469 * Number of completed commands on success, -errno otherwise.
4471 int ata_qc_complete_multiple(struct ata_port
*ap
, u32 qc_active
,
4472 void (*finish_qc
)(struct ata_queued_cmd
*))
4478 done_mask
= ap
->qc_active
^ qc_active
;
4480 if (unlikely(done_mask
& qc_active
)) {
4481 ata_port_printk(ap
, KERN_ERR
, "illegal qc_active transition "
4482 "(%08x->%08x)\n", ap
->qc_active
, qc_active
);
4486 for (i
= 0; i
< ATA_MAX_QUEUE
; i
++) {
4487 struct ata_queued_cmd
*qc
;
4489 if (!(done_mask
& (1 << i
)))
4492 if ((qc
= ata_qc_from_tag(ap
, i
))) {
4495 ata_qc_complete(qc
);
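/*
 * Illustrative sketch (not part of the original file): a low-level
 * driver's interrupt routine typically reads its controller's
 * "commands still active" mask and hands it to ata_qc_complete_multiple();
 * every tag set in ap->qc_active but clear in the new mask is completed.
 * The register-reading helper below is purely hypothetical.
 */
#if 0
static void example_lld_host_intr(struct ata_port *ap)
{
	u32 qc_active = example_read_active_tags(ap);	/* hypothetical */

	ata_qc_complete_multiple(ap, qc_active, NULL);
}
#endif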
4503 static inline int ata_should_dma_map(struct ata_queued_cmd
*qc
)
4505 struct ata_port
*ap
= qc
->ap
;
4507 switch (qc
->tf
.protocol
) {
4510 case ATA_PROT_ATAPI_DMA
:
4513 case ATA_PROT_ATAPI
:
4515 if (ap
->flags
& ATA_FLAG_PIO_DMA
)
4528 * ata_qc_issue - issue taskfile to device
4529 * @qc: command to issue to device
* Prepare an ATA command for submission to the device.
4532 * This includes mapping the data into a DMA-able
4533 * area, filling in the S/G table, and finally
4534 * writing the taskfile to hardware, starting the command.
4537 * spin_lock_irqsave(host_set lock)
4539 void ata_qc_issue(struct ata_queued_cmd
*qc
)
4541 struct ata_port
*ap
= qc
->ap
;
4543 /* Make sure only one non-NCQ command is outstanding. The
4544 * check is skipped for old EH because it reuses active qc to
4545 * request ATAPI sense.
4547 WARN_ON(ap
->ops
->error_handler
&& ata_tag_valid(ap
->active_tag
));
4549 if (qc
->tf
.protocol
== ATA_PROT_NCQ
) {
4550 WARN_ON(ap
->sactive
& (1 << qc
->tag
));
4551 ap
->sactive
|= 1 << qc
->tag
;
4553 WARN_ON(ap
->sactive
);
4554 ap
->active_tag
= qc
->tag
;
4557 qc
->flags
|= ATA_QCFLAG_ACTIVE
;
4558 ap
->qc_active
|= 1 << qc
->tag
;
4560 if (ata_should_dma_map(qc
)) {
4561 if (qc
->flags
& ATA_QCFLAG_SG
) {
4562 if (ata_sg_setup(qc
))
4564 } else if (qc
->flags
& ATA_QCFLAG_SINGLE
) {
4565 if (ata_sg_setup_one(qc
))
4569 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
4572 ap
->ops
->qc_prep(qc
);
4574 qc
->err_mask
|= ap
->ops
->qc_issue(qc
);
4575 if (unlikely(qc
->err_mask
))
4580 qc
->flags
&= ~ATA_QCFLAG_DMAMAP
;
4581 qc
->err_mask
|= AC_ERR_SYSTEM
;
4583 ata_qc_complete(qc
);
4587 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4588 * @qc: command to issue to device
4590 * Using various libata functions and hooks, this function
4591 * starts an ATA command. ATA commands are grouped into
4592 * classes called "protocols", and issuing each type of protocol
4593 * is slightly different.
4595 * May be used as the qc_issue() entry in ata_port_operations.
4598 * spin_lock_irqsave(host_set lock)
4601 * Zero on success, AC_ERR_* mask on failure
4604 unsigned int ata_qc_issue_prot(struct ata_queued_cmd
*qc
)
4606 struct ata_port
*ap
= qc
->ap
;
4608 /* Use polling pio if the LLD doesn't handle
4609 * interrupt driven pio and atapi CDB interrupt.
4611 if (ap
->flags
& ATA_FLAG_PIO_POLLING
) {
4612 switch (qc
->tf
.protocol
) {
4614 case ATA_PROT_ATAPI
:
4615 case ATA_PROT_ATAPI_NODATA
:
4616 qc
->tf
.flags
|= ATA_TFLAG_POLLING
;
4618 case ATA_PROT_ATAPI_DMA
:
4619 if (qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
)
4620 /* see ata_dma_blacklisted() */
4628 /* select the device */
4629 ata_dev_select(ap
, qc
->dev
->devno
, 1, 0);
4631 /* start the command */
4632 switch (qc
->tf
.protocol
) {
4633 case ATA_PROT_NODATA
:
4634 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
4635 ata_qc_set_polling(qc
);
4637 ata_tf_to_host(ap
, &qc
->tf
);
4638 ap
->hsm_task_state
= HSM_ST_LAST
;
4640 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
4641 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
4646 WARN_ON(qc
->tf
.flags
& ATA_TFLAG_POLLING
);
4648 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
4649 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
4650 ap
->ops
->bmdma_start(qc
); /* initiate bmdma */
4651 ap
->hsm_task_state
= HSM_ST_LAST
;
4655 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
4656 ata_qc_set_polling(qc
);
4658 ata_tf_to_host(ap
, &qc
->tf
);
4660 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
) {
4661 /* PIO data out protocol */
4662 ap
->hsm_task_state
= HSM_ST_FIRST
;
4663 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
4665 /* always send first data block using
4666 * the ata_pio_task() codepath.
4669 /* PIO data in protocol */
4670 ap
->hsm_task_state
= HSM_ST
;
4672 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
4673 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
4675 /* if polling, ata_pio_task() handles the rest.
4676 * otherwise, interrupt handler takes over from here.
4682 case ATA_PROT_ATAPI
:
4683 case ATA_PROT_ATAPI_NODATA
:
4684 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
)
4685 ata_qc_set_polling(qc
);
4687 ata_tf_to_host(ap
, &qc
->tf
);
4689 ap
->hsm_task_state
= HSM_ST_FIRST
;
4691 /* send cdb by polling if no cdb interrupt */
4692 if ((!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
)) ||
4693 (qc
->tf
.flags
& ATA_TFLAG_POLLING
))
4694 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
4697 case ATA_PROT_ATAPI_DMA
:
4698 WARN_ON(qc
->tf
.flags
& ATA_TFLAG_POLLING
);
4700 ap
->ops
->tf_load(ap
, &qc
->tf
); /* load tf registers */
4701 ap
->ops
->bmdma_setup(qc
); /* set up bmdma */
4702 ap
->hsm_task_state
= HSM_ST_FIRST
;
4704 /* send cdb by polling if no cdb interrupt */
4705 if (!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
4706 ata_port_queue_task(ap
, ata_pio_task
, qc
, 0);
4711 return AC_ERR_SYSTEM
;
4718 * ata_host_intr - Handle host interrupt for given (port, task)
4719 * @ap: Port on which interrupt arrived (possibly...)
4720 * @qc: Taskfile currently active in engine
4722 * Handle host interrupt for given queued command. Currently,
4723 * only DMA interrupts are handled. All other commands are
4724 * handled via polling with interrupts disabled (nIEN bit).
4727 * spin_lock_irqsave(host_set lock)
4730 * One if interrupt was handled, zero if not (shared irq).
4733 inline unsigned int ata_host_intr (struct ata_port
*ap
,
4734 struct ata_queued_cmd
*qc
)
4736 u8 status
, host_stat
= 0;
4738 VPRINTK("ata%u: protocol %d task_state %d\n",
4739 ap
->id
, qc
->tf
.protocol
, ap
->hsm_task_state
);
4741 /* Check whether we are expecting interrupt in this state */
4742 switch (ap
->hsm_task_state
) {
4744 /* Some pre-ATAPI-4 devices assert INTRQ
4745 * at this state when ready to receive CDB.
4748 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
4749 * The flag was turned on only for atapi devices.
4750 * No need to check is_atapi_taskfile(&qc->tf) again.
4752 if (!(qc
->dev
->flags
& ATA_DFLAG_CDB_INTR
))
4756 if (qc
->tf
.protocol
== ATA_PROT_DMA
||
4757 qc
->tf
.protocol
== ATA_PROT_ATAPI_DMA
) {
4758 /* check status of DMA engine */
4759 host_stat
= ap
->ops
->bmdma_status(ap
);
4760 VPRINTK("ata%u: host_stat 0x%X\n", ap
->id
, host_stat
);
4762 /* if it's not our irq... */
4763 if (!(host_stat
& ATA_DMA_INTR
))
4766 /* before we do anything else, clear DMA-Start bit */
4767 ap
->ops
->bmdma_stop(qc
);
4769 if (unlikely(host_stat
& ATA_DMA_ERR
)) {
/* error when transferring data to/from memory */
4771 qc
->err_mask
|= AC_ERR_HOST_BUS
;
4772 ap
->hsm_task_state
= HSM_ST_ERR
;
4782 /* check altstatus */
4783 status
= ata_altstatus(ap
);
4784 if (status
& ATA_BUSY
)
4787 /* check main status, clearing INTRQ */
4788 status
= ata_chk_status(ap
);
4789 if (unlikely(status
& ATA_BUSY
))
4792 /* ack bmdma irq events */
4793 ap
->ops
->irq_clear(ap
);
4795 ata_hsm_move(ap
, qc
, status
, 0);
4796 return 1; /* irq handled */
4799 ap
->stats
.idle_irq
++;
4802 if ((ap
->stats
.idle_irq
% 1000) == 0) {
4803 ata_irq_ack(ap
, 0); /* debug trap */
4804 ata_port_printk(ap
, KERN_WARNING
, "irq trap\n");
4808 return 0; /* irq not handled */
4812 * ata_interrupt - Default ATA host interrupt handler
4813 * @irq: irq line (unused)
4814 * @dev_instance: pointer to our ata_host_set information structure
4817 * Default interrupt handler for PCI IDE devices. Calls
4818 * ata_host_intr() for each port that is not disabled.
4821 * Obtains host_set lock during operation.
4824 * IRQ_NONE or IRQ_HANDLED.
4827 irqreturn_t
ata_interrupt (int irq
, void *dev_instance
, struct pt_regs
*regs
)
4829 struct ata_host_set
*host_set
= dev_instance
;
4831 unsigned int handled
= 0;
4832 unsigned long flags
;
4834 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4835 spin_lock_irqsave(&host_set
->lock
, flags
);
4837 for (i
= 0; i
< host_set
->n_ports
; i
++) {
4838 struct ata_port
*ap
;
4840 ap
= host_set
->ports
[i
];
4842 !(ap
->flags
& ATA_FLAG_DISABLED
)) {
4843 struct ata_queued_cmd
*qc
;
4845 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
4846 if (qc
&& (!(qc
->tf
.flags
& ATA_TFLAG_POLLING
)) &&
4847 (qc
->flags
& ATA_QCFLAG_ACTIVE
))
4848 handled
|= ata_host_intr(ap
, qc
);
4852 spin_unlock_irqrestore(&host_set
->lock
, flags
);
4854 return IRQ_RETVAL(handled
);
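/*
 * Illustrative sketch (not part of the original file): ata_interrupt is
 * what libata installs via request_irq() during host attach, with the
 * ata_host_set pointer as the dev_id cookie it later receives as
 * @dev_instance.  The surrounding probe context below is hypothetical.
 */
#if 0
static int example_attach_irq(struct pci_dev *pdev,
			      struct ata_host_set *host_set)
{
	/* shared handler; dev_id must be the host_set the handler expects */
	return request_irq(pdev->irq, ata_interrupt, SA_SHIRQ,
			   DRV_NAME, host_set);
}
#endif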
4858 * sata_scr_valid - test whether SCRs are accessible
4859 * @ap: ATA port to test SCR accessibility for
4861 * Test whether SCRs are accessible for @ap.
4867 * 1 if SCRs are accessible, 0 otherwise.
4869 int sata_scr_valid(struct ata_port
*ap
)
4871 return ap
->cbl
== ATA_CBL_SATA
&& ap
->ops
->scr_read
;
4875 * sata_scr_read - read SCR register of the specified port
4876 * @ap: ATA port to read SCR for
4878 * @val: Place to store read value
4880 * Read SCR register @reg of @ap into *@val. This function is
4881 * guaranteed to succeed if the cable type of the port is SATA
4882 * and the port implements ->scr_read.
4888 * 0 on success, negative errno on failure.
4890 int sata_scr_read(struct ata_port
*ap
, int reg
, u32
*val
)
4892 if (sata_scr_valid(ap
)) {
4893 *val
= ap
->ops
->scr_read(ap
, reg
);
/**
 * sata_scr_write - write SCR register of the specified port
 * @ap: ATA port to write SCR for
 * @reg: SCR to write
 * @val: value to write
 *
 * Write @val to SCR register @reg of @ap.  This function is
 * guaranteed to succeed if the cable type of the port is SATA
 * and the port implements ->scr_write.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}
/**
 * sata_scr_write_flush - write SCR register of the specified port and flush
 * @ap: ATA port to write SCR for
 * @reg: SCR to write
 * @val: value to write
 *
 * This function is identical to sata_scr_write() except that this
 * function performs flush after writing to the register.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
/**
 * ata_port_online - test whether the given port is online
 * @ap: ATA port to test
 *
 * Test whether @ap is online.  Note that this function returns 0
 * if online status of @ap cannot be obtained, so
 * ata_port_online(ap) != !ata_port_offline(ap).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}
/**
 * ata_port_offline - test whether the given port is offline
 * @ap: ATA port to test
 *
 * Test whether @ap is offline.  Note that this function returns
 * 0 if offline status of @ap cannot be obtained, so
 * ata_port_online(ap) != !ata_port_offline(ap).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}
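/*
 * Note the asymmetry spelled out in the two comments above: when the SCRs
 * cannot be read, both helpers return 0, so "unknown" is reported as neither
 * online nor offline.  An illustrative (hypothetical) caller therefore needs
 * three branches:
 *
 *	if (ata_port_online(ap))
 *		;	... link known to be up ...
 *	else if (ata_port_offline(ap))
 *		;	... link known to be down ...
 *	else
 *		;	... link state could not be determined ...
 */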
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (ata_id_has_flush_ext(dev->id))
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
static int ata_host_set_request_pm(struct ata_host_set *host_set,
				   pm_message_t mesg, unsigned int action,
				   unsigned int ehi_flags, int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/**
 * ata_host_set_suspend - suspend host_set
 * @host_set: host_set to suspend
 * @mesg: PM message
 *
 * Suspend @host_set.  Actual operation is performed by EH.  This
 * function requests EH to perform PM operations and waits for EH
 * to finish.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host_set->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_set_resume(host_set);
	return rc;
}
/**
 * ata_host_set_resume - resume host_set
 * @host_set: host_set to resume
 *
 * Resume @host_set.  Actual operation is performed by EH.  This
 * function requests EH to perform PM operations and returns.
 * Note that all resume operations are performed in parallel.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_set_resume(struct ata_host_set *host_set)
{
	ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET,
				ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host_set->dev->power.power_state = PMSG_ON;
}
/**
 * ata_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}
/**
 * ata_port_stop - Undo ata_port_start()
 * @ap: Port to shut down
 *
 * Frees the PRD table.
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}
/**
 * ata_dev_init - Initialize an ata_device structure
 * @dev: Device structure to initialize
 *
 * Initialize @dev in preparation for probing.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host_set lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 * ata_host_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: associated SCSI mid-layer structure
 * @host_set: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure, and its associated
 * scsi_host.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	ap->lock = &host_set->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_WORK(&ap->port_task, NULL, NULL);
	INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}
/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collections of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	host->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(host);

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * each attached device.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;
	int rc;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;
	host_set->flags = ent->host_set_flags;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %lu\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				ent->irq);

		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		ata_eh_freeze_port(ap);	/* freeze port before requesting IRQ */
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host_set);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		u32 scontrol;

		ap = host_set->ports[i];

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];
		if (ap) {
			ap->ops->port_stop(ap);
			scsi_host_put(ap->host);
		}
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
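/*
 * Example (hypothetical, heavily abbreviated): a low-level driver's probe
 * routine typically fills in an ata_probe_ent and hands it to
 * ata_device_add(); every "foo_" name and the numeric values below are made
 * up for illustration, and error handling is omitted.
 *
 *	probe_ent->dev = &pdev->dev;
 *	probe_ent->sht = &foo_sht;
 *	probe_ent->port_ops = &foo_port_ops;
 *	probe_ent->n_ports = 2;
 *	probe_ent->irq = pdev->irq;
 *	probe_ent->pio_mask = 0x1f;
 *	ata_std_ports(&probe_ent->port[0]);
 *
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 *	kfree(probe_ent);
 */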
/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->host);
}
/**
 * ata_host_set_remove - PCI layer callback for device removal
 * @host_set: ATA host set that was removed
 *
 * Unregister all objects associated with this host set. Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	unsigned int i;

	for (i = 0; i < host_set->n_ports; i++)
		ata_port_detach(host_set->ports[i]);

	free_irq(host_set->irq, host_set);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ap->ops->port_stop(ap);

	DPRINTK("EXIT\n");
	return 1;
}
/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);
	struct ata_host_set *host_set2 = host_set->next;

	ata_host_set_remove(host_set);
	if (host_set2)
		ata_host_set_remove(host_set2);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);

	if (state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_set_suspend(host_set, state);
	if (rc)
		return rc;

	if (host_set->next) {
		rc = ata_host_set_suspend(host_set->next, state);
		if (rc) {
			ata_host_set_resume(host_set);
			return rc;
		}
	}

	ata_pci_device_do_suspend(pdev, state);
	return 0;
}
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);

	ata_host_set_resume(host_set);
	if (host_set->next)
		ata_host_set_resume(host_set->next);

	return 0;
}
#endif /* CONFIG_PCI */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
/**
 * ata_wait_register - wait until register value changes
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval_msec: polling interval in milliseconds
 * @timeout_msec: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval_msec until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_set_suspend);
EXPORT_SYMBOL_GPL(ata_host_set_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);