/*
   libata-core.c - helper library for ATA

   Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   Copyright 2003-2004 Jeff Garzik

   The contents of this file are subject to the Open
   Software License version 1.1 that can be found at
   http://www.opensource.org/licenses/osl-1.1.txt and is included herein
   by reference.

   Alternatively, the contents of this file may be used under the terms
   of the GNU General Public License version 2 (the "GPL") as distributed
   in the kernel source COPYING file, in which case the provisions of
   the GPL are applicable instead of the above.  If you wish to allow
   the use of your version of this file only under the terms of the
   GPL and not to allow others to use your version of this file under
   the OSL, indicate your decision by deleting the provisions above and
   replace them with the notice and other provisions required by the GPL.
   If you do not delete the provisions above, a recipient may use your
   version of this file under either the OSL or the GPL.

 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
static int fgb(u32 bitmap);
static int ata_choose_xfer_mode(struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out);
static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
/**
 *	ata_tf_load_pio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
/**
 *	ata_tf_load_mmio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}
/**
 *	ata_exec_command_pio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 *	ata_exec_command_mmio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}

void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}
/**
 *	ata_exec - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO/MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	Obtains host_set lock.
 */

static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
{
	unsigned long flags;

	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->ops->exec_command(ap, tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	Obtains host_set lock.
 */

static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);

	ata_exec(ap, tf);
}

/**
 *	ata_tf_to_host_nolock - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
/**
 *	ata_tf_read_pio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf via MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}

void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}
/**
 *	ata_check_status_pio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 *	ata_check_status_mmio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	via MMIO and returns its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}

u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
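
/*
 * Worked example (illustrative values, not from the original source):
 * a READ DMA EXT (command 0x25) at LBA 0x0012345678, nsect 16, pmp 0
 * produces fis[0]=0x27 (H2D), fis[1]=0x80 (Command FIS bit, PMP 0),
 * fis[2]=0x25, fis[4..6]=0x78,0x56,0x34 (LBA 7:0, 15:8, 23:16),
 * fis[8..10]=0x12,0x00,0x00 (LBA 31:24, 39:32, 47:40), fis[12]=0x10.
 */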
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a Serial ATA FIS structure to a standard ATA
 *	taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
/**
 *	ata_prot_to_cmd - determine which read/write opcodes to use
 *	@protocol: ATA_PROT_xxx taskfile protocol
 *	@lba48: true if lba48 is present
 *
 *	Given necessary input, determine which read/write commands
 *	to use to transfer data.
 *
 *	LOCKING:
 *	None.
 */
static int ata_prot_to_cmd(int protocol, int lba48)
{
	int rcmd = 0, wcmd = 0;

	switch (protocol) {
	case ATA_PROT_PIO:
		if (lba48) {
			rcmd = ATA_CMD_PIO_READ_EXT;
			wcmd = ATA_CMD_PIO_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_PIO_READ;
			wcmd = ATA_CMD_PIO_WRITE;
		}
		break;

	case ATA_PROT_DMA:
		if (lba48) {
			rcmd = ATA_CMD_READ_EXT;
			wcmd = ATA_CMD_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_READ;
			wcmd = ATA_CMD_WRITE;
		}
		break;

	default:
		return -1;
	}

	return rcmd | (wcmd << 8);
}
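
/*
 * Both opcodes are packed into one non-negative int: the read command
 * in the low byte, the write command in bits 8-15.  For example, for
 * (ATA_PROT_DMA, lba48=0) the return value is
 * ATA_CMD_READ | (ATA_CMD_WRITE << 8); ata_dev_set_protocol() below
 * unpacks the two bytes into dev->read_cmd and dev->write_cmd.
 */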
/**
 *	ata_dev_set_protocol - set taskfile protocol and r/w commands
 *	@dev: device to examine and configure
 *
 *	Examine the device configuration, after we have
 *	read the identify-device page and configured the
 *	data transfer mode.  Set internal state related to
 *	the ATA taskfile protocol (pio, pio mult, dma, etc.)
 *	and calculate the proper read/write commands to use.
 *
 *	LOCKING:
 *	caller.
 */
static void ata_dev_set_protocol(struct ata_device *dev)
{
	int pio = (dev->flags & ATA_DFLAG_PIO);
	int lba48 = (dev->flags & ATA_DFLAG_LBA48);
	int proto, cmd;

	if (pio)
		proto = dev->xfer_protocol = ATA_PROT_PIO;
	else
		proto = dev->xfer_protocol = ATA_PROT_DMA;

	cmd = ata_prot_to_cmd(proto, lba48);
	if (cmd < 0)
		BUG();

	dev->read_cmd = cmd & 0xff;
	dev->write_cmd = (cmd >> 8) & 0xff;
}
static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};
/**
 *	ata_mode_string - convert transfer mode mask to string
 *	@mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mask, or the constant C string "<n/a>".
 */

static const char *ata_mode_string(unsigned int mask)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	return xfer_mode_str[i];
}
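
/*
 * The mask scanned above uses one bit per transfer mode: bits 0-7 are
 * UDMA0-7, bits ATA_SHIFT_MWDMA..ATA_SHIFT_MWDMA+2 are MWDMA0-2, and
 * bits ATA_SHIFT_PIO..ATA_SHIFT_PIO+4 are PIO0-4, matching the order
 * of xfer_mode_str[].  Scanning each class from its top bit down picks
 * the fastest supported mode; e.g. a mask with bit 5 as its highest
 * UDMA bit yields "UDMA/100".
 */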
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
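
/*
 * Summary of the signature values checked above:
 *
 *	lbam/lbah 0x00/0x00 or 0x3c/0xc3  ->  ATA device
 *	lbam/lbah 0x14/0xeb or 0x69/0x96  ->  ATAPI device
 *
 * The 0x00/0x00 and 0x14/0xeb pairs are the classic ATA/ATAPI
 * signatures; the other two are the alternate values mentioned in the
 * Darwin-derived comment.  Anything else is ATA_DEV_UNKNOWN.
 */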
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 */

static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	err = ata_chk_err(ap);
	ap->ops->tf_read(ap, &tf);

	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}
/**
 *	ata_dev_id_string - Convert IDENTIFY DEVICE page into string
 *	@dev: Device whose IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_id_string(struct ata_device *dev, unsigned char *s,
		       unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = dev->id[ofs] >> 8;
		*s = c;
		s++;

		c = dev->id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
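
/*
 * Typical use (illustrative; the buffer name is hypothetical): pulling
 * the 40-character model string that starts at IDENTIFY word 27.
 * Note @ofs indexes dev->id[] in 16-bit words, not bytes.
 *
 *	unsigned char model[41];
 *	ata_dev_id_string(dev, model, 27, 40);
 *	model[40] = '\0';
 */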
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: Device whose IDENTIFY DEVICE page we will dump
 *
 *	Dump selected 16-bit words from a detected device's
 *	IDENTIFY DEVICE page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(struct ata_device *dev)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}
/**
 *	ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
 *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
 *
 *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 *	command, and read back the 512-byte device information page.
 *	The device information page is fed to us via the standard
 *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
 *	using standard PIO-IN paths)
 *
 *	After reading the device information page, we use several
 *	bits of information from it to initialize data structures
 *	that will be used during the lifetime of the ata_device.
 *	Other data from the info page is used to disqualify certain
 *	older ATA devices we do not wish to support.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 */

static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int i;
	u16 tmp;
	unsigned long xfer_modes;
	u8 status;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->pci_dma_dir = PCI_DMA_FROMDEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->nsect = 1;

retry:
	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		qc->tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		goto err_out;
	else
		wait_for_completion(&wait);

	status = ata_chk_status(ap);
	if (status & ATA_ERR) {
		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
			u8 err = ata_chk_err(ap);
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				qc->cursg = 0;
				qc->cursg_ofs = 0;
				qc->cursect = 0;
				qc->nsect = 1;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require LBA and DMA support (bits 8 & 9 of word 49) */
	if (!ata_id_has_dma(dev) || !ata_id_has_lba(dev)) {
		printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes) {
		xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
		xfer_modes |= (0x7 << ATA_SHIFT_PIO);
	}

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev))	/* sanity check */
			goto err_out_nosup;

		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (i = 14; i >= 1; i--)
			if (tmp & (1 << i))
				break;

		/* we require at least ATA-3 */
		if (i < 3) {
			printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
			goto err_out_nosup;
		}

		if (ata_id_has_lba48(dev)) {
			dev->flags |= ATA_DFLAG_LBA48;
			dev->n_sectors = ata_id_u64(dev, 100);
		} else {
			dev->n_sectors = ata_id_u32(dev, 60);
		}

		ap->host->max_cmd_len = 16;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes),
		       (unsigned long long)dev->n_sectors,
		       dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
	}

	/* ATAPI-specific feature tests */
	else {
		if (ata_id_is_ata(dev))		/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	ata_irq_on(ap);	/* re-enable interrupts */
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	LOCKING:
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			found = 1;
			if (ap->ops->dev_config)
				ap->ops->dev_config(ap, &ap->device[i]);
		}
	}

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
err_out:
	return -1;
}
/**
 *	ata_port_probe - Mark port as enabled (not disabled)
 *	@ap: Port for which we indicate enablement
 *
 *	LOCKING:
 *	None.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY
 *
 *	LOCKING:
 *	None.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */
		scr_read(ap, SCR_STATUS);	/* dummy read; flush */
		udelay(400);			/* FIXME: a guess */
	}
	scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else {
		sstatus = scr_read(ap, SCR_STATUS);
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, sstatus);
		ata_port_disable(ap);
	}

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus, then probe the bus
 *	@ap: SATA port associated with target SATA PHY
 *
 *	LOCKING:
 *	None.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_port_disable - Disable port, forgetting any attached devices
 *	@ap: Port to be disabled
 *
 *	LOCKING:
 *	None.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}
static struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA, XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO, XFER_PIO_0 },
};

static inline u8 base_from_shift(unsigned int shift)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
		if (xfer_mode_classes[i].shift == shift)
			return xfer_mode_classes[i].base;

	return 0xff;
}
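
/*
 * base_from_shift() maps a mode-class shift to the first XFER_* opcode
 * of that class, e.g. base_from_shift(ATA_SHIFT_UDMA) == XFER_UDMA_0.
 * Adding a mode's bit offset within its class to this base gives the
 * exact value for the SET FEATURES - XFER MODE subcommand.
 */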
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	int ofs, idx;
	u8 base;

	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno, xfer_mode_str[idx]);
}
static int ata_host_set_pio(struct ata_port *ap)
{
	unsigned int mask;
	int x, i;
	u8 base, xfer_mode;

	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
	x = fgb(mask);
	if (x < 0) {
		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
		return -1;
	}

	base = base_from_shift(ATA_SHIFT_PIO);
	xfer_mode = base + x;

	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
		(int)base, (int)xfer_mode, mask, x);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->pio_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = ATA_SHIFT_PIO;
			if (ap->ops->set_piomode)
				ap->ops->set_piomode(ap, dev);
		}
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
			     unsigned int xfer_shift)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->dma_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = xfer_shift;
			if (ap->ops->set_dmamode)
				ap->ops->set_dmamode(ap, dev);
		}
	}
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	LOCKING:
 */
static void ata_set_mode(struct ata_port *ap)
{
	unsigned int i, xfer_shift;
	u8 xfer_mode;
	int rc;

	/* step 1: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 2: choose the best data xfer mode */
	xfer_mode = xfer_shift = 0;
	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
	if (rc)
		goto err_out;

	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
	if (xfer_shift != ATA_SHIFT_PIO)
		ata_host_set_dma(ap, xfer_mode, xfer_shift);

	/* step 4: update devices' xfer mode */
	ata_dev_set_mode(ap, &ap->device[0]);
	ata_dev_set_mode(ap, &ap->device[1]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	for (i = 0; i < 2; i++) {
		struct ata_device *dev = &ap->device[i];
		ata_dev_set_protocol(dev);
	}

	return;

err_out:
	ata_port_disable(ap);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	LOCKING:
 */

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
/**
 *	ata_bus_edd - Reset the ATA bus via EXECUTE DEVICE DIAGNOSTIC
 *	@ap: Port to reset
 *
 *	LOCKING:
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	ata_tf_to_host(ap, &tf);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);

	/* re-enable interrupts */
	ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
{
	struct ata_device *master, *slave;
	unsigned int mask;

	master = &ap->device[0];
	slave = &ap->device[1];

	assert (ata_dev_present(master) || ata_dev_present(slave));

	if (shift == ATA_SHIFT_UDMA) {
		mask = ap->udma_mask;
		if (ata_dev_present(master))
			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
		if (ata_dev_present(slave))
			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
	}
	else if (shift == ATA_SHIFT_MWDMA) {
		mask = ap->mwdma_mask;
		if (ata_dev_present(master))
			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
		if (ata_dev_present(slave))
			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
	}
	else if (shift == ATA_SHIFT_PIO) {
		mask = ap->pio_mask;
		if (ata_dev_present(master)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
			tmp_mode <<= 3;
			tmp_mode |= 0x7;
			mask &= tmp_mode;
		}
		if (ata_dev_present(slave)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
			tmp_mode <<= 3;
			tmp_mode |= 0x7;
			mask &= tmp_mode;
		}
	}
	else {
		mask = 0xffffffff; /* shut up compiler warning */
		BUG();
	}

	return mask;
}
/* find greatest bit */
static int fgb(u32 bitmap)
{
	unsigned int i;
	int x = -1;

	for (i = 0; i < 32; i++)
		if (bitmap & (1 << i))
			x = i;

	return x;
}
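
/*
 * fgb() returns the index of the highest bit set, or -1 for an empty
 * bitmap; e.g. fgb(0x16) == 4.  It is used to pick the fastest mode
 * (highest bit) out of a combined capability mask.
 */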
/**
 *	ata_choose_xfer_mode - choose the best data transfer mode for a port
 *	@ap: Port for which a transfer mode will be chosen
 *	@xfer_mode_out: (output) chosen XFER_* mode
 *	@xfer_shift_out: (output) shift of the chosen mode class
 *
 *	LOCKING:
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_choose_xfer_mode(struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out)
{
	unsigned int mask, shift;
	int x, i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
		shift = xfer_mode_classes[i].shift;
		mask = ata_get_mode_mask(ap, shift);

		x = fgb(mask);
		if (x >= 0) {
			*xfer_mode_out = xfer_mode_classes[i].base + x;
			*xfer_shift_out = shift;
			return 0;
		}
	}

	return -1;
}
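
/*
 * xfer_mode_classes[] is ordered UDMA, then MWDMA, then PIO, so the
 * loop above settles on the fastest class that both the controller and
 * all attached devices support, and fgb() then picks the fastest mode
 * within that class.
 */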
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@ap: Port associated with device @dev
 *	@dev: Device to which command will be sent
 *
 *	LOCKING:
 */

static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	int rc;
	unsigned long flags;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf.command = ATA_CMD_SET_FEATURES;
	qc->tf.feature = SETFEATURES_XFER;
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->tf.nsect = dev->xfer_mode;

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&wait);

	DPRINTK("EXIT\n");
}
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	LOCKING:
 */

static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->pci_dma_dir;

	assert(qc->flags & ATA_QCFLAG_DMAMAP);
	assert(sg != NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		assert(qc->n_elem == 1);

	DPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->flags & ATA_QCFLAG_SG)
		pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
	else
		pci_unmap_single(ap->host_set->pdev, sg_dma_address(&sg[0]),
				 sg_dma_len(&sg[0]), dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	LOCKING:
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int idx, nelem;

	assert(sg != NULL);
	assert(qc->n_elem > 0);

	idx = 0;
	for (nelem = qc->n_elem; nelem; nelem--,sg++) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
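
/*
 * Worked example of the 64K-boundary split above (illustrative values):
 * an S/G element with DMA address 0x1fff0 and length 0x8000 becomes two
 * PRD entries, (0x1fff0, 0x0010) and (0x20000, 0x7ff0), so no single
 * entry crosses a 64K boundary.  The last entry written then gets the
 * ATA_PRD_EOT flag to terminate the table.
 */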
/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	struct scatterlist *sg;

	qc->flags |= ATA_QCFLAG_SINGLE;

	memset(&qc->sgent, 0, sizeof(qc->sgent));
	qc->sg = &qc->sgent;
	qc->n_elem = 1;
	qc->buf_virt = buf;

	sg = qc->sg;
	sg->page = virt_to_page(buf);
	sg->offset = (unsigned long) buf & ~PAGE_MASK;
	sg_dma_len(sg) = buflen;

	WARN_ON(buflen > PAGE_SIZE);
}

void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->sg = sg;
	qc->n_elem = n_elem;
}
/**
 *	ata_sg_setup_one - DMA-map the single memory buffer of a command
 *	@qc: Command to prepare
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->pci_dma_dir;
	struct scatterlist *sg = qc->sg;
	dma_addr_t dma_address;

	dma_address = pci_map_single(ap->host_set->pdev, qc->buf_virt,
				     sg_dma_len(sg), dir);
	if (pci_dma_mapping_error(dma_address))
		return -1;

	sg_dma_address(sg) = dma_address;

	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table of a command
 *	@qc: Command to prepare
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int n_elem, dir;

	VPRINTK("ENTER, ata%u\n", ap->id);
	assert(qc->flags & ATA_QCFLAG_SG);

	dir = qc->pci_dma_dir;
	n_elem = pci_map_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);

	qc->n_elem = n_elem;

	return 0;
}
/**
 *	ata_pio_poll - poll using PIO, depending on current state
 *	@ap: the target ata_port
 *
 *	LOCKING:
 *	None.  (executing in kernel thread context)
 *
 *	RETURNS:
 *	delay before the next poll, in jiffies (zero for no delay).
 */

static unsigned long ata_pio_poll(struct ata_port *ap)
{
	u8 status;
	unsigned int poll_state = PIO_ST_UNKNOWN;
	unsigned int reg_state = PIO_ST_UNKNOWN;
	const unsigned int tmout_state = PIO_ST_TMOUT;

	switch (ap->pio_task_state) {
	case PIO_ST:
	case PIO_ST_POLL:
		poll_state = PIO_ST_POLL;
		reg_state = PIO_ST;
		break;
	case PIO_ST_LAST:
	case PIO_ST_LAST_POLL:
		poll_state = PIO_ST_LAST_POLL;
		reg_state = PIO_ST_LAST;
		break;
	default:
		BUG();
		break;
	}

	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		if (time_after(jiffies, ap->pio_task_timeout)) {
			ap->pio_task_state = tmout_state;
			return 0;
		}
		ap->pio_task_state = poll_state;
		return ATA_SHORT_PAUSE;
	}

	ap->pio_task_state = reg_state;
	return 0;
}
/**
 *	ata_pio_complete - check status and finish a polled PIO command
 *	@ap: the target ata_port
 *
 *	LOCKING:
 */

static void ata_pio_complete (struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 drv_stat;

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
		msleep(2);
		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
			ap->pio_task_state = PIO_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return;
		}
	}

	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		ap->pio_task_state = PIO_ST_ERR;
		return;
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);

	ap->pio_task_state = PIO_ST_IDLE;

	ata_irq_on(ap);

	ata_qc_complete(qc, drv_stat);
}
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
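
/*
 * IDENTIFY data arrives from the device as little-endian 16-bit words.
 * swap_buf_le16() is a no-op on little-endian hosts and byte-swaps each
 * word on big-endian ones, so dev->id[] can always be indexed as native
 * u16 values (see the swap_buf_le16(dev->id, ATA_ID_WORDS) call in
 * ata_dev_identify() above).
 */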
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
{
	unsigned int i;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	if (write_data) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), mmio);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(mmio));
	}
}

static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
			      unsigned int buflen, int write_data)
{
	unsigned int words = buflen >> 1;	/* 16-bit words */

	if (write_data)
		outsw(ap->ioaddr.data_addr, buf, words);
	else
		insw(ap->ioaddr.data_addr, buf, words);
}

static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
			  unsigned int buflen, int do_write)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_mmio_data_xfer(ap, buf, buflen, do_write);
	else
		ata_pio_data_xfer(ap, buf, buflen, do_write);
}
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;

	if (qc->cursect == (qc->nsect - 1))
		ap->pio_task_state = PIO_ST_LAST;

	page = sg[qc->cursg].page;
	buf = kmap(page) +
	      sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE);

	qc->cursect++;
	qc->cursg_ofs++;

	if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);

	kunmap(page);
}

static void atapi_pio_sector(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int i, ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;
	bc_lo = qc->tf.lbam;
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	/* make sure byte count is multiple of sector size; not
	 * required by standard (warning! warning!), but IDE driver
	 * does this to simplify things a bit.  We are lazy, and
	 * follow suit.
	 */
	if (bytes & (ATA_SECT_SIZE - 1))
		goto err_out;

	for (i = 0; i < (bytes >> 9); i++)
		ata_pio_sector(qc);

	return;

err_out:
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	       ap->id, dev->devno);
	ap->pio_task_state = PIO_ST_ERR;
}
/**
 *	ata_pio_block - start the next block of a PIO transfer
 *	@ap: the target ata_port
 *
 *	LOCKING:
 */

static void ata_pio_block(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ap->pio_task_state = PIO_ST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return;
		}
	}

	/* handle BSY=0, DRQ=0 as error */
	if ((status & ATA_DRQ) == 0) {
		ap->pio_task_state = PIO_ST_ERR;
		return;
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);

	if (is_atapi_taskfile(&qc->tf))
		atapi_pio_sector(qc);
	else
		ata_pio_sector(qc);
}
2283 static void ata_pio_error(struct ata_port *ap)
2285 struct ata_queued_cmd *qc;
2286 u8 drv_stat;
2288 qc = ata_qc_from_tag(ap, ap->active_tag);
2289 assert(qc != NULL);
2291 drv_stat = ata_chk_status(ap);
2292 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2293 ap->id, drv_stat);
2295 ap->pio_task_state = PIO_ST_IDLE;
2297 ata_irq_on(ap);
2299 ata_qc_complete(qc, drv_stat | ATA_ERR);
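/* Informational summary of the PIO state machine driven below:
 *
 *   PIO_ST                         - transfer next data chunk (ata_pio_block)
 *   PIO_ST_LAST                    - data done; finish command (ata_pio_complete)
 *   PIO_ST_POLL / PIO_ST_LAST_POLL - device busy; re-poll after a delay
 *   PIO_ST_TMOUT / PIO_ST_ERR      - fail the command (ata_pio_error)
 *
 * The task requeues itself on the ata workqueue until the port goes idle.
 */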
2302 static void ata_pio_task(void *_data)
2304 struct ata_port *ap = _data;
2305 unsigned long timeout = 0;
2307 switch (ap->pio_task_state) {
2308 case PIO_ST:
2309 ata_pio_block(ap);
2310 break;
2312 case PIO_ST_LAST:
2313 ata_pio_complete(ap);
2314 break;
2316 case PIO_ST_POLL:
2317 case PIO_ST_LAST_POLL:
2318 timeout = ata_pio_poll(ap);
2319 break;
2321 case PIO_ST_TMOUT:
2322 case PIO_ST_ERR:
2323 ata_pio_error(ap);
2324 break;
2327 if ((ap->pio_task_state != PIO_ST_IDLE) &&
2328 (ap->pio_task_state != PIO_ST_TMOUT) &&
2329 (ap->pio_task_state != PIO_ST_ERR)) {
2330 if (timeout)
2331 queue_delayed_work(ata_wq, &ap->pio_task,
2332 timeout);
2333 else
2334 queue_work(ata_wq, &ap->pio_task);
2339 * ata_qc_timeout - Handle timeout of queued command
2340 * @qc: Command that timed out
2342 * Some part of the kernel (currently, only the SCSI layer)
2343 * has noticed that the active command on the port has not
2344 * completed after a specified length of time. Handle this
2345 * condition by disabling DMA (if necessary) and completing
2346 * transactions, with error if necessary.
2348 * This also handles the case of the "lost interrupt", where
2349 * for some reason (possibly hardware bug, possibly driver bug)
2350 * an interrupt was not delivered to the driver, even though the
2351 * transaction completed successfully.
2353 * LOCKING: Inherited from SCSI layer (none, can sleep)
2356 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2358 struct ata_port *ap = qc->ap;
2359 u8 host_stat = 0, drv_stat;
2361 DPRINTK("ENTER\n");
2363 /* hack alert! We cannot use the supplied completion
2364 * function from inside the ->eh_strategy_handler() thread.
2365 * libata is the only user of ->eh_strategy_handler() in
2366 * any kernel, so the default scsi_done() assumes it is
2367 * not being called from the SCSI EH.
2369 qc->scsidone = scsi_finish_command;
2371 switch (qc->tf.protocol) {
2373 case ATA_PROT_DMA:
2374 case ATA_PROT_ATAPI_DMA:
2375 host_stat = ata_bmdma_status(ap);
2377 /* before we do anything else, clear DMA-Start bit */
2378 ata_bmdma_stop(ap);
2380 /* fall through */
2382 default:
2383 ata_altstatus(ap);
2384 drv_stat = ata_chk_status(ap);
2386 /* ack bmdma irq events */
2387 ata_bmdma_ack_irq(ap);
2389 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2390 ap->id, qc->tf.command, drv_stat, host_stat);
2392 /* complete taskfile transaction */
2393 ata_qc_complete(qc, drv_stat);
2394 break;
2397 DPRINTK("EXIT\n");
2401 * ata_eng_timeout - Handle timeout of queued command
2402 * @ap: Port on which timed-out command is active
2404 * Some part of the kernel (currently, only the SCSI layer)
2405 * has noticed that the active command on port @ap has not
2406 * completed after a specified length of time. Handle this
2407 * condition by disabling DMA (if necessary) and completing
2408 * transactions, with error if necessary.
2410 * This also handles the case of the "lost interrupt", where
2411 * for some reason (possibly hardware bug, possibly driver bug)
2412 * an interrupt was not delivered to the driver, even though the
2413 * transaction completed successfully.
2415 * LOCKING:
2416 * Inherited from SCSI layer (none, can sleep)
2419 void ata_eng_timeout(struct ata_port *ap)
2421 struct ata_queued_cmd *qc;
2423 DPRINTK("ENTER\n");
2425 qc = ata_qc_from_tag(ap, ap->active_tag);
2426 if (!qc) {
2427 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
2428 ap->id);
2429 goto out;
2432 ata_qc_timeout(qc);
2434 out:
2435 DPRINTK("EXIT\n");
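/* A sketch (illustrative, not part of this file) of how the SCSI layer
 * reaches this handler: a low-level driver's templates route SCSI error
 * handling into libata, whose ata_scsi_error() invokes ->eng_timeout().
 * The names my_sht and my_error_ops are hypothetical.
 */
#if 0
static Scsi_Host_Template my_sht = {
	.eh_strategy_handler	= ata_scsi_error,	/* SCSI EH entry point */
	/* ... */
};

static struct ata_port_operations my_error_ops = {
	.eng_timeout		= ata_eng_timeout,	/* called by ata_scsi_error() */
	/* ... */
};
#endif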
2439 * ata_qc_new - Request an available ATA command, for queueing
2440 * @ap: Port from which we request an available command structure
2443 * LOCKING: None.
2446 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
2448 struct ata_queued_cmd *qc = NULL;
2449 unsigned int i;
2451 for (i = 0; i < ATA_MAX_QUEUE; i++)
2452 if (!test_and_set_bit(i, &ap->qactive)) {
2453 qc = ata_qc_from_tag(ap, i);
2454 break;
2457 if (qc)
2458 qc->tag = i;
2460 return qc;
2464 * ata_qc_new_init - Request an available ATA command, and initialize it
2465 * @ap: Port associated with device @dev
2466 * @dev: Device from whom we request an available command structure
2468 * LOCKING: None.
2471 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2472 struct ata_device *dev)
2474 struct ata_queued_cmd *qc;
2476 qc = ata_qc_new(ap);
2477 if (qc) {
2478 qc->sg = NULL;
2479 qc->flags = 0;
2480 qc->scsicmd = NULL;
2481 qc->ap = ap;
2482 qc->dev = dev;
2483 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2484 qc->nsect = 0;
2486 ata_tf_init(ap, &qc->tf, dev->devno);
2488 if (dev->flags & ATA_DFLAG_LBA48)
2489 qc->tf.flags |= ATA_TFLAG_LBA48;
2492 return qc;
2495 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
2497 return 0;
2501 * ata_qc_complete - Complete an active ATA command
2502 * @qc: Command to complete
2503 * @drv_stat: ATA status register contents
2505 * LOCKING: spin_lock_irqsave(host_set lock)
2509 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2511 struct ata_port *ap = qc->ap;
2512 unsigned int tag, do_clear = 0;
2513 int rc;
2515 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2516 assert(qc->flags & ATA_QCFLAG_ACTIVE);
2518 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
2519 ata_sg_clean(qc);
2521 /* call completion callback */
2522 rc = qc->complete_fn(qc, drv_stat);
2524 /* if callback indicates not to complete command (non-zero),
2525 * return immediately
2527 if (rc != 0)
2528 return;
2530 qc->flags = 0;
2531 tag = qc->tag;
2532 if (likely(ata_tag_valid(tag))) {
2533 if (tag == ap->active_tag)
2534 ap->active_tag = ATA_TAG_POISON;
2535 qc->tag = ATA_TAG_POISON;
2536 do_clear = 1;
2539 if (qc->waiting) {
2540 struct completion *waiting = qc->waiting;
2541 qc->waiting = NULL;
2542 complete(waiting);
2545 if (likely(do_clear))
2546 clear_bit(tag, &ap->qactive);
2548 VPRINTK("EXIT\n");
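/* Sketch of the ->complete_fn contract (my_qc_complete is hypothetical):
 * returning nonzero tells ata_qc_complete() to stop early, leaving the
 * qc and its tag alive for the callback's own bookkeeping.
 */
#if 0
static int my_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
	if (drv_stat & ATA_ERR)
		return 1;	/* keep the qc; caller will inspect/retry it */
	return 0;		/* normal path: tag is freed, waiters woken */
}
#endif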
2552 * ata_qc_issue - issue taskfile to device
2553 * @qc: command to issue to device
2555 * Prepare an ATA command for submission to a device.
2556 * This includes mapping the data into a DMA-able
2557 * area, filling in the S/G table, and finally
2558 * writing the taskfile to hardware, starting the command.
2560 * LOCKING:
2561 * spin_lock_irqsave(host_set lock)
2563 * RETURNS:
2564 * Zero on success, negative on error.
2567 int ata_qc_issue(struct ata_queued_cmd *qc)
2569 struct ata_port *ap = qc->ap;
2571 if (qc->flags & ATA_QCFLAG_SG) {
2572 if (ata_sg_setup(qc))
2573 goto err_out;
2574 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
2575 if (ata_sg_setup_one(qc))
2576 goto err_out;
2579 ap->ops->qc_prep(qc);
2581 qc->ap->active_tag = qc->tag;
2582 qc->flags |= ATA_QCFLAG_ACTIVE;
2584 return ap->ops->qc_issue(qc);
2586 err_out:
2587 return -1;
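/* Usage sketch (illustrative; my_issue_one_sector is hypothetical and
 * error handling is minimal): the typical submission path allocates a
 * qc, fills in its taskfile and data phase, then calls ata_qc_issue().
 */
#if 0
static int my_issue_one_sector(struct ata_port *ap, struct ata_device *dev,
			       void *buf)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(ap, dev);	/* allocate a tag, init taskfile */
	if (!qc)
		return -ENOMEM;

	ata_sg_init_one(qc, buf, ATA_SECT_SIZE); /* single-buffer data phase */
	qc->tf.command = ATA_CMD_PIO_READ;	/* plus protocol, LBA, etc. */
	qc->nsect = 1;

	return ata_qc_issue(qc);	/* map, prep, and start the command */
}
#endif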
2591 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
2592 * @qc: command to issue to device
2594 * Using various libata functions and hooks, this function
2595 * starts an ATA command. ATA commands are grouped into
2596 * classes called "protocols", and issuing each type of protocol
2597 * is slightly different.
2599 * LOCKING:
2600 * spin_lock_irqsave(host_set lock)
2602 * RETURNS:
2603 * Zero on success, negative on error.
2606 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
2608 struct ata_port *ap = qc->ap;
2610 ata_dev_select(ap, qc->dev->devno, 1, 0);
2612 switch (qc->tf.protocol) {
2613 case ATA_PROT_NODATA:
2614 ata_tf_to_host_nolock(ap, &qc->tf);
2615 break;
2617 case ATA_PROT_DMA:
2618 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2619 ap->ops->bmdma_setup(qc); /* set up bmdma */
2620 ap->ops->bmdma_start(qc); /* initiate bmdma */
2621 break;
2623 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
2624 ata_qc_set_polling(qc);
2625 ata_tf_to_host_nolock(ap, &qc->tf);
2626 ap->pio_task_state = PIO_ST;
2627 queue_work(ata_wq, &ap->pio_task);
2628 break;
2630 case ATA_PROT_ATAPI:
2631 ata_qc_set_polling(qc);
2632 ata_tf_to_host_nolock(ap, &qc->tf);
2633 queue_work(ata_wq, &ap->packet_task);
2634 break;
2636 case ATA_PROT_ATAPI_NODATA:
2637 ata_tf_to_host_nolock(ap, &qc->tf);
2638 queue_work(ata_wq, &ap->packet_task);
2639 break;
2641 case ATA_PROT_ATAPI_DMA:
2642 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2643 ap->ops->bmdma_setup(qc); /* set up bmdma */
2644 queue_work(ata_wq, &ap->packet_task);
2645 break;
2647 default:
2648 WARN_ON(1);
2649 return -1;
2652 return 0;
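/* Most taskfile/BMDMA drivers use this routine directly as their issue
 * hook -- a sketch assuming the standard helpers in this file (the name
 * my_port_ops is hypothetical):
 */
#if 0
static struct ata_port_operations my_port_ops = {
	.tf_load	= ata_tf_load,
	.exec_command	= ata_exec_command,
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	/* ... */
};
#endif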
2656 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2657 * @qc: Info associated with this ATA transaction.
2659 * LOCKING:
2660 * spin_lock_irqsave(host_set lock)
2663 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
2665 struct ata_port *ap = qc->ap;
2666 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2667 u8 dmactl;
2668 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
2670 /* load PRD table addr. */
2671 mb(); /* make sure PRD table writes are visible to controller */
2672 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
2674 /* specify data direction, triple-check start bit is clear */
2675 dmactl = readb(mmio + ATA_DMA_CMD);
2676 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2677 if (!rw)
2678 dmactl |= ATA_DMA_WR;
2679 writeb(dmactl, mmio + ATA_DMA_CMD);
2681 /* issue r/w command */
2682 ap->ops->exec_command(ap, &qc->tf);
2686 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2687 * @qc: Info associated with this ATA transaction.
2689 * LOCKING:
2690 * spin_lock_irqsave(host_set lock)
2693 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
2695 struct ata_port *ap = qc->ap;
2696 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
2697 u8 dmactl;
2699 /* start host DMA transaction */
2700 dmactl = readb(mmio + ATA_DMA_CMD);
2701 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
2703 /* Strictly, one may wish to issue a readb() here, to
2704 * flush the mmio write. However, control also passes
2705 * to the hardware at this point, and it will interrupt
2706 * us when we are to resume control. So, in effect,
2707 * we don't care when the mmio write flushes.
2708 * Further, a read of the DMA status register _immediately_
2709 * following the write may not be what certain flaky hardware
2710 * expects, so it seems best not to add a readb() without
2711 * first auditing all the MMIO ATA cards/mobos.
2712 * Or maybe I'm just being paranoid.
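	/* If posted-write latency ever proves to be a real problem here,
	 * the flush would be a one-liner -- a sketch, intentionally not
	 * applied:
	 */
#if 0
	readb(mmio + ATA_DMA_CMD);	/* flush the posted mmio write */
#endif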
2717 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
2718 * @qc: Info associated with this ATA transaction.
2720 * LOCKING:
2721 * spin_lock_irqsave(host_set lock)
2724 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
2726 struct ata_port *ap = qc->ap;
2727 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2728 u8 dmactl;
2730 /* load PRD table addr. */
2731 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2733 /* specify data direction, triple-check start bit is clear */
2734 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2735 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2736 if (!rw)
2737 dmactl |= ATA_DMA_WR;
2738 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2740 /* issue r/w command */
2741 ap->ops->exec_command(ap, &qc->tf);
2745 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
2746 * @qc: Info associated with this ATA transaction.
2748 * LOCKING:
2749 * spin_lock_irqsave(host_set lock)
2752 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
2754 struct ata_port *ap = qc->ap;
2755 u8 dmactl;
2757 /* start host DMA transaction */
2758 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2759 outb(dmactl | ATA_DMA_START,
2760 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2763 void ata_bmdma_start(struct ata_queued_cmd *qc)
2765 if (qc->ap->flags & ATA_FLAG_MMIO)
2766 ata_bmdma_start_mmio(qc);
2767 else
2768 ata_bmdma_start_pio(qc);
2771 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2773 if (qc->ap->flags & ATA_FLAG_MMIO)
2774 ata_bmdma_setup_mmio(qc);
2775 else
2776 ata_bmdma_setup_pio(qc);
2779 void ata_bmdma_irq_clear(struct ata_port *ap)
2781 ata_bmdma_ack_irq(ap);
2785 * ata_host_intr - Handle host interrupt for given (port, task)
2786 * @ap: Port on which interrupt arrived (possibly...)
2787 * @qc: Taskfile currently active in engine
2789 * Handle host interrupt for given queued command.  Currently,
2790 * DMA, ATAPI and non-data completions are handled here; PIO data
2791 * commands are handled via polling with interrupts disabled (nIEN bit).
2793 * LOCKING:
2794 * spin_lock_irqsave(host_set lock)
2796 * RETURNS:
2797 * One if interrupt was handled, zero if not (shared irq).
2800 inline unsigned int ata_host_intr (struct ata_port *ap,
2801 struct ata_queued_cmd *qc)
2803 u8 status, host_stat;
2805 switch (qc->tf.protocol) {
2807 case ATA_PROT_DMA:
2808 case ATA_PROT_ATAPI_DMA:
2809 case ATA_PROT_ATAPI:
2810 /* check status of DMA engine */
2811 host_stat = ata_bmdma_status(ap);
2812 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
2814 /* if it's not our irq... */
2815 if (!(host_stat & ATA_DMA_INTR))
2816 goto idle_irq;
2818 /* before we do anything else, clear DMA-Start bit */
2819 ata_bmdma_stop(ap);
2821 /* fall through */
2823 case ATA_PROT_ATAPI_NODATA:
2824 case ATA_PROT_NODATA:
2825 /* check altstatus */
2826 status = ata_altstatus(ap);
2827 if (status & ATA_BUSY)
2828 goto idle_irq;
2830 /* check main status, clearing INTRQ */
2831 status = ata_chk_status(ap);
2832 if (unlikely(status & ATA_BUSY))
2833 goto idle_irq;
2834 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
2835 ap->id, qc->tf.protocol, status);
2837 /* ack bmdma irq events */
2838 ata_bmdma_ack_irq(ap);
2840 /* complete taskfile transaction */
2841 ata_qc_complete(qc, status);
2842 break;
2844 default:
2845 goto idle_irq;
2848 return 1; /* irq handled */
2850 idle_irq:
2851 ap->stats.idle_irq++;
2853 #ifdef ATA_IRQ_TRAP
2854 if ((ap->stats.idle_irq % 1000) == 0) {
2856 ata_irq_ack(ap, 0); /* debug trap */
2857 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
2859 #endif
2860 return 0; /* irq not handled */
2864 * ata_interrupt - Default ATA host interrupt handler
2865 * @irq: irq line
2866 * @dev_instance: pointer to our host information structure
2867 * @regs: unused
2869 * LOCKING: host_set lock is taken and released inside this handler.
2871 * RETURNS: IRQ_RETVAL(1) if any port was serviced, IRQ_RETVAL(0) if not.
2875 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
2877 struct ata_host_set *host_set = dev_instance;
2878 unsigned int i;
2879 unsigned int handled = 0;
2880 unsigned long flags;
2882 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
2883 spin_lock_irqsave(&host_set->lock, flags);
2885 for (i = 0; i < host_set->n_ports; i++) {
2886 struct ata_port *ap;
2888 ap = host_set->ports[i];
2889 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
2890 struct ata_queued_cmd *qc;
2892 qc = ata_qc_from_tag(ap, ap->active_tag);
2893 if (qc && (!(qc->tf.ctl & ATA_NIEN)))
2894 handled |= ata_host_intr(ap, qc);
2898 spin_unlock_irqrestore(&host_set->lock, flags);
2900 return IRQ_RETVAL(handled);
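/* Hookup sketch (illustrative; my_intr_ops is hypothetical): drivers
 * name this handler in their port operations, and ata_device_add()
 * passes it to request_irq() for the (possibly shared) interrupt line.
 */
#if 0
static struct ata_port_operations my_intr_ops = {
	.irq_handler	= ata_interrupt,
	.irq_clear	= ata_bmdma_irq_clear,
	/* ... */
};
#endif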
2904 * atapi_packet_task - Write CDB bytes to hardware
2905 * @_data: Port to which ATAPI device is attached.
2907 * When device has indicated its readiness to accept
2908 * a CDB, this function is called. Send the CDB.
2909 * If DMA is to be performed, exit immediately.
2910 * Otherwise, we are in polling mode, so poll
2911 * status until the operation succeeds or fails.
2913 * LOCKING:
2914 * Kernel thread context (may sleep)
2917 static void atapi_packet_task(void *_data)
2919 struct ata_port *ap = _data;
2920 struct ata_queued_cmd *qc;
2921 u8 status;
2923 qc = ata_qc_from_tag(ap, ap->active_tag);
2924 assert(qc != NULL);
2925 assert(qc->flags & ATA_QCFLAG_ACTIVE);
2927 /* sleep-wait for BSY to clear */
2928 DPRINTK("busy wait\n");
2929 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
2930 goto err_out;
2932 /* make sure DRQ is set */
2933 status = ata_chk_status(ap);
2934 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
2935 goto err_out;
2937 /* send SCSI cdb */
2938 DPRINTK("send cdb\n");
2939 assert(ap->cdb_len >= 12);
2940 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
2942 /* if we are DMA'ing, irq handler takes over from here */
2943 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
2944 ap->ops->bmdma_start(qc); /* initiate bmdma */
2946 /* non-data commands are also handled via irq */
2947 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
2948 /* do nothing */
2951 /* PIO commands are handled by polling */
2952 else {
2953 ap->pio_task_state = PIO_ST;
2954 queue_work(ata_wq, &ap->pio_task);
2957 return;
2959 err_out:
2960 ata_qc_complete(qc, ATA_ERR);
2963 int ata_port_start (struct ata_port *ap)
2965 struct pci_dev *pdev = ap->host_set->pdev;
2967 ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma);
2968 if (!ap->prd)
2969 return -ENOMEM;
2971 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
2973 return 0;
2976 void ata_port_stop (struct ata_port *ap)
2978 struct pci_dev *pdev = ap->host_set->pdev;
2980 pci_free_consistent(pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
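/* A driver needing controller-private per-port state would typically
 * wrap these helpers -- a sketch under that assumption (my_port_start
 * is hypothetical):
 */
#if 0
static int my_port_start(struct ata_port *ap)
{
	int rc = ata_port_start(ap);	/* allocates the PRD table */

	if (rc)
		return rc;
	/* ... allocate controller-private DMA state here ... */
	return 0;
}
#endif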
2984 * ata_host_remove - Unregister SCSI host structure with upper layers
2985 * @ap: Port to unregister
2986 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
2988 * LOCKING: Inherited from caller.
2991 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
2993 struct Scsi_Host *sh = ap->host;
2995 DPRINTK("ENTER\n");
2997 if (do_unregister)
2998 scsi_remove_host(sh);
3000 ap->ops->port_stop(ap);
3004 * ata_host_init - Initialize an ata_port structure
3005 * @ap: Structure to initialize
3006 * @host: associated SCSI mid-layer structure
3007 * @host_set: Collection of hosts to which @ap belongs
3008 * @ent: Probe information provided by low-level driver
3009 * @port_no: Port number associated with this ata_port
3011 * LOCKING: Inherited from caller.
3015 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3016 struct ata_host_set *host_set,
3017 struct ata_probe_ent *ent, unsigned int port_no)
3019 unsigned int i;
3021 host->max_id = 16;
3022 host->max_lun = 1;
3023 host->max_channel = 1;
3024 host->unique_id = ata_unique_id++;
3025 host->max_cmd_len = 12;
3026 scsi_set_device(host, &ent->pdev->dev);
3027 scsi_assign_lock(host, &host_set->lock);
3029 ap->flags = ATA_FLAG_PORT_DISABLED;
3030 ap->id = host->unique_id;
3031 ap->host = host;
3032 ap->ctl = ATA_DEVCTL_OBS;
3033 ap->host_set = host_set;
3034 ap->port_no = port_no;
3035 ap->pio_mask = ent->pio_mask;
3036 ap->mwdma_mask = ent->mwdma_mask;
3037 ap->udma_mask = ent->udma_mask;
3038 ap->flags |= ent->host_flags;
3039 ap->ops = ent->port_ops;
3040 ap->cbl = ATA_CBL_NONE;
3041 ap->active_tag = ATA_TAG_POISON;
3042 ap->last_ctl = 0xFF;
3044 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3045 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3047 for (i = 0; i < ATA_MAX_DEVICES; i++)
3048 ap->device[i].devno = i;
3050 #ifdef ATA_IRQ_TRAP
3051 ap->stats.unhandled_irq = 1;
3052 ap->stats.idle_irq = 1;
3053 #endif
3055 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3059 * ata_host_add - Attach low-level ATA driver to system
3060 * @ent: Information provided by low-level driver
3061 * @host_set: Collections of ports to which we add
3062 * @port_no: Port number associated with this host
3064 * LOCKING: Inherited from caller.
3066 * RETURNS: New ata_port on success, NULL on failure.
3070 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3071 struct ata_host_set *host_set,
3072 unsigned int port_no)
3074 struct Scsi_Host *host;
3075 struct ata_port *ap;
3076 int rc;
3078 DPRINTK("ENTER\n");
3079 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3080 if (!host)
3081 return NULL;
3083 ap = (struct ata_port *) &host->hostdata[0];
3085 ata_host_init(ap, host, host_set, ent, port_no);
3087 rc = ap->ops->port_start(ap);
3088 if (rc)
3089 goto err_out;
3091 return ap;
3093 err_out:
3094 scsi_host_put(host);
3095 return NULL;
3099 * ata_device_add - register an ATA host with the ATA and SCSI layers
3100 * @ent: probe information describing the host to be registered
3102 * LOCKING: Inherited from calling layer (may sleep).
3104 * RETURNS: Number of ports registered, or zero on error.
3108 int ata_device_add(struct ata_probe_ent *ent)
3110 unsigned int count = 0, i;
3111 struct pci_dev *pdev = ent->pdev;
3112 struct ata_host_set *host_set;
3114 DPRINTK("ENTER\n");
3115 /* alloc a container for our list of ATA ports (buses) */
3116 host_set = kmalloc(sizeof(struct ata_host_set) +
3117 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3118 if (!host_set)
3119 return 0;
3120 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3121 spin_lock_init(&host_set->lock);
3123 host_set->pdev = pdev;
3124 host_set->n_ports = ent->n_ports;
3125 host_set->irq = ent->irq;
3126 host_set->mmio_base = ent->mmio_base;
3127 host_set->private_data = ent->private_data;
3128 host_set->ops = ent->port_ops;
3130 /* register each port bound to this device */
3131 for (i = 0; i < ent->n_ports; i++) {
3132 struct ata_port *ap;
3133 unsigned long xfer_mode_mask;
3135 ap = ata_host_add(ent, host_set, i);
3136 if (!ap)
3137 goto err_out;
3139 host_set->ports[i] = ap;
3140 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
3141 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3142 (ap->pio_mask << ATA_SHIFT_PIO);
3144 /* print per-port info to dmesg */
3145 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3146 "bmdma 0x%lX irq %lu\n",
3147 ap->id,
3148 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3149 ata_mode_string(xfer_mode_mask),
3150 ap->ioaddr.cmd_addr,
3151 ap->ioaddr.ctl_addr,
3152 ap->ioaddr.bmdma_addr,
3153 ent->irq);
3155 ata_chk_status(ap);
3156 host_set->ops->irq_clear(ap);
3157 count++;
3160 if (!count) {
3161 kfree(host_set);
3162 return 0;
3165 /* obtain irq, that is shared between channels */
3166 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3167 DRV_NAME, host_set))
3168 goto err_out;
3170 /* perform each probe synchronously */
3171 DPRINTK("probe begin\n");
3172 for (i = 0; i < count; i++) {
3173 struct ata_port *ap;
3174 int rc;
3176 ap = host_set->ports[i];
3178 DPRINTK("ata%u: probe begin\n", ap->id);
3179 rc = ata_bus_probe(ap);
3180 DPRINTK("ata%u: probe end\n", ap->id);
3182 if (rc) {
3183 /* FIXME: do something useful here?
3184 * Current libata behavior will
3185 * tear down everything when
3186 * the module is removed
3187 * or the h/w is unplugged.
3191 rc = scsi_add_host(ap->host, &pdev->dev);
3192 if (rc) {
3193 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3194 ap->id);
3195 /* FIXME: do something useful here */
3196 /* FIXME: handle unconditional calls to
3197 * scsi_scan_host and ata_host_remove, below,
3198 * at the very least
3203 /* probes are done, now scan each port's disk(s) */
3204 DPRINTK("probe begin\n");
3205 for (i = 0; i < count; i++) {
3206 struct ata_port *ap = host_set->ports[i];
3208 scsi_scan_host(ap->host);
3211 pci_set_drvdata(pdev, host_set);
3213 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3214 return ent->n_ports; /* success */
3216 err_out:
3217 for (i = 0; i < count; i++) {
3218 ata_host_remove(host_set->ports[i], 1);
3219 scsi_host_put(host_set->ports[i]->host);
3221 kfree(host_set);
3222 VPRINTK("EXIT, returning 0\n");
3223 return 0;
3227 * ata_scsi_release - SCSI layer callback hook for host unload
3228 * @host: libata host to be unloaded
3230 * Performs the duties necessary to shut down a libata port:
3231 * disable the port and release its resources.
3233 * LOCKING:
3234 * Inherited from SCSI layer.
3236 * RETURNS:
3237 * One.
3240 int ata_scsi_release(struct Scsi_Host *host)
3242 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3244 DPRINTK("ENTER\n");
3246 ap->ops->port_disable(ap);
3247 ata_host_remove(ap, 0);
3249 DPRINTK("EXIT\n");
3250 return 1;
3254 * ata_std_ports - initialize ioaddr with standard port offsets.
3255 * @ioaddr: IO address structure to be initialized
3257 void ata_std_ports(struct ata_ioports *ioaddr)
3259 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3260 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3261 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3262 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3263 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3264 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3265 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3266 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3267 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3268 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
3271 static struct ata_probe_ent *
3272 ata_probe_ent_alloc(int n, struct pci_dev *pdev, struct ata_port_info **port)
3274 struct ata_probe_ent *probe_ent;
3275 int i;
3277 probe_ent = kmalloc(sizeof(*probe_ent) * n, GFP_KERNEL);
3278 if (!probe_ent) {
3279 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3280 pci_name(pdev));
3281 return NULL;
3284 memset(probe_ent, 0, sizeof(*probe_ent) * n);
3286 for (i = 0; i < n; i++) {
3287 INIT_LIST_HEAD(&probe_ent[i].node);
3288 probe_ent[i].pdev = pdev;
3290 probe_ent[i].sht = port[i]->sht;
3291 probe_ent[i].host_flags = port[i]->host_flags;
3292 probe_ent[i].pio_mask = port[i]->pio_mask;
3293 probe_ent[i].mwdma_mask = port[i]->mwdma_mask;
3294 probe_ent[i].udma_mask = port[i]->udma_mask;
3295 probe_ent[i].port_ops = port[i]->port_ops;
3299 return probe_ent;
3302 struct ata_probe_ent *
3303 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3305 struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(1, pdev, port);
3306 if (!probe_ent)
3307 return NULL;
3309 probe_ent->n_ports = 2;
3310 probe_ent->irq = pdev->irq;
3311 probe_ent->irq_flags = SA_SHIRQ;
3313 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3314 probe_ent->port[0].altstatus_addr =
3315 probe_ent->port[0].ctl_addr =
3316 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3317 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3319 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3320 probe_ent->port[1].altstatus_addr =
3321 probe_ent->port[1].ctl_addr =
3322 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
3323 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3325 ata_std_ports(&probe_ent->port[0]);
3326 ata_std_ports(&probe_ent->port[1]);
3328 return probe_ent;
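/* BAR usage assumed by the native-mode setup above (standard PCI IDE):
 *   BAR 0/1 - primary channel command and control blocks
 *   BAR 2/3 - secondary channel command and control blocks
 *   BAR 4   - bus-master DMA registers, 8 bytes per channel
 *             (secondary channel at offset +8)
 */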
3331 struct ata_probe_ent *
3332 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port)
3334 struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(2, pdev, port);
3335 if (!probe_ent)
3336 return NULL;
3338 probe_ent[0].n_ports = 1;
3339 probe_ent[0].irq = 14;
3341 probe_ent[1].n_ports = 1;
3342 probe_ent[1].irq = 15;
3344 probe_ent[0].port[0].cmd_addr = 0x1f0;
3345 probe_ent[0].port[0].altstatus_addr =
3346 probe_ent[0].port[0].ctl_addr = 0x3f6;
3347 probe_ent[0].port[0].bmdma_addr = pci_resource_start(pdev, 4);
3349 probe_ent[1].port[0].cmd_addr = 0x170;
3350 probe_ent[1].port[0].altstatus_addr =
3351 probe_ent[1].port[0].ctl_addr = 0x376;
3352 probe_ent[1].port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3354 ata_std_ports(&probe_ent[0].port[0]);
3355 ata_std_ports(&probe_ent[1].port[0]);
3357 return probe_ent;
3361 * ata_pci_init_one - Initialize/register PCI IDE host controller
3362 * @pdev: Controller to be initialized
3363 * @port_info: Information from low-level host driver
3364 * @n_ports: Number of ports attached to host controller
3366 * LOCKING:
3367 * Inherited from PCI layer (may sleep).
3369 * RETURNS:
3373 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3374 unsigned int n_ports)
3376 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3377 struct ata_port_info *port[2];
3378 u8 tmp8, mask;
3379 unsigned int legacy_mode = 0;
3380 int rc;
3382 DPRINTK("ENTER\n");
3384 port[0] = port_info[0];
3385 if (n_ports > 1)
3386 port[1] = port_info[1];
3387 else
3388 port[1] = port[0];
3390 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0) {
3391 /* TODO: support transitioning to native mode? */
3392 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
3393 mask = (1 << 2) | (1 << 0);
3394 if ((tmp8 & mask) != mask)
3395 legacy_mode = (1 << 3);
3398 /* FIXME... */
3399 if ((!legacy_mode) && (n_ports > 1)) {
3400 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3401 return -EINVAL;
3404 rc = pci_enable_device(pdev);
3405 if (rc)
3406 return rc;
3408 rc = pci_request_regions(pdev, DRV_NAME);
3409 if (rc)
3410 goto err_out;
3412 if (legacy_mode) {
3413 if (!request_region(0x1f0, 8, "libata")) {
3414 struct resource *conflict, res;
3415 res.start = 0x1f0;
3416 res.end = 0x1f0 + 8 - 1;
3417 conflict = ____request_resource(&ioport_resource, &res);
3418 if (!strcmp(conflict->name, "libata"))
3419 legacy_mode |= (1 << 0);
3420 else
3421 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3422 } else
3423 legacy_mode |= (1 << 0);
3425 if (!request_region(0x170, 8, "libata")) {
3426 struct resource *conflict, res;
3427 res.start = 0x170;
3428 res.end = 0x170 + 8 - 1;
3429 conflict = ____request_resource(&ioport_resource, &res);
3430 if (!strcmp(conflict->name, "libata"))
3431 legacy_mode |= (1 << 1);
3432 else
3433 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3434 } else
3435 legacy_mode |= (1 << 1);
3438 /* we have legacy mode, but all ports are unavailable */
3439 if (legacy_mode == (1 << 3)) {
3440 rc = -EBUSY;
3441 goto err_out_regions;
3444 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3445 if (rc)
3446 goto err_out_regions;
3447 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3448 if (rc)
3449 goto err_out_regions;
3451 if (legacy_mode) {
3452 probe_ent = ata_pci_init_legacy_mode(pdev, port);
3453 if (probe_ent)
3454 probe_ent2 = &probe_ent[1];
3455 } else
3456 probe_ent = ata_pci_init_native_mode(pdev, port);
3457 if (!probe_ent) {
3458 rc = -ENOMEM;
3459 goto err_out_regions;
3462 pci_set_master(pdev);
3464 /* FIXME: check ata_device_add return */
3465 if (legacy_mode) {
3466 if (legacy_mode & (1 << 0))
3467 ata_device_add(probe_ent);
3468 if (legacy_mode & (1 << 1))
3469 ata_device_add(probe_ent2);
3470 } else {
3471 ata_device_add(probe_ent);
3473 kfree(probe_ent);
3475 return 0;
3477 err_out_regions:
3478 if (legacy_mode & (1 << 0))
3479 release_region(0x1f0, 8);
3480 if (legacy_mode & (1 << 1))
3481 release_region(0x170, 8);
3482 pci_release_regions(pdev);
3483 err_out:
3484 pci_disable_device(pdev);
3485 return rc;
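/* Minimal caller sketch (illustrative; all my_* names are hypothetical):
 * a native-mode controller's PCI probe routine can be little more than a
 * port-info table plus a call to ata_pci_init_one().
 */
#if 0
static struct ata_port_info my_port_info = {
	.sht		= &my_sht,
	.host_flags	= ATA_FLAG_SRST,
	.pio_mask	= 0x1f,		/* PIO modes 0-4 */
	.udma_mask	= 0x3f,		/* UDMA modes 0-5 */
	.port_ops	= &my_port_ops,
};

static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_port_info *ppi = &my_port_info;

	return ata_pci_init_one(pdev, &ppi, 1);
}
#endif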
3489 * ata_pci_remove_one - PCI layer callback for device removal
3490 * @pdev: PCI device that was removed
3492 * PCI layer indicates to libata via this hook that
3493 * hot-unplug or module unload event has occurred.
3494 * Handle this by unregistering all objects associated
3495 * with this PCI device. Free those objects. Then finally
3496 * release PCI resources and disable device.
3498 * LOCKING:
3499 * Inherited from PCI layer (may sleep).
3502 void ata_pci_remove_one (struct pci_dev *pdev)
3504 struct ata_host_set *host_set = pci_get_drvdata(pdev);
3505 struct ata_port *ap;
3506 unsigned int i;
3508 for (i = 0; i < host_set->n_ports; i++) {
3509 ap = host_set->ports[i];
3511 scsi_remove_host(ap->host);
3514 free_irq(host_set->irq, host_set);
3515 if (host_set->ops->host_stop)
3516 host_set->ops->host_stop(host_set);
3517 if (host_set->mmio_base)
3518 iounmap(host_set->mmio_base);
3520 for (i = 0; i < host_set->n_ports; i++) {
3521 ap = host_set->ports[i];
3523 ata_scsi_release(ap->host);
3524 scsi_host_put(ap->host);
3527 pci_release_regions(pdev);
3529 for (i = 0; i < host_set->n_ports; i++) {
3530 struct ata_ioports *ioaddr;
3532 ap = host_set->ports[i];
3533 ioaddr = &ap->ioaddr;
3535 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3536 if (ioaddr->cmd_addr == 0x1f0)
3537 release_region(0x1f0, 8);
3538 else if (ioaddr->cmd_addr == 0x170)
3539 release_region(0x170, 8);
3543 kfree(host_set);
3544 pci_disable_device(pdev);
3545 pci_set_drvdata(pdev, NULL);
3548 /* move to PCI subsystem */
3549 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
3551 unsigned long tmp = 0;
3553 switch (bits->width) {
3554 case 1: {
3555 u8 tmp8 = 0;
3556 pci_read_config_byte(pdev, bits->reg, &tmp8);
3557 tmp = tmp8;
3558 break;
3560 case 2: {
3561 u16 tmp16 = 0;
3562 pci_read_config_word(pdev, bits->reg, &tmp16);
3563 tmp = tmp16;
3564 break;
3566 case 4: {
3567 u32 tmp32 = 0;
3568 pci_read_config_dword(pdev, bits->reg, &tmp32);
3569 tmp = tmp32;
3570 break;
3573 default:
3574 return -EINVAL;
3577 tmp &= bits->mask;
3579 return (tmp == bits->val) ? 1 : 0;
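/* Usage sketch (illustrative; the register offset and bit values below
 * are made up): test a port-enable bit in config space before probing.
 */
#if 0
static struct pci_bits my_enable_bits = {
	0x41,	/* reg: hypothetical config-space offset */
	1,	/* width: one byte */
	0x08,	/* mask */
	0x08,	/* val: the bit must be set */
};

	if (!pci_test_config_bits(pdev, &my_enable_bits))
		return -ENODEV;	/* port disabled in hardware */
#endif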
3584 * ata_init - module load entry point; create the libata workqueue
3586 * LOCKING: None.
3588 * RETURNS: Zero on success, -ENOMEM if the workqueue cannot be created.
3592 static int __init ata_init(void)
3594 ata_wq = create_workqueue("ata");
3595 if (!ata_wq)
3596 return -ENOMEM;
3598 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
3599 return 0;
3602 static void __exit ata_exit(void)
3604 destroy_workqueue(ata_wq);
3607 module_init(ata_init);
3608 module_exit(ata_exit);
3611 * libata is essentially a library of internal helper functions for
3612 * low-level ATA host controller drivers. As such, the API/ABI is
3613 * likely to change as new drivers are added and updated.
3614 * Do not depend on ABI/API stability.
3617 EXPORT_SYMBOL_GPL(pci_test_config_bits);
3618 EXPORT_SYMBOL_GPL(ata_std_bios_param);
3619 EXPORT_SYMBOL_GPL(ata_std_ports);
3620 EXPORT_SYMBOL_GPL(ata_device_add);
3621 EXPORT_SYMBOL_GPL(ata_sg_init);
3622 EXPORT_SYMBOL_GPL(ata_sg_init_one);
3623 EXPORT_SYMBOL_GPL(ata_qc_complete);
3624 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
3625 EXPORT_SYMBOL_GPL(ata_eng_timeout);
3626 EXPORT_SYMBOL_GPL(ata_tf_load);
3627 EXPORT_SYMBOL_GPL(ata_tf_read);
3628 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
3629 EXPORT_SYMBOL_GPL(ata_std_dev_select);
3630 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
3631 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
3632 EXPORT_SYMBOL_GPL(ata_pci_init_legacy_mode);
3633 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
3634 EXPORT_SYMBOL_GPL(ata_check_status);
3635 EXPORT_SYMBOL_GPL(ata_exec_command);
3636 EXPORT_SYMBOL_GPL(ata_port_start);
3637 EXPORT_SYMBOL_GPL(ata_port_stop);
3638 EXPORT_SYMBOL_GPL(ata_interrupt);
3639 EXPORT_SYMBOL_GPL(ata_qc_prep);
3640 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
3641 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3642 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
3643 EXPORT_SYMBOL_GPL(ata_port_probe);
3644 EXPORT_SYMBOL_GPL(sata_phy_reset);
3645 EXPORT_SYMBOL_GPL(__sata_phy_reset);
3646 EXPORT_SYMBOL_GPL(ata_bus_reset);
3647 EXPORT_SYMBOL_GPL(ata_port_disable);
3648 EXPORT_SYMBOL_GPL(ata_pci_init_one);
3649 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
3650 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
3651 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
3652 EXPORT_SYMBOL_GPL(ata_scsi_error);
3653 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
3654 EXPORT_SYMBOL_GPL(ata_scsi_release);
3655 EXPORT_SYMBOL_GPL(ata_host_intr);
3656 EXPORT_SYMBOL_GPL(ata_dev_classify);
3657 EXPORT_SYMBOL_GPL(ata_dev_id_string);