/*
 * pata_atiixp.c	- ATI PATA for new ATA layer
 *			  (C) 2009-2010 Bartlomiej Zolnierkiewicz
 *
 * Based on
 *
 *  linux/drivers/ide/pci/atiixp.c	Version 0.01-bart2	Feb. 26, 2004
 *
 *  Copyright (C) 2003 ATI Inc. <hyu@ati.com>
 *  Copyright (C) 2004 Bartlomiej Zolnierkiewicz
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"
enum {
	ATIIXP_IDE_PIO_TIMING	= 0x40,
	ATIIXP_IDE_MWDMA_TIMING	= 0x44,
	ATIIXP_IDE_PIO_CONTROL	= 0x48,
	ATIIXP_IDE_PIO_MODE	= 0x4a,
	ATIIXP_IDE_UDMA_CONTROL	= 0x54,
	ATIIXP_IDE_UDMA_MODE	= 0x56
};
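
/*
 * All of the above are offsets into the IDE function's PCI configuration
 * space.  The mode registers carry one 4-bit field per drive (drive number
 * dn = 2 * port_no + devno), the 32-bit timing registers carry one timing
 * byte per drive, and the UDMA control register carries one enable bit per
 * drive; the helpers below index them accordingly.
 */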
static int atiixp_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 udma;

	/* Hack from drivers/ide/pci. Really we want to know how to do the
	   raw detection not play follow the bios mode guess */
	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
		return ATA_CBL_PATA80;
	return ATA_CBL_PATA40;
}
static DEFINE_SPINLOCK(atiixp_lock);
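
/*
 * atiixp_lock serializes the read-modify-write updates of the shared
 * timing/mode configuration registers, since both channels reach them
 * through the same PCI config words in the set_piomode/set_dmamode paths.
 */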
/**
 *	atiixp_prereset	-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active.
 */
static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits atiixp_enable_bits[] = {
		{ 0x48, 1, 0x01, 0x00 },
		{ 0x48, 1, 0x08, 0x00 }
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}
/**
 *	atiixp_set_pio_timing	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *	@pio: Requested PIO
 *
 *	Called by both the pio and dma setup functions to set the controller
 *	timings for PIO transfers. We must load both the mode number and
 *	timing values into the controller.
 */
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* dn selects the 4-bit mode nibble, timing_shift the timing byte */
	int dn = 2 * ap->port_no + adev->devno;
	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
	u32 pio_timing_data;
	u16 pio_mode_data;

	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
	pio_mode_data &= ~(0x7 << (4 * dn));
	pio_mode_data |= pio << (4 * dn);
	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);

	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
	pio_timing_data &= ~(0xFF << timing_shift);
	pio_timing_data |= (pio_timings[pio] << timing_shift);
	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
/**
 *	atiixp_set_piomode	-	set initial PIO mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Called to do the PIO mode setup. We use a shared helper for this
 *	as the DMA setup must also adjust the PIO timing information.
 */
static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned long flags;

	spin_lock_irqsave(&atiixp_lock, flags);
	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
	spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
 *	atiixp_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Called to do the DMA mode setup. We use timing tables for most
 *	modes but must tune an appropriate PIO mode to match.
 */
static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dma = adev->dma_mode;
	int dn = 2 * ap->port_no + adev->devno;
	int wanted_pio;
	unsigned long flags;

	spin_lock_irqsave(&atiixp_lock, flags);

	if (adev->dma_mode >= XFER_UDMA_0) {
		u16 udma_mode_data;

		dma -= XFER_UDMA_0;

		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
		udma_mode_data &= ~(0x7 << (4 * dn));
		udma_mode_data |= dma << (4 * dn);
		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
	} else {
		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
		u32 mwdma_timing_data;

		dma -= XFER_MW_DMA_0;

		pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				      &mwdma_timing_data);
		mwdma_timing_data &= ~(0xFF << timing_shift);
		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
		pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				       mwdma_timing_data);
	}

	/*
	 * We must now look at the PIO mode situation. We may need to
	 * adjust the PIO mode to keep the timings acceptable
	 */
	if (adev->dma_mode >= XFER_MW_DMA_2)
		wanted_pio = 4;
	else if (adev->dma_mode == XFER_MW_DMA_1)
		wanted_pio = 3;
	else if (adev->dma_mode == XFER_MW_DMA_0)
		wanted_pio = 0;
	else BUG();

	if (adev->pio_mode != wanted_pio)
		atiixp_set_pio_timing(ap, adev, wanted_pio);
	spin_unlock_irqrestore(&atiixp_lock, flags);
}
/**
 *	atiixp_bmdma_start	-	DMA start callback
 *	@qc: Command in progress
 *
 *	When DMA begins we need to ensure that the UDMA control
 *	register for the channel is correctly set.
 *
 *	Note: The host lock held by the libata layer protects
 *	us from two channels both trying to set DMA bits at once
 */
static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + adev->devno;
	u16 tmp16;

	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	if (ata_using_udma(adev))
		tmp16 |= (1 << dn);
	else
		tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_start(qc);
}
/**
 *	atiixp_bmdma_stop	-	DMA stop callback
 *	@qc: Command in progress
 *
 *	DMA has completed. Clear the UDMA flag as the next operations will
 *	be PIO ones not UDMA data transfer.
 *
 *	Note: The host lock held by the libata layer protects
 *	us from two channels both trying to set DMA bits at once
 */
static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dn = (2 * ap->port_no) + qc->dev->devno;
	u16 tmp16;

	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
	tmp16 &= ~(1 << dn);
	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
	ata_bmdma_stop(qc);
}
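
/*
 * The controller is driven through libata's "dumb" BMDMA path:
 * ata_bmdma_dumb_qc_prep() builds the PRD table and the SCSI host template
 * below advertises the matching LIBATA_DUMB_MAX_PRD scatter/gather limit.
 */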
static struct scsi_host_template atiixp_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
};
static struct ata_port_operations atiixp_port_ops = {
	.inherits	= &ata_bmdma_port_ops,

	.qc_prep	= ata_bmdma_dumb_qc_prep,
	.bmdma_start	= atiixp_bmdma_start,
	.bmdma_stop	= atiixp_bmdma_stop,

	.prereset	= atiixp_prereset,
	.cable_detect	= atiixp_cable_detect,
	.set_piomode	= atiixp_set_piomode,
	.set_dmamode	= atiixp_set_dmamode,
};
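
/*
 * atiixp_init_one() hands the same port_info to both channels; the
 * ATA_HOST_PARALLEL_SCAN flag lets libata probe the two ports in parallel.
 */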
static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA12_ONLY,
		.udma_mask = ATA_UDMA5,
		.port_ops = &atiixp_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, &info };

	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
				      ATA_HOST_PARALLEL_SCAN);
}
static const struct pci_device_id atiixp[] = {
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },

	{ },	/* terminating entry */
};
static struct pci_driver atiixp_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= atiixp,
	.probe		= atiixp_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.resume		= ata_pci_device_resume,
	.suspend	= ata_pci_device_suspend,
#endif
};
static int __init atiixp_init(void)
{
	return pci_register_driver(&atiixp_pci_driver);
}

static void __exit atiixp_exit(void)
{
	pci_unregister_driver(&atiixp_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);

module_init(atiixp_init);
module_exit(atiixp_exit);