/*
 *  sata_via.c - VIA Serial ATA controllers
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available under NDA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"
/*
 * vt8251 is different from the other VIA SATA controllers.  It has two
 * channels, and each channel has both a Master and a Slave slot.
 */
enum board_ids_enum {
	vt6420,
	vt6421,
	vt8251,
};

enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	SVIA_MISC_3		= 0x46,	/* Miscellaneous Control III */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */
	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
};

struct svia_priv {
	bool			wd_workaround;
};
static int vt6420_hotplug;
module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);
static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },	/* 2 sata chnls, 1 pata chnl */
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },	/* 2 sata chnls (Master/Slave) */
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};
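/*
 * The .driver_data of each ID above carries a board_ids_enum value;
 * svia_init_one() reads it back via ent->driver_data to select the
 * matching prepare_host routine and port operations.
 */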
static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= svia_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};
static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};
static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};
static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
	.error_handler		= vt6421_error_handler,
};
static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};
static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};
static const struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};
static const struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};
static const struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = ioread32(link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	iowrite32(val, link->ap->ioaddr.scr_addr + (4 * sc_reg));
	return 0;
}
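/*
 * On vt6420/vt6421 the SCRs are reached through iomapped BAR 5:
 * SCR_STATUS, SCR_ERROR and SCR_CONTROL sit at consecutive 4-byte
 * offsets from ioaddr.scr_addr, hence the "4 * sc_reg" indexing above.
 * vt8251 keeps them in PCI config space instead (see below).
 */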
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bit0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit4 of the config byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bit2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bit0 and bit1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bit2 and bit3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 use 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		return -EINVAL;
	}
}
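/*
 * vt8251 has no memory-mapped SCR block; link status and control live
 * in per-slot PCI config space locations (the 0xA0/0xA4 bytes and the
 * 0xB0-based dwords above), so these two helpers translate between
 * that layout and the standard SStatus/SControl/SError bit fields.
 */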
/**
 *	svia_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	This works around an internal bug of VIA chipsets, which reset
 *	the device register after the IEN bit in the ctl register is
 *	changed.
 */
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_taskfile ttf;

	if (tf->ctl != ap->last_ctl) {
		ttf = *tf;
		ttf.flags |= ATA_TFLAG_DEVICE;
		tf = &ttf;
	}
	ata_sff_tf_load(ap, tf);
}
static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in a
	 * certain way.  Leave it alone and just clear pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}
/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are unreliable and may hang the whole
 *	machine completely if accessed with the wrong timing.  To avoid
 *	such a catastrophe, vt6420 doesn't provide generic SCR access
 *	operations, but uses SStatus and SControl only during boot
 *	probing in a controlled way.
 *
 *	As the old (pre EH update) probing code is proven to work, we
 *	strictly follow the access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}
static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if ((qc->tf.command == ATA_CMD_PACKET) &&
	    (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
		/* Prevents corruption on some ATAPI burners */
		ata_sff_pause(ap);
	}
	ata_bmdma_start(qc);
}
static int vt6421_pata_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
	if (tmp & 0x10)
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };

	pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
			      pio_bits[adev->pio_mode - XFER_PIO_0]);
}
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };

	pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
			      udma_bits[adev->dma_mode - XFER_UDMA_0]);
}
static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};

static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}
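/*
 * Both chips expose the per-port SCR blocks inside BAR 5, but with
 * different strides: 128 bytes per port on vt6420 versus 64 bytes per
 * port on vt6421, which is all these two helpers encode.
 */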
static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const * iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}
static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	if (vt6420_hotplug) {
		ppi[0]->port_ops->scr_read = svia_scr_read;
		ppi[0]->port_ops->scr_write = svia_scr_write;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	return 0;
}
static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}
static void svia_wd_fix(struct pci_dev *pdev)
{
	u8 tmp8;

	pci_read_config_byte(pdev, 0x52, &tmp8);
	pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}
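/*
 * svia_wd_fix() sets bit 2 of config register 0x52 (Rx52[2]), raising
 * the internal FIFO flow-control watermark; see the long comment in
 * svia_configure() for the WD-drive FIFO overflow it works around.
 */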
static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);

	/* if the IRQ was not handled, it might be a hotplug IRQ */
	if (rc != IRQ_HANDLED) {
		u32 serror;
		unsigned long flags;

		spin_lock_irqsave(&host->lock, flags);
		/* check for hotplug on port 0 */
		svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
			ata_port_freeze(host->ports[0]);
			rc = IRQ_HANDLED;
		}
		/* check for hotplug on port 1 */
		svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
		if (serror & SERR_PHYRDY_CHG) {
			ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
			ata_port_freeze(host->ports[1]);
			rc = IRQ_HANDLED;
		}
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return rc;
}
static void vt6421_error_handler(struct ata_port *ap)
{
	struct svia_priv *hpriv = ap->host->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 serror;

	/* see svia_configure() for description */
	if (!hpriv->wd_workaround) {
		svia_scr_read(&ap->link, SCR_ERROR, &serror);
		if (serror == 0x1000500) {
			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
			svia_wd_fix(pdev);
			hpriv->wd_workaround = true;
			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
		}
	}

	ata_sff_error_handler(ap);
}
static void svia_configure(struct pci_dev *pdev, int board_id,
			   struct svia_priv *hpriv)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int)tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
		/* enable IRQ on hotplug */
		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
			dev_dbg(&pdev->dev,
				"enabling SATA hotplug (0x%x)\n",
				(int)tmp8);
			tmp8 |= SATA_HOTPLUG;
			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
		}
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
	 *
	 * When the host issues HOLD, the device may send up to 20DW of
	 * data before acknowledging it with HOLDA, and the host should
	 * be able to buffer them in FIFO.  Unfortunately, some WD drives
	 * send up to 40DW before acknowledging HOLD and, in the default
	 * configuration, this ends up overflowing vt6421's FIFO, making
	 * the controller abort the transaction with R_ERR.
	 *
	 * Rx52[2] is the internal 128DW FIFO flow control watermark
	 * adjusting mechanism enable bit; the default value 0 means the
	 * host will issue HOLD to the device when the remaining FIFO
	 * size goes below 32DW.  Setting it to 1 makes the watermark
	 * 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 *
	 * As the fix slows down data transfer, apply it only if the error
	 * actually appears - see vt6421_error_handler().
	 * Apply the fix always on vt6420 as we don't know if SCR_ERROR
	 * can be read safely.
	 */
	if (board_id == vt6420) {
		svia_wd_fix(pdev);
		hpriv->wd_workaround = true;
	}
}
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int i;
	int rc;
	struct ata_host *host = NULL;
	int board_id = (int) ent->driver_data;
	const unsigned *bar_sizes;
	struct svia_priv *hpriv;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (board_id == vt6421)
		bar_sizes = &vt6421_bar_sizes[0];
	else
		bar_sizes = &svia_bar_sizes[0];

	for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
		if ((pci_resource_start(pdev, i) == 0) ||
		    (pci_resource_len(pdev, i) < bar_sizes[i])) {
			dev_err(&pdev->dev,
				"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
				i,
				(unsigned long long)pci_resource_start(pdev, i),
				(unsigned long long)pci_resource_len(pdev, i));
			return -ENODEV;
		}

	switch (board_id) {
	case vt6420:
		rc = vt6420_prepare_host(pdev, &host);
		break;
	case vt6421:
		rc = vt6421_prepare_host(pdev, &host);
		break;
	case vt8251:
		rc = vt8251_prepare_host(pdev, &host);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	svia_configure(pdev, board_id, hpriv);

	pci_set_master(pdev);
	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
		return ata_host_activate(host, pdev->irq, vt642x_interrupt,
					 IRQF_SHARED, &svia_sht);
	else
		return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
					 IRQF_SHARED, &svia_sht);
}
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct svia_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (hpriv->wd_workaround)
		svia_wd_fix(pdev);
	ata_host_resume(host);

	return 0;
}
#endif

module_pci_driver(svia_pci_driver);