[tomato.git] release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/ata/sata_nv.c
/*
 * sata_nv.c - NVIDIA nForce SATA
 *
 * Copyright 2004 NVIDIA Corp. All rights reserved.
 * Copyright 2004 Andrew Chew
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * No hardware documentation available outside of NVIDIA.
 * This driver programs the NVIDIA SATA controller in a similar
 * fashion as with other PCI IDE BMDMA controllers, with a few
 * NV-specific details such as register offsets, SATA phy location,
 * hotplug info, etc.
 *
 * CK804/MCP04 controllers support an alternate programming interface
 * similar to the ADMA specification (with some modifications).
 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 * sent through the legacy interface.
 */
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/gfp.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/interrupt.h>
47 #include <linux/device.h>
48 #include <scsi/scsi_host.h>
49 #include <scsi/scsi_device.h>
50 #include <linux/libata.h>
52 #define DRV_NAME "sata_nv"
53 #define DRV_VERSION "3.5"
55 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
57 enum {
58 NV_MMIO_BAR = 5,
60 NV_PORTS = 2,
61 NV_PIO_MASK = ATA_PIO4,
62 NV_MWDMA_MASK = ATA_MWDMA2,
63 NV_UDMA_MASK = ATA_UDMA6,
64 NV_PORT0_SCR_REG_OFFSET = 0x00,
65 NV_PORT1_SCR_REG_OFFSET = 0x40,
67 /* INT_STATUS/ENABLE */
68 NV_INT_STATUS = 0x10,
69 NV_INT_ENABLE = 0x11,
70 NV_INT_STATUS_CK804 = 0x440,
71 NV_INT_ENABLE_CK804 = 0x441,
73 /* INT_STATUS/ENABLE bits */
74 NV_INT_DEV = 0x01,
75 NV_INT_PM = 0x02,
76 NV_INT_ADDED = 0x04,
77 NV_INT_REMOVED = 0x08,
79 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
81 NV_INT_ALL = 0x0f,
82 NV_INT_MASK = NV_INT_DEV |
83 NV_INT_ADDED | NV_INT_REMOVED,
85 /* INT_CONFIG */
86 NV_INT_CONFIG = 0x12,
87 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
89 // For PCI config register 20
90 NV_MCP_SATA_CFG_20 = 0x50,
91 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
92 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
93 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
94 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
95 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
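	/*
	 * ADMA per-port buffer sizing (follows directly from the constants
	 * below): each of the 32 CPBs is 128 bytes and is followed by
	 * (1024 - 128) / 16 = 56 external APRD entries of 16 bytes, so one
	 * tag occupies exactly 1 KiB and the whole per-port DMA area is
	 * 32 KiB.  Together with the 5 APRDs embedded in the CPB itself this
	 * gives NV_ADMA_SGTBL_TOTAL_LEN = 61 usable S/G entries per command.
	 */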
97 NV_ADMA_MAX_CPBS = 32,
98 NV_ADMA_CPB_SZ = 128,
99 NV_ADMA_APRD_SZ = 16,
100 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
101 NV_ADMA_APRD_SZ,
102 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
103 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
104 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
105 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
107 /* BAR5 offset to ADMA general registers */
108 NV_ADMA_GEN = 0x400,
109 NV_ADMA_GEN_CTL = 0x00,
110 NV_ADMA_NOTIFIER_CLEAR = 0x30,
112 /* BAR5 offset to ADMA ports */
113 NV_ADMA_PORT = 0x480,
115 /* size of ADMA port register space */
116 NV_ADMA_PORT_SIZE = 0x100,
118 /* ADMA port registers */
119 NV_ADMA_CTL = 0x40,
120 NV_ADMA_CPB_COUNT = 0x42,
121 NV_ADMA_NEXT_CPB_IDX = 0x43,
122 NV_ADMA_STAT = 0x44,
123 NV_ADMA_CPB_BASE_LOW = 0x48,
124 NV_ADMA_CPB_BASE_HIGH = 0x4C,
125 NV_ADMA_APPEND = 0x50,
126 NV_ADMA_NOTIFIER = 0x68,
127 NV_ADMA_NOTIFIER_ERROR = 0x6C,
129 /* NV_ADMA_CTL register bits */
130 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
131 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
132 NV_ADMA_CTL_GO = (1 << 7),
133 NV_ADMA_CTL_AIEN = (1 << 8),
134 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
135 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
137 /* CPB response flag bits */
138 NV_CPB_RESP_DONE = (1 << 0),
139 NV_CPB_RESP_ATA_ERR = (1 << 3),
140 NV_CPB_RESP_CMD_ERR = (1 << 4),
141 NV_CPB_RESP_CPB_ERR = (1 << 7),
143 /* CPB control flag bits */
144 NV_CPB_CTL_CPB_VALID = (1 << 0),
145 NV_CPB_CTL_QUEUE = (1 << 1),
146 NV_CPB_CTL_APRD_VALID = (1 << 2),
147 NV_CPB_CTL_IEN = (1 << 3),
148 NV_CPB_CTL_FPDMA = (1 << 4),
150 /* APRD flags */
151 NV_APRD_WRITE = (1 << 1),
152 NV_APRD_END = (1 << 2),
153 NV_APRD_CONT = (1 << 3),
155 /* NV_ADMA_STAT flags */
156 NV_ADMA_STAT_TIMEOUT = (1 << 0),
157 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
158 NV_ADMA_STAT_HOTPLUG = (1 << 2),
159 NV_ADMA_STAT_CPBERR = (1 << 4),
160 NV_ADMA_STAT_SERROR = (1 << 5),
161 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
162 NV_ADMA_STAT_IDLE = (1 << 8),
163 NV_ADMA_STAT_LEGACY = (1 << 9),
164 NV_ADMA_STAT_STOPPED = (1 << 10),
165 NV_ADMA_STAT_DONE = (1 << 12),
166 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
167 NV_ADMA_STAT_TIMEOUT,
169 /* port flags */
170 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
171 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
173 /* MCP55 reg offset */
174 NV_CTL_MCP55 = 0x400,
175 NV_INT_STATUS_MCP55 = 0x440,
176 NV_INT_ENABLE_MCP55 = 0x444,
177 NV_NCQ_REG_MCP55 = 0x448,
179 /* MCP55 */
180 NV_INT_ALL_MCP55 = 0xffff,
181 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
182 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
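	/* & 0xfffd leaves bit 1 clear, so the thaw path never enables what
	   appears to be the PM interrupt (cf. NV_SWNCQ_IRQ_PM below) */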
184 /* SWNCQ ENABLE BITS*/
185 NV_CTL_PRI_SWNCQ = 0x02,
186 NV_CTL_SEC_SWNCQ = 0x04,
188 /* SW NCQ status bits*/
189 NV_SWNCQ_IRQ_DEV = (1 << 0),
190 NV_SWNCQ_IRQ_PM = (1 << 1),
191 NV_SWNCQ_IRQ_ADDED = (1 << 2),
192 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
194 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
195 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
196 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
197 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
199 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
				  NV_SWNCQ_IRQ_REMOVED,
};
204 /* ADMA Physical Region Descriptor - one SG segment */
205 struct nv_adma_prd {
206 __le64 addr;
207 __le32 len;
208 u8 flags;
209 u8 packet_len;
	__le16 reserved;
};
213 enum nv_adma_regbits {
214 CMDEND = (1 << 15), /* end of command list */
215 WNB = (1 << 14), /* wait-not-BSY */
216 IGN = (1 << 13), /* ignore this entry */
217 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
218 DA2 = (1 << (2 + 8)),
219 DA1 = (1 << (1 + 8)),
	DA0 = (1 << (0 + 8)),
};
223 /* ADMA Command Parameter Block
224 The first 5 SG segments are stored inside the Command Parameter Block itself.
225 If there are more than 5 segments the remainder are stored in a separate
226 memory area indicated by next_aprd. */
227 struct nv_adma_cpb {
228 u8 resp_flags; /* 0 */
229 u8 reserved1; /* 1 */
230 u8 ctl_flags; /* 2 */
231 /* len is length of taskfile in 64 bit words */
232 u8 len; /* 3 */
233 u8 tag; /* 4 */
234 u8 next_cpb_idx; /* 5 */
235 __le16 reserved2; /* 6-7 */
236 __le16 tf[12]; /* 8-31 */
237 struct nv_adma_prd aprd[5]; /* 32-111 */
238 __le64 next_aprd; /* 112-119 */
	__le64 reserved3; /* 120-127 */
};
243 struct nv_adma_port_priv {
244 struct nv_adma_cpb *cpb;
245 dma_addr_t cpb_dma;
246 struct nv_adma_prd *aprd;
247 dma_addr_t aprd_dma;
248 void __iomem *ctl_block;
249 void __iomem *gen_block;
250 void __iomem *notifier_clear_block;
251 u64 adma_dma_mask;
252 u8 flags;
	int last_issue_ncq;
};
256 struct nv_host_priv {
	unsigned long type;
};
260 struct defer_queue {
261 u32 defer_bits;
262 unsigned int head;
263 unsigned int tail;
	unsigned int tag[ATA_MAX_QUEUE];
};
267 enum ncq_saw_flag_list {
268 ncq_saw_d2h = (1U << 0),
269 ncq_saw_dmas = (1U << 1),
270 ncq_saw_sdb = (1U << 2),
	ncq_saw_backout = (1U << 3),
};
274 struct nv_swncq_port_priv {
275 struct ata_bmdma_prd *prd; /* our SG list */
276 dma_addr_t prd_dma; /* and its DMA mapping */
277 void __iomem *sactive_block;
278 void __iomem *irq_block;
279 void __iomem *tag_block;
280 u32 qc_active;
282 unsigned int last_issue_tag;
	/* FIFO circular queue to store deferred commands */
285 struct defer_queue defer_queue;
287 /* for NCQ interrupt analysis */
288 u32 dhfis_bits;
289 u32 dmafis_bits;
290 u32 sdbfis_bits;
	unsigned int ncq_flags;
};
296 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
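/* Tests the NV_ADMA_GEN_CTL word for a per-port interrupt indication:
 * bit 19 for port 0, bit 31 (19 + 12) for port 1. */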
298 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
299 #ifdef CONFIG_PM
300 static int nv_pci_device_resume(struct pci_dev *pdev);
301 #endif
302 static void nv_ck804_host_stop(struct ata_host *host);
303 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
305 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
306 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
307 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
309 static int nv_hardreset(struct ata_link *link, unsigned int *class,
310 unsigned long deadline);
311 static void nv_nf2_freeze(struct ata_port *ap);
312 static void nv_nf2_thaw(struct ata_port *ap);
313 static void nv_ck804_freeze(struct ata_port *ap);
314 static void nv_ck804_thaw(struct ata_port *ap);
315 static int nv_adma_slave_config(struct scsi_device *sdev);
316 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
317 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
318 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
319 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
320 static void nv_adma_irq_clear(struct ata_port *ap);
321 static int nv_adma_port_start(struct ata_port *ap);
322 static void nv_adma_port_stop(struct ata_port *ap);
323 #ifdef CONFIG_PM
324 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
325 static int nv_adma_port_resume(struct ata_port *ap);
326 #endif
327 static void nv_adma_freeze(struct ata_port *ap);
328 static void nv_adma_thaw(struct ata_port *ap);
329 static void nv_adma_error_handler(struct ata_port *ap);
330 static void nv_adma_host_stop(struct ata_host *host);
331 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
332 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
334 static void nv_mcp55_thaw(struct ata_port *ap);
335 static void nv_mcp55_freeze(struct ata_port *ap);
336 static void nv_swncq_error_handler(struct ata_port *ap);
337 static int nv_swncq_slave_config(struct scsi_device *sdev);
338 static int nv_swncq_port_start(struct ata_port *ap);
339 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
340 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
341 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
342 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
343 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
344 #ifdef CONFIG_PM
345 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
346 static int nv_swncq_port_resume(struct ata_port *ap);
347 #endif
enum nv_host_type
{
351 GENERIC,
352 NFORCE2,
353 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
354 CK804,
355 ADMA,
356 MCP5x,
	SWNCQ,
};
360 static const struct pci_device_id nv_pci_tbl[] = {
361 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
362 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
363 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
364 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
365 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
366 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
367 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
368 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
369 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
370 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
371 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
372 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
373 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
374 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ } /* terminate list */
};
379 static struct pci_driver nv_pci_driver = {
380 .name = DRV_NAME,
381 .id_table = nv_pci_tbl,
382 .probe = nv_init_one,
383 #ifdef CONFIG_PM
384 .suspend = ata_pci_device_suspend,
385 .resume = nv_pci_device_resume,
386 #endif
	.remove = ata_pci_remove_one,
};
390 static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
394 static struct scsi_host_template nv_adma_sht = {
395 ATA_NCQ_SHT(DRV_NAME),
396 .can_queue = NV_ADMA_MAX_CPBS,
397 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
398 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
	.slave_configure = nv_adma_slave_config,
};
402 static struct scsi_host_template nv_swncq_sht = {
403 ATA_NCQ_SHT(DRV_NAME),
404 .can_queue = ATA_MAX_QUEUE,
405 .sg_tablesize = LIBATA_MAX_PRD,
406 .dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = nv_swncq_slave_config,
};
410 static struct ata_port_operations nv_generic_ops = {
411 .inherits = &ata_bmdma_port_ops,
412 .lost_interrupt = ATA_OP_NULL,
413 .scr_read = nv_scr_read,
414 .scr_write = nv_scr_write,
	.hardreset = nv_hardreset,
};
418 static struct ata_port_operations nv_nf2_ops = {
419 .inherits = &nv_generic_ops,
420 .freeze = nv_nf2_freeze,
	.thaw = nv_nf2_thaw,
};
424 static struct ata_port_operations nv_ck804_ops = {
425 .inherits = &nv_generic_ops,
426 .freeze = nv_ck804_freeze,
427 .thaw = nv_ck804_thaw,
	.host_stop = nv_ck804_host_stop,
};
431 static struct ata_port_operations nv_adma_ops = {
432 .inherits = &nv_ck804_ops,
434 .check_atapi_dma = nv_adma_check_atapi_dma,
435 .sff_tf_read = nv_adma_tf_read,
436 .qc_defer = ata_std_qc_defer,
437 .qc_prep = nv_adma_qc_prep,
438 .qc_issue = nv_adma_qc_issue,
439 .sff_irq_clear = nv_adma_irq_clear,
441 .freeze = nv_adma_freeze,
442 .thaw = nv_adma_thaw,
443 .error_handler = nv_adma_error_handler,
444 .post_internal_cmd = nv_adma_post_internal_cmd,
446 .port_start = nv_adma_port_start,
447 .port_stop = nv_adma_port_stop,
448 #ifdef CONFIG_PM
449 .port_suspend = nv_adma_port_suspend,
450 .port_resume = nv_adma_port_resume,
451 #endif
	.host_stop = nv_adma_host_stop,
};
455 static struct ata_port_operations nv_swncq_ops = {
456 .inherits = &nv_generic_ops,
458 .qc_defer = ata_std_qc_defer,
459 .qc_prep = nv_swncq_qc_prep,
460 .qc_issue = nv_swncq_qc_issue,
462 .freeze = nv_mcp55_freeze,
463 .thaw = nv_mcp55_thaw,
464 .error_handler = nv_swncq_error_handler,
466 #ifdef CONFIG_PM
467 .port_suspend = nv_swncq_port_suspend,
468 .port_resume = nv_swncq_port_resume,
469 #endif
	.port_start = nv_swncq_port_start,
};
473 struct nv_pi_priv {
474 irq_handler_t irq_handler;
	struct scsi_host_template *sht;
};
478 #define NV_PI_PRIV(_irq_handler, _sht) \
479 &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
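/* NV_PI_PRIV points at an anonymous compound literal, so each ata_port_info
 * entry below can carry its IRQ handler and SHT without a separately named
 * struct nv_pi_priv instance. */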
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
540 MODULE_AUTHOR("NVIDIA");
541 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
542 MODULE_LICENSE("GPL");
543 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
544 MODULE_VERSION(DRV_VERSION);
546 static int adma_enabled;
547 static int swncq_enabled = 1;
548 static int msi_enabled;
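/* defaults: ADMA disabled, software NCQ enabled, MSI disabled */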
550 static void nv_adma_register_mode(struct ata_port *ap)
552 struct nv_adma_port_priv *pp = ap->private_data;
553 void __iomem *mmio = pp->ctl_block;
554 u16 tmp, status;
555 int count = 0;
557 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
558 return;
560 status = readw(mmio + NV_ADMA_STAT);
561 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
562 ndelay(50);
563 status = readw(mmio + NV_ADMA_STAT);
564 count++;
566 if (count == 20)
567 ata_port_printk(ap, KERN_WARNING,
568 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
569 status);
571 tmp = readw(mmio + NV_ADMA_CTL);
572 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
574 count = 0;
575 status = readw(mmio + NV_ADMA_STAT);
576 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
577 ndelay(50);
578 status = readw(mmio + NV_ADMA_STAT);
579 count++;
581 if (count == 20)
582 ata_port_printk(ap, KERN_WARNING,
583 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
584 status);
586 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
589 static void nv_adma_mode(struct ata_port *ap)
591 struct nv_adma_port_priv *pp = ap->private_data;
592 void __iomem *mmio = pp->ctl_block;
593 u16 tmp, status;
594 int count = 0;
596 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
597 return;
599 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
601 tmp = readw(mmio + NV_ADMA_CTL);
602 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
604 status = readw(mmio + NV_ADMA_STAT);
605 while (((status & NV_ADMA_STAT_LEGACY) ||
606 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
607 ndelay(50);
608 status = readw(mmio + NV_ADMA_STAT);
609 count++;
611 if (count == 20)
612 ata_port_printk(ap, KERN_WARNING,
613 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
614 status);
616 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
619 static int nv_adma_slave_config(struct scsi_device *sdev)
621 struct ata_port *ap = ata_shost_to_port(sdev->host);
622 struct nv_adma_port_priv *pp = ap->private_data;
623 struct nv_adma_port_priv *port0, *port1;
624 struct scsi_device *sdev0, *sdev1;
625 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
626 unsigned long segment_boundary, flags;
627 unsigned short sg_tablesize;
628 int rc;
629 int adma_enable;
630 u32 current_reg, new_reg, config_mask;
632 rc = ata_scsi_slave_config(sdev);
634 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
635 /* Not a proper libata device, ignore */
636 return rc;
638 spin_lock_irqsave(ap->lock, flags);
640 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
648 segment_boundary = ATA_DMA_BOUNDARY;
649 /* Subtract 1 since an extra entry may be needed for padding, see
650 libata-scsi.c */
651 sg_tablesize = LIBATA_MAX_PRD - 1;
653 /* Since the legacy DMA engine is in use, we need to disable ADMA
654 on the port. */
655 adma_enable = 0;
656 nv_adma_register_mode(ap);
657 } else {
658 segment_boundary = NV_ADMA_DMA_BOUNDARY;
659 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
660 adma_enable = 1;
663 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
665 if (ap->port_no == 1)
666 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
667 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
668 else
669 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
670 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
672 if (adma_enable) {
673 new_reg = current_reg | config_mask;
674 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
675 } else {
676 new_reg = current_reg & ~config_mask;
677 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
680 if (current_reg != new_reg)
681 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
683 port0 = ap->host->ports[0]->private_data;
684 port1 = ap->host->ports[1]->private_data;
685 sdev0 = ap->host->ports[0]->link.device[0].sdev;
686 sdev1 = ap->host->ports[1]->link.device[0].sdev;
687 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
688 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
689 /** We have to set the DMA mask to 32-bit if either port is in
690 ATAPI mode, since they are on the same PCI device which is
691 used for DMA mapping. If we set the mask we also need to set
692 the bounce limit on both ports to ensure that the block
693 layer doesn't feed addresses that cause DMA mapping to
694 choke. If either SCSI device is not allocated yet, it's OK
695 since that port will discover its correct setting when it
696 does get allocated.
697 Note: Setting 32-bit mask should not fail. */
698 if (sdev0)
699 blk_queue_bounce_limit(sdev0->request_queue,
700 ATA_DMA_MASK);
701 if (sdev1)
702 blk_queue_bounce_limit(sdev1->request_queue,
703 ATA_DMA_MASK);
705 pci_set_dma_mask(pdev, ATA_DMA_MASK);
706 } else {
707 /** This shouldn't fail as it was set to this value before */
708 pci_set_dma_mask(pdev, pp->adma_dma_mask);
709 if (sdev0)
710 blk_queue_bounce_limit(sdev0->request_queue,
711 pp->adma_dma_mask);
712 if (sdev1)
713 blk_queue_bounce_limit(sdev1->request_queue,
714 pp->adma_dma_mask);
717 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
718 blk_queue_max_segments(sdev->request_queue, sg_tablesize);
719 ata_port_printk(ap, KERN_INFO,
720 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
721 (unsigned long long)*ap->host->dev->dma_mask,
722 segment_boundary, sg_tablesize);
724 spin_unlock_irqrestore(ap->lock, flags);
726 return rc;
729 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
731 struct nv_adma_port_priv *pp = qc->ap->private_data;
732 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
735 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
737 /* Other than when internal or pass-through commands are executed,
738 the only time this function will be called in ADMA mode will be
739 if a command fails. In the failure case we don't care about going
740 into register mode with ADMA commands pending, as the commands will
741 all shortly be aborted anyway. We assume that NCQ commands are not
742 issued via passthrough, which is the only way that switching into
743 ADMA mode could abort outstanding commands. */
744 nv_adma_register_mode(ap);
746 ata_sff_tf_read(ap, tf);
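/* Encode an ATA taskfile into ADMA CPB form: each 16-bit word carries the
 * shadow-register index in its high byte and the value in its low byte.
 * WNB (wait-not-BSY) is set on the first word, CMDEND marks the command
 * word, and unused slots are padded with IGN. */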
749 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
751 unsigned int idx = 0;
753 if (tf->flags & ATA_TFLAG_ISADDR) {
754 if (tf->flags & ATA_TFLAG_LBA48) {
755 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
756 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
757 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
758 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
759 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
760 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
761 } else
762 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
764 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
765 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
766 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
767 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
770 if (tf->flags & ATA_TFLAG_DEVICE)
771 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
773 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
775 while (idx < 12)
776 cpb[idx++] = cpu_to_le16(IGN);
778 return idx;
781 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
783 struct nv_adma_port_priv *pp = ap->private_data;
784 u8 flags = pp->cpb[cpb_num].resp_flags;
786 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
788 if (unlikely((force_err ||
789 flags & (NV_CPB_RESP_ATA_ERR |
790 NV_CPB_RESP_CMD_ERR |
791 NV_CPB_RESP_CPB_ERR)))) {
792 struct ata_eh_info *ehi = &ap->link.eh_info;
793 int freeze = 0;
795 ata_ehi_clear_desc(ehi);
796 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
797 if (flags & NV_CPB_RESP_ATA_ERR) {
798 ata_ehi_push_desc(ehi, "ATA error");
799 ehi->err_mask |= AC_ERR_DEV;
800 } else if (flags & NV_CPB_RESP_CMD_ERR) {
801 ata_ehi_push_desc(ehi, "CMD error");
802 ehi->err_mask |= AC_ERR_DEV;
803 } else if (flags & NV_CPB_RESP_CPB_ERR) {
804 ata_ehi_push_desc(ehi, "CPB error");
805 ehi->err_mask |= AC_ERR_SYSTEM;
806 freeze = 1;
807 } else {
808 /* notifier error, but no error in CPB flags? */
809 ata_ehi_push_desc(ehi, "unknown");
810 ehi->err_mask |= AC_ERR_OTHER;
811 freeze = 1;
813 /* Kill all commands. EH will determine what actually failed. */
814 if (freeze)
815 ata_port_freeze(ap);
816 else
817 ata_port_abort(ap);
818 return 1;
821 if (likely(flags & NV_CPB_RESP_DONE)) {
822 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
823 VPRINTK("CPB flags done, flags=0x%x\n", flags);
824 if (likely(qc)) {
825 DPRINTK("Completing qc from tag %d\n", cpb_num);
826 ata_qc_complete(qc);
827 } else {
828 struct ata_eh_info *ehi = &ap->link.eh_info;
829 /* Notifier bits set without a command may indicate the drive
830 is misbehaving. Raise host state machine violation on this
831 condition. */
832 ata_port_printk(ap, KERN_ERR,
833 "notifier for tag %d with no cmd?\n",
834 cpb_num);
835 ehi->err_mask |= AC_ERR_HSM;
836 ehi->action |= ATA_EH_RESET;
837 ata_port_freeze(ap);
838 return 1;
841 return 0;
844 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
846 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
848 /* freeze if hotplugged */
849 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
850 ata_port_freeze(ap);
851 return 1;
854 /* bail out if not our interrupt */
855 if (!(irq_stat & NV_INT_DEV))
856 return 0;
858 /* DEV interrupt w/ no active qc? */
859 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
860 ata_sff_check_status(ap);
861 return 1;
864 /* handle interrupt */
865 return ata_bmdma_port_intr(ap, qc);
868 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
870 struct ata_host *host = dev_instance;
871 int i, handled = 0;
872 u32 notifier_clears[2];
874 spin_lock(&host->lock);
876 for (i = 0; i < host->n_ports; i++) {
877 struct ata_port *ap = host->ports[i];
878 struct nv_adma_port_priv *pp = ap->private_data;
879 void __iomem *mmio = pp->ctl_block;
880 u16 status;
881 u32 gen_ctl;
882 u32 notifier, notifier_error;
884 notifier_clears[i] = 0;
886 /* if ADMA is disabled, use standard ata interrupt handler */
887 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
888 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
889 >> (NV_INT_PORT_SHIFT * i);
890 handled += nv_host_intr(ap, irq_stat);
891 continue;
894 /* if in ATA register mode, check for standard interrupts */
895 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
896 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
897 >> (NV_INT_PORT_SHIFT * i);
898 if (ata_tag_valid(ap->link.active_tag))
899 /** NV_INT_DEV indication seems unreliable
900 at times at least in ADMA mode. Force it
901 on always when a command is active, to
902 prevent losing interrupts. */
903 irq_stat |= NV_INT_DEV;
904 handled += nv_host_intr(ap, irq_stat);
907 notifier = readl(mmio + NV_ADMA_NOTIFIER);
908 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
909 notifier_clears[i] = notifier | notifier_error;
911 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
913 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
914 !notifier_error)
915 /* Nothing to do */
916 continue;
918 status = readw(mmio + NV_ADMA_STAT);
		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
926 writew(status, mmio + NV_ADMA_STAT);
927 readw(mmio + NV_ADMA_STAT); /* flush posted write */
928 rmb();
930 handled++; /* irq handled if we got here */
932 /* freeze if hotplugged or controller error */
933 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
934 NV_ADMA_STAT_HOTUNPLUG |
935 NV_ADMA_STAT_TIMEOUT |
936 NV_ADMA_STAT_SERROR))) {
937 struct ata_eh_info *ehi = &ap->link.eh_info;
939 ata_ehi_clear_desc(ehi);
940 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
941 if (status & NV_ADMA_STAT_TIMEOUT) {
942 ehi->err_mask |= AC_ERR_SYSTEM;
943 ata_ehi_push_desc(ehi, "timeout");
944 } else if (status & NV_ADMA_STAT_HOTPLUG) {
945 ata_ehi_hotplugged(ehi);
946 ata_ehi_push_desc(ehi, "hotplug");
947 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
948 ata_ehi_hotplugged(ehi);
949 ata_ehi_push_desc(ehi, "hot unplug");
950 } else if (status & NV_ADMA_STAT_SERROR) {
951 /* let EH analyze SError and figure out cause */
952 ata_ehi_push_desc(ehi, "SError");
953 } else
954 ata_ehi_push_desc(ehi, "unknown");
955 ata_port_freeze(ap);
956 continue;
959 if (status & (NV_ADMA_STAT_DONE |
960 NV_ADMA_STAT_CPBERR |
961 NV_ADMA_STAT_CMD_COMPLETE)) {
962 u32 check_commands = notifier_clears[i];
963 int pos, rc;
965 if (status & NV_ADMA_STAT_CPBERR) {
966 /* check all active commands */
967 if (ata_tag_valid(ap->link.active_tag))
968 check_commands = 1 <<
969 ap->link.active_tag;
970 else
971 check_commands = ap->link.sactive;
974 /* check CPBs for completed commands */
975 while ((pos = ffs(check_commands))) {
976 pos--;
977 rc = nv_adma_check_cpb(ap, pos,
978 notifier_error & (1 << pos));
979 if (unlikely(rc))
980 check_commands = 0;
981 check_commands &= ~(1 << pos);
986 if (notifier_clears[0] || notifier_clears[1]) {
987 /* Note: Both notifier clear registers must be written
988 if either is set, even if one is zero, according to NVIDIA. */
989 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
990 writel(notifier_clears[0], pp->notifier_clear_block);
991 pp = host->ports[1]->private_data;
992 writel(notifier_clears[1], pp->notifier_clear_block);
995 spin_unlock(&host->lock);
997 return IRQ_RETVAL(handled);
1000 static void nv_adma_freeze(struct ata_port *ap)
1002 struct nv_adma_port_priv *pp = ap->private_data;
1003 void __iomem *mmio = pp->ctl_block;
1004 u16 tmp;
1006 nv_ck804_freeze(ap);
1008 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1009 return;
1011 /* clear any outstanding CK804 notifications */
1012 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1013 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1015 /* Disable interrupt */
1016 tmp = readw(mmio + NV_ADMA_CTL);
1017 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1018 mmio + NV_ADMA_CTL);
1019 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1022 static void nv_adma_thaw(struct ata_port *ap)
1024 struct nv_adma_port_priv *pp = ap->private_data;
1025 void __iomem *mmio = pp->ctl_block;
1026 u16 tmp;
1028 nv_ck804_thaw(ap);
1030 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1031 return;
1033 /* Enable interrupt */
1034 tmp = readw(mmio + NV_ADMA_CTL);
1035 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1036 mmio + NV_ADMA_CTL);
1037 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1040 static void nv_adma_irq_clear(struct ata_port *ap)
1042 struct nv_adma_port_priv *pp = ap->private_data;
1043 void __iomem *mmio = pp->ctl_block;
1044 u32 notifier_clears[2];
1046 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1047 ata_bmdma_irq_clear(ap);
1048 return;
1051 /* clear any outstanding CK804 notifications */
1052 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1053 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1055 /* clear ADMA status */
1056 writew(0xffff, mmio + NV_ADMA_STAT);
1058 /* clear notifiers - note both ports need to be written with
1059 something even though we are only clearing on one */
1060 if (ap->port_no == 0) {
1061 notifier_clears[0] = 0xFFFFFFFF;
1062 notifier_clears[1] = 0;
1063 } else {
1064 notifier_clears[0] = 0;
1065 notifier_clears[1] = 0xFFFFFFFF;
1067 pp = ap->host->ports[0]->private_data;
1068 writel(notifier_clears[0], pp->notifier_clear_block);
1069 pp = ap->host->ports[1]->private_data;
1070 writel(notifier_clears[1], pp->notifier_clear_block);
1073 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1075 struct nv_adma_port_priv *pp = qc->ap->private_data;
1077 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1078 ata_bmdma_post_internal_cmd(qc);
1081 static int nv_adma_port_start(struct ata_port *ap)
1083 struct device *dev = ap->host->dev;
1084 struct nv_adma_port_priv *pp;
1085 int rc;
1086 void *mem;
1087 dma_addr_t mem_dma;
1088 void __iomem *mmio;
1089 struct pci_dev *pdev = to_pci_dev(dev);
1090 u16 tmp;
1092 VPRINTK("ENTER\n");
1094 /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1095 pad buffers */
1096 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1097 if (rc)
1098 return rc;
1099 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1100 if (rc)
1101 return rc;
1103 /* we might fallback to bmdma, allocate bmdma resources */
1104 rc = ata_bmdma_port_start(ap);
1105 if (rc)
1106 return rc;
1108 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1109 if (!pp)
1110 return -ENOMEM;
1112 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1113 ap->port_no * NV_ADMA_PORT_SIZE;
1114 pp->ctl_block = mmio;
1115 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1116 pp->notifier_clear_block = pp->gen_block +
1117 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1119 /* Now that the legacy PRD and padding buffer are allocated we can
1120 safely raise the DMA mask to allocate the CPB/APRD table.
1121 These are allowed to fail since we store the value that ends up
1122 being used to set as the bounce limit in slave_config later if
1123 needed. */
1124 pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1125 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1126 pp->adma_dma_mask = *dev->dma_mask;
1128 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1129 &mem_dma, GFP_KERNEL);
1130 if (!mem)
1131 return -ENOMEM;
1132 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
1139 pp->cpb = mem;
1140 pp->cpb_dma = mem_dma;
1142 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1143 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
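	/* the high dword is taken with two 16-bit shifts so the expression
	   stays well defined when dma_addr_t is only 32 bits wide, where a
	   single >> 32 would be undefined */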
1145 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1146 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
1151 pp->aprd = mem;
1152 pp->aprd_dma = mem_dma;
1154 ap->private_data = pp;
1156 /* clear any outstanding interrupt conditions */
1157 writew(0xffff, mmio + NV_ADMA_STAT);
1159 /* initialize port variables */
1160 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1162 /* clear CPB fetch count */
1163 writew(0, mmio + NV_ADMA_CPB_COUNT);
1165 /* clear GO for register mode, enable interrupt */
1166 tmp = readw(mmio + NV_ADMA_CTL);
1167 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1168 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1170 tmp = readw(mmio + NV_ADMA_CTL);
1171 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1172 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1173 udelay(1);
1174 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1175 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1177 return 0;
1180 static void nv_adma_port_stop(struct ata_port *ap)
1182 struct nv_adma_port_priv *pp = ap->private_data;
1183 void __iomem *mmio = pp->ctl_block;
1185 VPRINTK("ENTER\n");
1186 writew(0, mmio + NV_ADMA_CTL);
1189 #ifdef CONFIG_PM
1190 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1192 struct nv_adma_port_priv *pp = ap->private_data;
1193 void __iomem *mmio = pp->ctl_block;
1195 /* Go to register mode - clears GO */
1196 nv_adma_register_mode(ap);
1198 /* clear CPB fetch count */
1199 writew(0, mmio + NV_ADMA_CPB_COUNT);
1201 /* disable interrupt, shut down port */
1202 writew(0, mmio + NV_ADMA_CTL);
1204 return 0;
1207 static int nv_adma_port_resume(struct ata_port *ap)
1209 struct nv_adma_port_priv *pp = ap->private_data;
1210 void __iomem *mmio = pp->ctl_block;
1211 u16 tmp;
1213 /* set CPB block location */
1214 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1215 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1217 /* clear any outstanding interrupt conditions */
1218 writew(0xffff, mmio + NV_ADMA_STAT);
1220 /* initialize port variables */
1221 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1223 /* clear CPB fetch count */
1224 writew(0, mmio + NV_ADMA_CPB_COUNT);
1226 /* clear GO for register mode, enable interrupt */
1227 tmp = readw(mmio + NV_ADMA_CTL);
1228 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1229 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1231 tmp = readw(mmio + NV_ADMA_CTL);
1232 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1233 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1234 udelay(1);
1235 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1236 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1238 return 0;
1240 #endif
1242 static void nv_adma_setup_port(struct ata_port *ap)
1244 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1245 struct ata_ioports *ioport = &ap->ioaddr;
1247 VPRINTK("ENTER\n");
1249 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1251 ioport->cmd_addr = mmio;
1252 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1253 ioport->error_addr =
1254 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1255 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1256 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1257 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1258 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1259 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1260 ioport->status_addr =
1261 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1262 ioport->altstatus_addr =
1263 ioport->ctl_addr = mmio + 0x20;
1266 static int nv_adma_host_init(struct ata_host *host)
1268 struct pci_dev *pdev = to_pci_dev(host->dev);
1269 unsigned int i;
1270 u32 tmp32;
1272 VPRINTK("ENTER\n");
1274 /* enable ADMA on the ports */
1275 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1276 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1277 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1278 NV_MCP_SATA_CFG_20_PORT1_EN |
1279 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1281 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1283 for (i = 0; i < host->n_ports; i++)
1284 nv_adma_setup_port(host->ports[i]);
1286 return 0;
1289 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1290 struct scatterlist *sg,
1291 int idx,
1292 struct nv_adma_prd *aprd)
1294 u8 flags = 0;
1295 if (qc->tf.flags & ATA_TFLAG_WRITE)
1296 flags |= NV_APRD_WRITE;
1297 if (idx == qc->n_elem - 1)
1298 flags |= NV_APRD_END;
1299 else if (idx != 4)
1300 flags |= NV_APRD_CONT;
1302 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1303 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1304 aprd->flags = flags;
1305 aprd->packet_len = 0;
1308 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1310 struct nv_adma_port_priv *pp = qc->ap->private_data;
1311 struct nv_adma_prd *aprd;
1312 struct scatterlist *sg;
1313 unsigned int si;
1315 VPRINTK("ENTER\n");
1317 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1318 aprd = (si < 5) ? &cpb->aprd[si] :
1319 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1320 nv_adma_fill_aprd(qc, sg, si, aprd);
1322 if (si > 5)
1323 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1324 else
1325 cpb->next_aprd = cpu_to_le64(0);
1328 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1330 struct nv_adma_port_priv *pp = qc->ap->private_data;
1332 /* ADMA engine can only be used for non-ATAPI DMA commands,
1333 or interrupt-driven no-data commands. */
1334 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1335 (qc->tf.flags & ATA_TFLAG_POLLING))
1336 return 1;
1338 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1339 (qc->tf.protocol == ATA_PROT_NODATA))
1340 return 0;
1342 return 1;
1345 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1347 struct nv_adma_port_priv *pp = qc->ap->private_data;
1348 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1349 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1350 NV_CPB_CTL_IEN;
1352 if (nv_adma_use_reg_mode(qc)) {
1353 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1354 (qc->flags & ATA_QCFLAG_DMAMAP));
1355 nv_adma_register_mode(qc->ap);
1356 ata_bmdma_qc_prep(qc);
1357 return;
1360 cpb->resp_flags = NV_CPB_RESP_DONE;
1361 wmb();
1362 cpb->ctl_flags = 0;
1363 wmb();
1365 cpb->len = 3;
1366 cpb->tag = qc->tag;
1367 cpb->next_cpb_idx = 0;
1369 /* turn on NCQ flags for NCQ commands */
1370 if (qc->tf.protocol == ATA_PROT_NCQ)
1371 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1373 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1375 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1377 if (qc->flags & ATA_QCFLAG_DMAMAP) {
1378 nv_adma_fill_sg(qc, cpb);
1379 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1380 } else
1381 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1383 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1384 until we are finished filling in all of the contents */
1385 wmb();
1386 cpb->ctl_flags = ctl_flags;
1387 wmb();
1388 cpb->resp_flags = 0;
1391 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1393 struct nv_adma_port_priv *pp = qc->ap->private_data;
1394 void __iomem *mmio = pp->ctl_block;
1395 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1397 VPRINTK("ENTER\n");
1399 /* We can't handle result taskfile with NCQ commands, since
1400 retrieving the taskfile switches us out of ADMA mode and would abort
1401 existing commands. */
1402 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1403 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1404 ata_dev_printk(qc->dev, KERN_ERR,
1405 "NCQ w/ RESULT_TF not allowed\n");
1406 return AC_ERR_SYSTEM;
1409 if (nv_adma_use_reg_mode(qc)) {
1410 /* use ATA register mode */
1411 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1412 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1413 (qc->flags & ATA_QCFLAG_DMAMAP));
1414 nv_adma_register_mode(qc->ap);
1415 return ata_bmdma_qc_issue(qc);
1416 } else
1417 nv_adma_mode(qc->ap);
1419 /* write append register, command tag in lower 8 bits
1420 and (number of cpbs to append -1) in top 8 bits */
1421 wmb();
1423 if (curr_ncq != pp->last_issue_ncq) {
1424 /* Seems to need some delay before switching between NCQ and
1425 non-NCQ commands, else we get command timeouts and such. */
1426 udelay(20);
1427 pp->last_issue_ncq = curr_ncq;
1430 writew(qc->tag, mmio + NV_ADMA_APPEND);
1432 DPRINTK("Issued tag %u\n", qc->tag);
1434 return 0;
1437 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1439 struct ata_host *host = dev_instance;
1440 unsigned int i;
1441 unsigned int handled = 0;
1442 unsigned long flags;
1444 spin_lock_irqsave(&host->lock, flags);
1446 for (i = 0; i < host->n_ports; i++) {
1447 struct ata_port *ap = host->ports[i];
1448 struct ata_queued_cmd *qc;
1450 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1451 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1452 handled += ata_bmdma_port_intr(ap, qc);
1453 } else {
			/*
			 * No request pending? Clear interrupt status
			 * anyway, in case there's one pending.
			 */
1458 ap->ops->sff_check_status(ap);
1462 spin_unlock_irqrestore(&host->lock, flags);
1464 return IRQ_RETVAL(handled);
1467 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1469 int i, handled = 0;
1471 for (i = 0; i < host->n_ports; i++) {
1472 handled += nv_host_intr(host->ports[i], irq_stat);
1473 irq_stat >>= NV_INT_PORT_SHIFT;
1476 return IRQ_RETVAL(handled);
1479 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1481 struct ata_host *host = dev_instance;
1482 u8 irq_stat;
1483 irqreturn_t ret;
1485 spin_lock(&host->lock);
1486 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1487 ret = nv_do_interrupt(host, irq_stat);
1488 spin_unlock(&host->lock);
1490 return ret;
1493 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1495 struct ata_host *host = dev_instance;
1496 u8 irq_stat;
1497 irqreturn_t ret;
1499 spin_lock(&host->lock);
1500 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1501 ret = nv_do_interrupt(host, irq_stat);
1502 spin_unlock(&host->lock);
1504 return ret;
1507 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1509 if (sc_reg > SCR_CONTROL)
1510 return -EINVAL;
1512 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1513 return 0;
1516 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1518 if (sc_reg > SCR_CONTROL)
1519 return -EINVAL;
1521 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1522 return 0;
1525 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1526 unsigned long deadline)
1528 struct ata_eh_context *ehc = &link->eh_context;
	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
1533 if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1534 !ata_dev_enabled(link->device))
1535 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1536 NULL, NULL);
1537 else {
1538 const unsigned long *timing = sata_ehc_deb_timing(ehc);
1539 int rc;
1541 if (!(ehc->i.flags & ATA_EHI_QUIET))
1542 ata_link_printk(link, KERN_INFO, "nv: skipping "
1543 "hardreset on occupied port\n");
1545 /* make sure the link is online */
1546 rc = sata_link_resume(link, timing, deadline);
1547 /* whine about phy resume failure but proceed */
1548 if (rc && rc != -EOPNOTSUPP)
1549 ata_link_printk(link, KERN_WARNING, "failed to resume "
1550 "link (errno=%d)\n", rc);
1553 /* device signature acquisition is unreliable */
1554 return -EAGAIN;
1557 static void nv_nf2_freeze(struct ata_port *ap)
1559 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1560 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1561 u8 mask;
1563 mask = ioread8(scr_addr + NV_INT_ENABLE);
1564 mask &= ~(NV_INT_ALL << shift);
1565 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1568 static void nv_nf2_thaw(struct ata_port *ap)
1570 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1571 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1572 u8 mask;
1574 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1576 mask = ioread8(scr_addr + NV_INT_ENABLE);
1577 mask |= (NV_INT_MASK << shift);
1578 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1581 static void nv_ck804_freeze(struct ata_port *ap)
1583 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1584 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1585 u8 mask;
1587 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1588 mask &= ~(NV_INT_ALL << shift);
1589 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1592 static void nv_ck804_thaw(struct ata_port *ap)
1594 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1595 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1596 u8 mask;
1598 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1600 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1601 mask |= (NV_INT_MASK << shift);
1602 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1605 static void nv_mcp55_freeze(struct ata_port *ap)
1607 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1608 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1609 u32 mask;
1611 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1613 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1614 mask &= ~(NV_INT_ALL_MCP55 << shift);
1615 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1618 static void nv_mcp55_thaw(struct ata_port *ap)
1620 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1621 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1622 u32 mask;
1624 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1626 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1627 mask |= (NV_INT_MASK_MCP55 << shift);
1628 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1631 static void nv_adma_error_handler(struct ata_port *ap)
1633 struct nv_adma_port_priv *pp = ap->private_data;
1634 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1635 void __iomem *mmio = pp->ctl_block;
1636 int i;
1637 u16 tmp;
1639 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1640 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1641 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1642 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1643 u32 status = readw(mmio + NV_ADMA_STAT);
1644 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1645 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1647 ata_port_printk(ap, KERN_ERR,
1648 "EH in ADMA mode, notifier 0x%X "
1649 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1650 "next cpb count 0x%X next cpb idx 0x%x\n",
1651 notifier, notifier_error, gen_ctl, status,
1652 cpb_count, next_cpb_idx);
1654 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1655 struct nv_adma_cpb *cpb = &pp->cpb[i];
1656 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1657 ap->link.sactive & (1 << i))
1658 ata_port_printk(ap, KERN_ERR,
1659 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1660 i, cpb->ctl_flags, cpb->resp_flags);
1664 /* Push us back into port register mode for error handling. */
1665 nv_adma_register_mode(ap);
1667 /* Mark all of the CPBs as invalid to prevent them from
1668 being executed */
1669 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1670 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1672 /* clear CPB fetch count */
1673 writew(0, mmio + NV_ADMA_CPB_COUNT);
1675 /* Reset channel */
1676 tmp = readw(mmio + NV_ADMA_CTL);
1677 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1678 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1679 udelay(1);
1680 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1681 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1684 ata_bmdma_error_handler(ap);
1687 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1689 struct nv_swncq_port_priv *pp = ap->private_data;
1690 struct defer_queue *dq = &pp->defer_queue;
1692 /* queue is full */
1693 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1694 dq->defer_bits |= (1 << qc->tag);
1695 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1698 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1700 struct nv_swncq_port_priv *pp = ap->private_data;
1701 struct defer_queue *dq = &pp->defer_queue;
1702 unsigned int tag;
1704 if (dq->head == dq->tail) /* null queue */
1705 return NULL;
1707 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1708 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1709 WARN_ON(!(dq->defer_bits & (1 << tag)));
1710 dq->defer_bits &= ~(1 << tag);
1712 return ata_qc_from_tag(ap, tag);
1715 static void nv_swncq_fis_reinit(struct ata_port *ap)
1717 struct nv_swncq_port_priv *pp = ap->private_data;
1719 pp->dhfis_bits = 0;
1720 pp->dmafis_bits = 0;
1721 pp->sdbfis_bits = 0;
1722 pp->ncq_flags = 0;
1725 static void nv_swncq_pp_reinit(struct ata_port *ap)
1727 struct nv_swncq_port_priv *pp = ap->private_data;
1728 struct defer_queue *dq = &pp->defer_queue;
1730 dq->head = 0;
1731 dq->tail = 0;
1732 dq->defer_bits = 0;
1733 pp->qc_active = 0;
1734 pp->last_issue_tag = ATA_TAG_POISON;
1735 nv_swncq_fis_reinit(ap);
1738 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1740 struct nv_swncq_port_priv *pp = ap->private_data;
1742 writew(fis, pp->irq_block);
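/* ata_bmdma_stop() only dereferences qc->ap, so a throwaway ata_queued_cmd
 * on the stack with just .ap filled in is enough to stop the BMDMA engine
 * for the whole port. */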
1745 static void __ata_bmdma_stop(struct ata_port *ap)
1747 struct ata_queued_cmd qc;
1749 qc.ap = ap;
1750 ata_bmdma_stop(&qc);
1753 static void nv_swncq_ncq_stop(struct ata_port *ap)
1755 struct nv_swncq_port_priv *pp = ap->private_data;
1756 unsigned int i;
1757 u32 sactive;
1758 u32 done_mask;
1760 ata_port_printk(ap, KERN_ERR,
1761 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1762 ap->qc_active, ap->link.sactive);
1763 ata_port_printk(ap, KERN_ERR,
1764 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1765 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1766 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1767 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1769 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1770 ap->ops->sff_check_status(ap),
1771 ioread8(ap->ioaddr.error_addr));
1773 sactive = readl(pp->sactive_block);
1774 done_mask = pp->qc_active ^ sactive;
	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1777 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1778 u8 err = 0;
1779 if (pp->qc_active & (1 << i))
1780 err = 0;
1781 else if (done_mask & (1 << i))
1782 err = 1;
1783 else
1784 continue;
1786 ata_port_printk(ap, KERN_ERR,
1787 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1788 (pp->dhfis_bits >> i) & 0x1,
1789 (pp->dmafis_bits >> i) & 0x1,
1790 (pp->sdbfis_bits >> i) & 0x1,
1791 (sactive >> i) & 0x1,
			(err ? "error! tag doesn't exist" : " "));
1795 nv_swncq_pp_reinit(ap);
1796 ap->ops->sff_irq_clear(ap);
1797 __ata_bmdma_stop(ap);
1798 nv_swncq_irq_clear(ap, 0xffff);
1801 static void nv_swncq_error_handler(struct ata_port *ap)
1803 struct ata_eh_context *ehc = &ap->link.eh_context;
1805 if (ap->link.sactive) {
1806 nv_swncq_ncq_stop(ap);
1807 ehc->i.action |= ATA_EH_RESET;
1810 ata_bmdma_error_handler(ap);
1813 #ifdef CONFIG_PM
1814 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1816 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1817 u32 tmp;
1819 /* clear irq */
1820 writel(~0, mmio + NV_INT_STATUS_MCP55);
1822 /* disable irq */
1823 writel(0, mmio + NV_INT_ENABLE_MCP55);
1825 /* disable swncq */
1826 tmp = readl(mmio + NV_CTL_MCP55);
1827 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1828 writel(tmp, mmio + NV_CTL_MCP55);
1830 return 0;
1833 static int nv_swncq_port_resume(struct ata_port *ap)
1835 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1836 u32 tmp;
1838 /* clear irq */
1839 writel(~0, mmio + NV_INT_STATUS_MCP55);
1841 /* enable irq */
1842 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1844 /* enable swncq */
1845 tmp = readl(mmio + NV_CTL_MCP55);
1846 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1848 return 0;
1850 #endif
1852 static void nv_swncq_host_init(struct ata_host *host)
1854 u32 tmp;
1855 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1856 struct pci_dev *pdev = to_pci_dev(host->dev);
1857 u8 regval;
1859 /* disable ECO 398 */
1860 pci_read_config_byte(pdev, 0x7f, &regval);
1861 regval &= ~(1 << 7);
1862 pci_write_config_byte(pdev, 0x7f, regval);
1864 /* enable swncq */
1865 tmp = readl(mmio + NV_CTL_MCP55);
1866 VPRINTK("HOST_CTL:0x%X\n", tmp);
1867 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1869 /* enable irq intr */
1870 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1871 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1872 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1874 /* clear port irq */
1875 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1878 static int nv_swncq_slave_config(struct scsi_device *sdev)
1880 struct ata_port *ap = ata_shost_to_port(sdev->host);
1881 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1882 struct ata_device *dev;
1883 int rc;
1884 u8 rev;
1885 u8 check_maxtor = 0;
1886 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1888 rc = ata_scsi_slave_config(sdev);
1889 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1890 /* Not a proper libata device, ignore */
1891 return rc;
1893 dev = &ap->link.device[sdev->id];
1894 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1895 return rc;
1897 /* if MCP51 and Maxtor, then disable ncq */
1898 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1899 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1900 check_maxtor = 1;
1902 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1903 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1904 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1905 pci_read_config_byte(pdev, 0x8, &rev);
1906 if (rev <= 0xa2)
1907 check_maxtor = 1;
1910 if (!check_maxtor)
1911 return rc;
1913 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1915 if (strncmp(model_num, "Maxtor", 6) == 0) {
1916 ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1917 ata_dev_printk(dev, KERN_NOTICE,
1918 "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1921 return rc;
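/* Per-port setup for SWNCQ: allocate the usual BMDMA resources (kept as a
 * fallback path), the private data and one PRD table per possible tag, and
 * cache the MMIO addresses of the SActive, interrupt status and tag
 * registers for this port.
 */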
1924 static int nv_swncq_port_start(struct ata_port *ap)
1926 struct device *dev = ap->host->dev;
1927 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1928 struct nv_swncq_port_priv *pp;
1929 int rc;
1931 /* we might fall back to bmdma, so allocate bmdma resources */
1932 rc = ata_bmdma_port_start(ap);
1933 if (rc)
1934 return rc;
1936 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1937 if (!pp)
1938 return -ENOMEM;
1940 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1941 &pp->prd_dma, GFP_KERNEL);
1942 if (!pp->prd)
1943 return -ENOMEM;
1944 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1946 ap->private_data = pp;
1947 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1948 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1949 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1951 return 0;
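/* qc_prep hook: non-NCQ commands keep the stock BMDMA preparation, while
 * DMA-mapped NCQ commands get their PRD table built by nv_swncq_fill_sg()
 * below.
 */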
1954 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1956 if (qc->tf.protocol != ATA_PROT_NCQ) {
1957 ata_bmdma_qc_prep(qc);
1958 return;
1961 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1962 return;
1964 nv_swncq_fill_sg(qc);
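/* Build the PRD table for an NCQ command.  Each scatterlist segment is
 * split so that no PRD entry crosses a 64 KiB boundary, as in the standard
 * BMDMA code.  As a hypothetical example, a segment at DMA address 0x1fff0
 * of length 0x20 becomes two entries, 0x1fff0/0x10 and 0x20000/0x10.  The
 * final entry is marked with ATA_PRD_EOT.
 */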
1967 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1969 struct ata_port *ap = qc->ap;
1970 struct scatterlist *sg;
1971 struct nv_swncq_port_priv *pp = ap->private_data;
1972 struct ata_bmdma_prd *prd;
1973 unsigned int si, idx;
1975 prd = pp->prd + ATA_MAX_PRD * qc->tag;
1977 idx = 0;
1978 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1979 u32 addr, offset;
1980 u32 sg_len, len;
1982 addr = (u32)sg_dma_address(sg);
1983 sg_len = sg_dma_len(sg);
1985 while (sg_len) {
1986 offset = addr & 0xffff;
1987 len = sg_len;
1988 if ((offset + sg_len) > 0x10000)
1989 len = 0x10000 - offset;
1991 prd[idx].addr = cpu_to_le32(addr);
1992 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1994 idx++;
1995 sg_len -= len;
1996 addr += len;
2000 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
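/* Actually issue one NCQ command: set the tag's bit in SActive, update the
 * driver bookkeeping (last issued tag, expected D2H and DMA Setup FIS bits,
 * active mask), then load the taskfile and write the command register via
 * the SFF ops.
 */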
2003 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2004 struct ata_queued_cmd *qc)
2006 struct nv_swncq_port_priv *pp = ap->private_data;
2008 if (qc == NULL)
2009 return 0;
2011 DPRINTK("Enter\n");
2013 writel((1 << qc->tag), pp->sactive_block);
2014 pp->last_issue_tag = qc->tag;
2015 pp->dhfis_bits &= ~(1 << qc->tag);
2016 pp->dmafis_bits &= ~(1 << qc->tag);
2017 pp->qc_active |= (0x1 << qc->tag);
2019 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2020 ap->ops->sff_exec_command(ap, &qc->tf);
2022 DPRINTK("Issued tag %u\n", qc->tag);
2024 return 0;
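/* qc_issue hook: non-NCQ commands go straight to the BMDMA path.  An NCQ
 * command is issued immediately only if the port is idle; otherwise it is
 * parked on the software defer queue and sent later from interrupt context.
 */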
2027 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2029 struct ata_port *ap = qc->ap;
2030 struct nv_swncq_port_priv *pp = ap->private_data;
2032 if (qc->tf.protocol != ATA_PROT_NCQ)
2033 return ata_bmdma_qc_issue(qc);
2035 DPRINTK("Enter\n");
2037 if (!pp->qc_active)
2038 nv_swncq_issue_atacmd(ap, qc);
2039 else
2040 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2042 return 0;
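/* Hotplug interrupt: note whether a device was added or removed, clear and
 * record SError, then freeze the port so the libata error handler performs
 * the follow-up work.
 */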
2045 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2047 u32 serror;
2048 struct ata_eh_info *ehi = &ap->link.eh_info;
2050 ata_ehi_clear_desc(ehi);
2052 /* AHCI needs SError cleared; otherwise, it might lock up */
2053 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2054 sata_scr_write(&ap->link, SCR_ERROR, serror);
2056 /* analyze @fis */
2057 if (fis & NV_SWNCQ_IRQ_ADDED)
2058 ata_ehi_push_desc(ehi, "hot plug");
2059 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2060 ata_ehi_push_desc(ehi, "hot unplug");
2062 ata_ehi_hotplugged(ehi);
2064 /* okay, let's hand over to EH */
2065 ehi->serror |= serror;
2067 ata_port_freeze(ap);
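/* Handle a Set Device Bits FIS.  Tags set in qc_active but no longer in
 * SActive (done_mask) have completed and are retired here.  If an issued
 * command never produced its D2H register FIS (or a backout was seen), the
 * last issued command is reissued; otherwise the next deferred command, if
 * any, is started.
 */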
2070 static int nv_swncq_sdbfis(struct ata_port *ap)
2072 struct ata_queued_cmd *qc;
2073 struct nv_swncq_port_priv *pp = ap->private_data;
2074 struct ata_eh_info *ehi = &ap->link.eh_info;
2075 u32 sactive;
2076 u32 done_mask;
2077 int i;
2078 u8 host_stat;
2079 u8 lack_dhfis = 0;
2081 host_stat = ap->ops->bmdma_status(ap);
2082 if (unlikely(host_stat & ATA_DMA_ERR)) {
2083 /* error when transferring data to/from memory */
2084 ata_ehi_clear_desc(ehi);
2085 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2086 ehi->err_mask |= AC_ERR_HOST_BUS;
2087 ehi->action |= ATA_EH_RESET;
2088 return -EINVAL;
2091 ap->ops->sff_irq_clear(ap);
2092 __ata_bmdma_stop(ap);
2094 sactive = readl(pp->sactive_block);
2095 done_mask = pp->qc_active ^ sactive;
2097 if (unlikely(done_mask & sactive)) {
2098 ata_ehi_clear_desc(ehi);
2099 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2100 "(%08x->%08x)", pp->qc_active, sactive);
2101 ehi->err_mask |= AC_ERR_HSM;
2102 ehi->action |= ATA_EH_RESET;
2103 return -EINVAL;
2105 for (i = 0; i < ATA_MAX_QUEUE; i++) {
2106 if (!(done_mask & (1 << i)))
2107 continue;
2109 qc = ata_qc_from_tag(ap, i);
2110 if (qc) {
2111 ata_qc_complete(qc);
2112 pp->qc_active &= ~(1 << i);
2113 pp->dhfis_bits &= ~(1 << i);
2114 pp->dmafis_bits &= ~(1 << i);
2115 pp->sdbfis_bits |= (1 << i);
2119 if (!ap->qc_active) {
2120 DPRINTK("over\n");
2121 nv_swncq_pp_reinit(ap);
2122 return 0;
2125 if (pp->qc_active & pp->dhfis_bits)
2126 return 0;
2128 if ((pp->ncq_flags & ncq_saw_backout) ||
2129 (pp->qc_active ^ pp->dhfis_bits))
2130 /* If the controller can't get a device to host register FIS,
2131 * the driver needs to reissue the new command.
2132 */
2133 lack_dhfis = 1;
2135 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2136 "SWNCQ:qc_active 0x%X defer_bits %X "
2137 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2138 ap->print_id, ap->qc_active, pp->qc_active,
2139 pp->defer_queue.defer_bits, pp->dhfis_bits,
2140 pp->dmafis_bits, pp->last_issue_tag);
2142 nv_swncq_fis_reinit(ap);
2144 if (lack_dhfis) {
2145 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2146 nv_swncq_issue_atacmd(ap, qc);
2147 return 0;
2150 if (pp->defer_queue.defer_bits) {
2151 /* send deferral queue command */
2152 qc = nv_swncq_qc_from_dq(ap);
2153 WARN_ON(qc == NULL);
2154 nv_swncq_issue_atacmd(ap, qc);
2157 return 0;
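/* Fetch the tag of the most recent DMA Setup FIS from the per-port tag
 * register; the 5-bit tag lives in bits 6:2, e.g. a raw value of 0x14
 * decodes to tag 5.
 */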
2160 static inline u32 nv_swncq_tag(struct ata_port *ap)
2162 struct nv_swncq_port_priv *pp = ap->private_data;
2163 u32 tag;
2165 tag = readb(pp->tag_block) >> 2;
2166 return (tag & 0x1f);
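/* React to a DMA Setup FIS: stop any running BMDMA transfer, look up the
 * queued command for the tag the hardware reports, point the BMDMA engine
 * at that tag's PRD table, set the transfer direction and start the engine.
 */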
2169 static void nv_swncq_dmafis(struct ata_port *ap)
2171 struct ata_queued_cmd *qc;
2172 unsigned int rw;
2173 u8 dmactl;
2174 u32 tag;
2175 struct nv_swncq_port_priv *pp = ap->private_data;
2177 __ata_bmdma_stop(ap);
2178 tag = nv_swncq_tag(ap);
2180 DPRINTK("dma setup tag 0x%x\n", tag);
2181 qc = ata_qc_from_tag(ap, tag);
2183 if (unlikely(!qc))
2184 return;
2186 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2188 /* load PRD table addr. */
2189 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2190 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2192 /* specify data direction, triple-check start bit is clear */
2193 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2194 dmactl &= ~ATA_DMA_WR;
2195 if (!rw)
2196 dmactl |= ATA_DMA_WR;
2198 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
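/* Per-port interrupt work while NCQ commands are active: acknowledge the
 * status bits, route hotplug events to nv_swncq_hotplug(), freeze the port
 * on a device error, and otherwise use the SDB, D2H register and DMA Setup
 * FIS bits to complete, reissue or start commands.
 */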
2201 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2203 struct nv_swncq_port_priv *pp = ap->private_data;
2204 struct ata_queued_cmd *qc;
2205 struct ata_eh_info *ehi = &ap->link.eh_info;
2206 u32 serror;
2207 u8 ata_stat;
2209 ata_stat = ap->ops->sff_check_status(ap);
2210 nv_swncq_irq_clear(ap, fis);
2211 if (!fis)
2212 return;
2214 if (ap->pflags & ATA_PFLAG_FROZEN)
2215 return;
2217 if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2218 nv_swncq_hotplug(ap, fis);
2219 return;
2222 if (!pp->qc_active)
2223 return;
2225 if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2226 return;
2227 ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2229 if (ata_stat & ATA_ERR) {
2230 ata_ehi_clear_desc(ehi);
2231 ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2232 ehi->err_mask |= AC_ERR_DEV;
2233 ehi->serror |= serror;
2234 ehi->action |= ATA_EH_RESET;
2235 ata_port_freeze(ap);
2236 return;
2239 if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2240 /* If the IRQ is a backout, the driver must issue
2241 * the new command again some time later.
2242 */
2243 pp->ncq_flags |= ncq_saw_backout;
2246 if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2247 pp->ncq_flags |= ncq_saw_sdb;
2248 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2249 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2250 ap->print_id, pp->qc_active, pp->dhfis_bits,
2251 pp->dmafis_bits, readl(pp->sactive_block));
2252 if (nv_swncq_sdbfis(ap) < 0)
2253 goto irq_error;
2256 if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2257 /* The interrupt indicates the new command
2258 * was transmitted correctly to the drive.
2260 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2261 pp->ncq_flags |= ncq_saw_d2h;
2262 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2263 ata_ehi_push_desc(ehi, "illegal fis transaction");
2264 ehi->err_mask |= AC_ERR_HSM;
2265 ehi->action |= ATA_EH_RESET;
2266 goto irq_error;
2269 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2270 !(pp->ncq_flags & ncq_saw_dmas)) {
2271 ata_stat = ap->ops->sff_check_status(ap);
2272 if (ata_stat & ATA_BUSY)
2273 goto irq_exit;
2275 if (pp->defer_queue.defer_bits) {
2276 DPRINTK("send next command\n");
2277 qc = nv_swncq_qc_from_dq(ap);
2278 nv_swncq_issue_atacmd(ap, qc);
2283 if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2284 /* program the dma controller with appropriate PRD buffers
2285 * and start the DMA transfer for the requested command.
2286 */
2287 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2288 pp->ncq_flags |= ncq_saw_dmas;
2289 nv_swncq_dmafis(ap);
2292 irq_exit:
2293 return;
2294 irq_error:
2295 ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2296 ata_port_freeze(ap);
2297 return;
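/* Top-level SWNCQ interrupt handler: read the shared MCP55 status register
 * once and give each port its slice of it.  Ports with active NCQ commands
 * take the NCQ path above; idle ports clear everything except the hotplug
 * bits and fall through to the generic nv_host_intr() handling.
 */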
2300 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2302 struct ata_host *host = dev_instance;
2303 unsigned int i;
2304 unsigned int handled = 0;
2305 unsigned long flags;
2306 u32 irq_stat;
2308 spin_lock_irqsave(&host->lock, flags);
2310 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2312 for (i = 0; i < host->n_ports; i++) {
2313 struct ata_port *ap = host->ports[i];
2315 if (ap->link.sactive) {
2316 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2317 handled = 1;
2318 } else {
2319 if (irq_stat) /* preserve hotplug bits */
2320 nv_swncq_irq_clear(ap, 0xfff0);
2322 handled += nv_host_intr(ap, (u8)irq_stat);
2324 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2327 spin_unlock_irqrestore(&host->lock, flags);
2329 return IRQ_RETVAL(handled);
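/* PCI probe: these device IDs also cover plain IDE functions, so the BAR
 * count (six on the SATA controllers) is checked first.  The port type is
 * then chosen from the module parameters (ADMA on CK804, SWNCQ on MCP5x),
 * BAR5 is mapped for SCR and controller registers, the selected mode is
 * initialised and the host is activated, optionally with MSI.
 */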
2332 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2334 static int printed_version;
2335 const struct ata_port_info *ppi[] = { NULL, NULL };
2336 struct nv_pi_priv *ipriv;
2337 struct ata_host *host;
2338 struct nv_host_priv *hpriv;
2339 int rc;
2340 u32 bar;
2341 void __iomem *base;
2342 unsigned long type = ent->driver_data;
2344 // Make sure this is a SATA controller by counting the number of bars
2345 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2346 // it's an IDE controller and we ignore it.
2347 for (bar = 0; bar < 6; bar++)
2348 if (pci_resource_start(pdev, bar) == 0)
2349 return -ENODEV;
2351 if (!printed_version++)
2352 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2354 rc = pcim_enable_device(pdev);
2355 if (rc)
2356 return rc;
2358 /* determine type and allocate host */
2359 if (type == CK804 && adma_enabled) {
2360 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2361 type = ADMA;
2362 } else if (type == MCP5x && swncq_enabled) {
2363 dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2364 type = SWNCQ;
2367 ppi[0] = &nv_port_info[type];
2368 ipriv = ppi[0]->private_data;
2369 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2370 if (rc)
2371 return rc;
2373 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2374 if (!hpriv)
2375 return -ENOMEM;
2376 hpriv->type = type;
2377 host->private_data = hpriv;
2379 /* request and iomap NV_MMIO_BAR */
2380 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2381 if (rc)
2382 return rc;
2384 /* configure SCR access */
2385 base = host->iomap[NV_MMIO_BAR];
2386 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2387 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2389 /* enable SATA space for CK804 */
2390 if (type >= CK804) {
2391 u8 regval;
2393 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2394 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2395 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2398 /* init ADMA */
2399 if (type == ADMA) {
2400 rc = nv_adma_host_init(host);
2401 if (rc)
2402 return rc;
2403 } else if (type == SWNCQ)
2404 nv_swncq_host_init(host);
2406 if (msi_enabled) {
2407 dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
2408 pci_enable_msi(pdev);
2411 pci_set_master(pdev);
2412 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2415 #ifdef CONFIG_PM
2416 static int nv_pci_device_resume(struct pci_dev *pdev)
2418 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2419 struct nv_host_priv *hpriv = host->private_data;
2420 int rc;
2422 rc = ata_pci_device_do_resume(pdev);
2423 if (rc)
2424 return rc;
2426 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2427 if (hpriv->type >= CK804) {
2428 u8 regval;
2430 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2431 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2432 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2434 if (hpriv->type == ADMA) {
2435 u32 tmp32;
2436 struct nv_adma_port_priv *pp;
2437 /* enable/disable ADMA on the ports appropriately */
2438 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2440 pp = host->ports[0]->private_data;
2441 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2442 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2443 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2444 else
2445 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2446 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2447 pp = host->ports[1]->private_data;
2448 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2449 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2450 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2451 else
2452 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2453 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2455 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2459 ata_host_resume(host);
2461 return 0;
2463 #endif
2465 static void nv_ck804_host_stop(struct ata_host *host)
2467 struct pci_dev *pdev = to_pci_dev(host->dev);
2468 u8 regval;
2470 /* disable SATA space for CK804 */
2471 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2472 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2473 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2476 static void nv_adma_host_stop(struct ata_host *host)
2478 struct pci_dev *pdev = to_pci_dev(host->dev);
2479 u32 tmp32;
2481 /* disable ADMA on the ports */
2482 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2483 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2484 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2485 NV_MCP_SATA_CFG_20_PORT1_EN |
2486 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2488 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2490 nv_ck804_host_stop(host);
2493 static int __init nv_init(void)
2495 return pci_register_driver(&nv_pci_driver);
2498 static void __exit nv_exit(void)
2500 pci_unregister_driver(&nv_pci_driver);
2503 module_init(nv_init);
2504 module_exit(nv_exit);
2505 module_param_named(adma, adma_enabled, bool, 0444);
2506 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2507 module_param_named(swncq, swncq_enabled, bool, 0444);
2508 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2509 module_param_named(msi, msi_enabled, bool, 0444);
2510 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");