/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
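
	/* Worked example of the constants above: NV_ADMA_SGTBL_LEN is
	 * (1024 - 128) / 16 = 56 external APRD entries per tag, so each
	 * tag owns 128 + 56 * 16 = 1024 bytes and the whole per-port
	 * allocation is 32 * 1024 bytes = 32 KiB.  The "+ 5" in
	 * NV_ADMA_SGTBL_TOTAL_LEN counts the five APRDs embedded in the
	 * CPB itself (see struct nv_adma_cpb below). */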

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
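
/* A quick consistency check on the layout above: the trailing offset
 * comments span bytes 0-127, i.e. exactly NV_ADMA_CPB_SZ, and the five
 * embedded APRDs at bytes 32-111 are the in-CPB segments counted by the
 * "+ 5" in NV_ADMA_SGTBL_TOTAL_LEN. */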

struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
};

struct nv_host_priv {
	unsigned long		type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
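
/* For the two ports this hardware has, NV_ADMA_CHECK_INTR reduces to
 * testing bit 19 (port 0) or bit 31 (port 1) of the ADMA general
 * control register read back in nv_adma_interrupt(). */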

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
	.suspend		= ata_scsi_device_suspend,
	.resume			= ata_scsi_device_resume,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
	.host_stop		= nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
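
/* The two helpers above implement the handshake between the legacy and
 * ADMA programming interfaces: clearing NV_ADMA_CTL_GO and polling (up
 * to 20 x 50 ns) for NV_ADMA_STAT_LEGACY drops the port into register
 * mode, while setting GO and waiting for LEGACY to clear and IDLE to
 * assert hands it back to the ADMA engine.  NV_ADMA_PORT_REGISTER_MODE
 * in pp->flags caches the current mode so the polling is skipped when
 * no transition is needed. */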

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

	return idx;
}
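
/* Illustration (a reading of the code above, not from hardware docs):
 * each CPB taskfile entry is a 16-bit word of
 * (shadow register index << 8) | value.  A non-LBA48 READ DMA command
 * (opcode 0xC8) would therefore end with
 *
 *	cpb[idx] = cpu_to_le16((ATA_REG_CMD << 8) | 0xC8 | CMDEND);
 *
 * with CMDEND marking the final register write in the list and WNB on
 * the first entry telling the controller to wait for not-BSY before
 * starting. */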

static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				/* check CPBs for completed commands */

				if (ata_tag_valid(ap->active_tag)) {
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag,
						notifier_error & (1 << ap->active_tag));
				} else {
					int pos, error = 0;
					u32 active = ap->sactive;

					while ((pos = ffs(active)) && !error) {
						pos--;
						error = nv_adma_check_cpb(ap, pos,
							notifier_error & (1 << pos));
						active &= ~(1 << pos);
					}
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);
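
	/* The double shift above is the usual kernel idiom for taking
	   the high 32 bits of a dma_addr_t: a plain ">> 32" would be
	   undefined behavior (and a compiler warning) when dma_addr_t
	   is only 32 bits wide, whereas ">> 16 >> 16" simply yields
	   zero there. */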

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}
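
/* Spelling out the flag logic above: the last segment of a command gets
 * NV_APRD_END and every other segment gets NV_APRD_CONT, except entry 4
 * (the fifth and final APRD inside the CPB), which presumably chains to
 * the external table through next_aprd rather than to an adjacent
 * entry. */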

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
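
/* In other words: polled commands, and anything on a port that has been
 * set up for ATAPI, always fall back to register mode; DMA-mapped
 * commands and interrupt-driven non-data commands are the only ones
 * handed to the ADMA engine. */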

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	rc = -ENOMEM;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}

static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if(rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
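
/* ADMA use can be disabled at load time, e.g. "modprobe sata_nv adma=0"
 * (or "sata_nv.adma=0" on the kernel command line when built in), in
 * which case CK804/MCP04 parts are driven through the legacy interface
 * like the other chips. */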