/*
 * [libata] Move some PCI IDs from sata_nv to ahci
 * drivers/ata/sata_nv.c
 */
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.2"

/* ADMA engine can address up to 4GB segments (32-bit boundary mask) */
#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL
enum {
        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, /* 0 = INT, 1 = SMI */

        /* For PCI config register 20 */
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        /* 1KB per tag: one CPB plus as many external APRDs as fit */
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                          (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space  */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags (nv_adma_port_priv.flags) */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),
};
172 /* ADMA Physical Region Descriptor - one SG segment */
173 struct nv_adma_prd {
174 __le64 addr;
175 __le32 len;
176 u8 flags;
177 u8 packet_len;
178 __le16 reserved;
/* Bits used in the 16-bit register-write entries of a CPB taskfile */
enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};
191 /* ADMA Command Parameter Block
192 The first 5 SG segments are stored inside the Command Parameter Block itself.
193 If there are more than 5 segments the remainder are stored in a separate
194 memory area indicated by next_aprd. */
195 struct nv_adma_cpb {
196 u8 resp_flags; /* 0 */
197 u8 reserved1; /* 1 */
198 u8 ctl_flags; /* 2 */
199 /* len is length of taskfile in 64 bit words */
200 u8 len; /* 3 */
201 u8 tag; /* 4 */
202 u8 next_cpb_idx; /* 5 */
203 __le16 reserved2; /* 6-7 */
204 __le16 tf[12]; /* 8-31 */
205 struct nv_adma_prd aprd[5]; /* 32-111 */
206 __le64 next_aprd; /* 112-119 */
207 __le64 reserved3; /* 120-127 */
211 struct nv_adma_port_priv {
212 struct nv_adma_cpb *cpb;
213 dma_addr_t cpb_dma;
214 struct nv_adma_prd *aprd;
215 dma_addr_t aprd_dma;
216 u8 flags;
/* Nonzero if the per-port ADMA interrupt bit is set in the general ctl reg */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
221 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
222 static void nv_ck804_host_stop(struct ata_host *host);
223 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
224 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
225 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
226 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
227 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
229 static void nv_nf2_freeze(struct ata_port *ap);
230 static void nv_nf2_thaw(struct ata_port *ap);
231 static void nv_ck804_freeze(struct ata_port *ap);
232 static void nv_ck804_thaw(struct ata_port *ap);
233 static void nv_error_handler(struct ata_port *ap);
234 static int nv_adma_slave_config(struct scsi_device *sdev);
235 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
236 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
237 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
238 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
239 static void nv_adma_irq_clear(struct ata_port *ap);
240 static int nv_adma_port_start(struct ata_port *ap);
241 static void nv_adma_port_stop(struct ata_port *ap);
242 static void nv_adma_error_handler(struct ata_port *ap);
243 static void nv_adma_host_stop(struct ata_host *host);
244 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
245 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
246 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
247 static u8 nv_adma_bmdma_status(struct ata_port *ap);
/* Index into nv_port_info[], chosen by PCI ID table entry */
enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA
};
258 static const struct pci_device_id nv_pci_tbl[] = {
259 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
260 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
261 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
262 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
263 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
264 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
265 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
266 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
267 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
268 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
269 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
270 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
271 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
272 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
273 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
274 PCI_ANY_ID, PCI_ANY_ID,
275 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
276 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
277 PCI_ANY_ID, PCI_ANY_ID,
278 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
280 { } /* terminate list */
283 static struct pci_driver nv_pci_driver = {
284 .name = DRV_NAME,
285 .id_table = nv_pci_tbl,
286 .probe = nv_init_one,
287 .remove = ata_pci_remove_one,
290 static struct scsi_host_template nv_sht = {
291 .module = THIS_MODULE,
292 .name = DRV_NAME,
293 .ioctl = ata_scsi_ioctl,
294 .queuecommand = ata_scsi_queuecmd,
295 .can_queue = ATA_DEF_QUEUE,
296 .this_id = ATA_SHT_THIS_ID,
297 .sg_tablesize = LIBATA_MAX_PRD,
298 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
299 .emulated = ATA_SHT_EMULATED,
300 .use_clustering = ATA_SHT_USE_CLUSTERING,
301 .proc_name = DRV_NAME,
302 .dma_boundary = ATA_DMA_BOUNDARY,
303 .slave_configure = ata_scsi_slave_config,
304 .slave_destroy = ata_scsi_slave_destroy,
305 .bios_param = ata_std_bios_param,
308 static struct scsi_host_template nv_adma_sht = {
309 .module = THIS_MODULE,
310 .name = DRV_NAME,
311 .ioctl = ata_scsi_ioctl,
312 .queuecommand = ata_scsi_queuecmd,
313 .can_queue = NV_ADMA_MAX_CPBS,
314 .this_id = ATA_SHT_THIS_ID,
315 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
316 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
317 .emulated = ATA_SHT_EMULATED,
318 .use_clustering = ATA_SHT_USE_CLUSTERING,
319 .proc_name = DRV_NAME,
320 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
321 .slave_configure = nv_adma_slave_config,
322 .slave_destroy = ata_scsi_slave_destroy,
323 .bios_param = ata_std_bios_param,
326 static const struct ata_port_operations nv_generic_ops = {
327 .port_disable = ata_port_disable,
328 .tf_load = ata_tf_load,
329 .tf_read = ata_tf_read,
330 .exec_command = ata_exec_command,
331 .check_status = ata_check_status,
332 .dev_select = ata_std_dev_select,
333 .bmdma_setup = ata_bmdma_setup,
334 .bmdma_start = ata_bmdma_start,
335 .bmdma_stop = ata_bmdma_stop,
336 .bmdma_status = ata_bmdma_status,
337 .qc_prep = ata_qc_prep,
338 .qc_issue = ata_qc_issue_prot,
339 .freeze = ata_bmdma_freeze,
340 .thaw = ata_bmdma_thaw,
341 .error_handler = nv_error_handler,
342 .post_internal_cmd = ata_bmdma_post_internal_cmd,
343 .data_xfer = ata_pio_data_xfer,
344 .irq_handler = nv_generic_interrupt,
345 .irq_clear = ata_bmdma_irq_clear,
346 .scr_read = nv_scr_read,
347 .scr_write = nv_scr_write,
348 .port_start = ata_port_start,
349 .port_stop = ata_port_stop,
350 .host_stop = ata_pci_host_stop,
353 static const struct ata_port_operations nv_nf2_ops = {
354 .port_disable = ata_port_disable,
355 .tf_load = ata_tf_load,
356 .tf_read = ata_tf_read,
357 .exec_command = ata_exec_command,
358 .check_status = ata_check_status,
359 .dev_select = ata_std_dev_select,
360 .bmdma_setup = ata_bmdma_setup,
361 .bmdma_start = ata_bmdma_start,
362 .bmdma_stop = ata_bmdma_stop,
363 .bmdma_status = ata_bmdma_status,
364 .qc_prep = ata_qc_prep,
365 .qc_issue = ata_qc_issue_prot,
366 .freeze = nv_nf2_freeze,
367 .thaw = nv_nf2_thaw,
368 .error_handler = nv_error_handler,
369 .post_internal_cmd = ata_bmdma_post_internal_cmd,
370 .data_xfer = ata_pio_data_xfer,
371 .irq_handler = nv_nf2_interrupt,
372 .irq_clear = ata_bmdma_irq_clear,
373 .scr_read = nv_scr_read,
374 .scr_write = nv_scr_write,
375 .port_start = ata_port_start,
376 .port_stop = ata_port_stop,
377 .host_stop = ata_pci_host_stop,
380 static const struct ata_port_operations nv_ck804_ops = {
381 .port_disable = ata_port_disable,
382 .tf_load = ata_tf_load,
383 .tf_read = ata_tf_read,
384 .exec_command = ata_exec_command,
385 .check_status = ata_check_status,
386 .dev_select = ata_std_dev_select,
387 .bmdma_setup = ata_bmdma_setup,
388 .bmdma_start = ata_bmdma_start,
389 .bmdma_stop = ata_bmdma_stop,
390 .bmdma_status = ata_bmdma_status,
391 .qc_prep = ata_qc_prep,
392 .qc_issue = ata_qc_issue_prot,
393 .freeze = nv_ck804_freeze,
394 .thaw = nv_ck804_thaw,
395 .error_handler = nv_error_handler,
396 .post_internal_cmd = ata_bmdma_post_internal_cmd,
397 .data_xfer = ata_pio_data_xfer,
398 .irq_handler = nv_ck804_interrupt,
399 .irq_clear = ata_bmdma_irq_clear,
400 .scr_read = nv_scr_read,
401 .scr_write = nv_scr_write,
402 .port_start = ata_port_start,
403 .port_stop = ata_port_stop,
404 .host_stop = nv_ck804_host_stop,
407 static const struct ata_port_operations nv_adma_ops = {
408 .port_disable = ata_port_disable,
409 .tf_load = ata_tf_load,
410 .tf_read = ata_tf_read,
411 .check_atapi_dma = nv_adma_check_atapi_dma,
412 .exec_command = ata_exec_command,
413 .check_status = ata_check_status,
414 .dev_select = ata_std_dev_select,
415 .bmdma_setup = nv_adma_bmdma_setup,
416 .bmdma_start = nv_adma_bmdma_start,
417 .bmdma_stop = nv_adma_bmdma_stop,
418 .bmdma_status = nv_adma_bmdma_status,
419 .qc_prep = nv_adma_qc_prep,
420 .qc_issue = nv_adma_qc_issue,
421 .freeze = nv_ck804_freeze,
422 .thaw = nv_ck804_thaw,
423 .error_handler = nv_adma_error_handler,
424 .post_internal_cmd = nv_adma_bmdma_stop,
425 .data_xfer = ata_mmio_data_xfer,
426 .irq_handler = nv_adma_interrupt,
427 .irq_clear = nv_adma_irq_clear,
428 .scr_read = nv_scr_read,
429 .scr_write = nv_scr_write,
430 .port_start = nv_adma_port_start,
431 .port_stop = nv_adma_port_stop,
432 .host_stop = nv_adma_host_stop,
435 static struct ata_port_info nv_port_info[] = {
436 /* generic */
438 .sht = &nv_sht,
439 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
440 ATA_FLAG_HRST_TO_RESUME,
441 .pio_mask = NV_PIO_MASK,
442 .mwdma_mask = NV_MWDMA_MASK,
443 .udma_mask = NV_UDMA_MASK,
444 .port_ops = &nv_generic_ops,
446 /* nforce2/3 */
448 .sht = &nv_sht,
449 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
450 ATA_FLAG_HRST_TO_RESUME,
451 .pio_mask = NV_PIO_MASK,
452 .mwdma_mask = NV_MWDMA_MASK,
453 .udma_mask = NV_UDMA_MASK,
454 .port_ops = &nv_nf2_ops,
456 /* ck804 */
458 .sht = &nv_sht,
459 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
460 ATA_FLAG_HRST_TO_RESUME,
461 .pio_mask = NV_PIO_MASK,
462 .mwdma_mask = NV_MWDMA_MASK,
463 .udma_mask = NV_UDMA_MASK,
464 .port_ops = &nv_ck804_ops,
466 /* ADMA */
468 .sht = &nv_adma_sht,
469 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
470 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
471 .pio_mask = NV_PIO_MASK,
472 .mwdma_mask = NV_MWDMA_MASK,
473 .udma_mask = NV_UDMA_MASK,
474 .port_ops = &nv_adma_ops,
478 MODULE_AUTHOR("NVIDIA");
479 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
480 MODULE_LICENSE("GPL");
481 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
482 MODULE_VERSION(DRV_VERSION);
484 static int adma_enabled = 1;
486 static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
487 unsigned int port_no)
489 mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
490 return mmio;
493 static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
495 return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
498 static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
500 return (ap->host->mmio_base + NV_ADMA_GEN);
503 static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
505 return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
508 static void nv_adma_register_mode(struct ata_port *ap)
510 void __iomem *mmio = nv_adma_ctl_block(ap);
511 struct nv_adma_port_priv *pp = ap->private_data;
512 u16 tmp;
514 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
515 return;
517 tmp = readw(mmio + NV_ADMA_CTL);
518 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
520 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
523 static void nv_adma_mode(struct ata_port *ap)
525 void __iomem *mmio = nv_adma_ctl_block(ap);
526 struct nv_adma_port_priv *pp = ap->private_data;
527 u16 tmp;
529 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
530 return;
532 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
534 tmp = readw(mmio + NV_ADMA_CTL);
535 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
537 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
540 static int nv_adma_slave_config(struct scsi_device *sdev)
542 struct ata_port *ap = ata_shost_to_port(sdev->host);
543 struct nv_adma_port_priv *pp = ap->private_data;
544 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
545 u64 bounce_limit;
546 unsigned long segment_boundary;
547 unsigned short sg_tablesize;
548 int rc;
549 int adma_enable;
550 u32 current_reg, new_reg, config_mask;
552 rc = ata_scsi_slave_config(sdev);
554 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
555 /* Not a proper libata device, ignore */
556 return rc;
558 if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
560 * NVIDIA reports that ADMA mode does not support ATAPI commands.
561 * Therefore ATAPI commands are sent through the legacy interface.
562 * However, the legacy interface only supports 32-bit DMA.
563 * Restrict DMA parameters as required by the legacy interface
564 * when an ATAPI device is connected.
566 bounce_limit = ATA_DMA_MASK;
567 segment_boundary = ATA_DMA_BOUNDARY;
568 /* Subtract 1 since an extra entry may be needed for padding, see
569 libata-scsi.c */
570 sg_tablesize = LIBATA_MAX_PRD - 1;
572 /* Since the legacy DMA engine is in use, we need to disable ADMA
573 on the port. */
574 adma_enable = 0;
575 nv_adma_register_mode(ap);
577 else {
578 bounce_limit = *ap->dev->dma_mask;
579 segment_boundary = NV_ADMA_DMA_BOUNDARY;
580 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
581 adma_enable = 1;
584 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
586 if(ap->port_no == 1)
587 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
588 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
589 else
590 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
591 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
593 if(adma_enable) {
594 new_reg = current_reg | config_mask;
595 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
597 else {
598 new_reg = current_reg & ~config_mask;
599 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
602 if(current_reg != new_reg)
603 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
605 blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
606 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
607 blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
608 ata_port_printk(ap, KERN_INFO,
609 "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
610 (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
611 return rc;
614 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
616 struct nv_adma_port_priv *pp = qc->ap->private_data;
617 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
620 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
622 unsigned int idx = 0;
624 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
626 if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
627 cpb[idx++] = cpu_to_le16(IGN);
628 cpb[idx++] = cpu_to_le16(IGN);
629 cpb[idx++] = cpu_to_le16(IGN);
630 cpb[idx++] = cpu_to_le16(IGN);
631 cpb[idx++] = cpu_to_le16(IGN);
633 else {
634 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
635 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
636 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
637 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
638 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
640 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
641 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
642 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
643 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
644 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
646 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
648 return idx;
651 static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
653 struct nv_adma_port_priv *pp = ap->private_data;
654 int complete = 0, have_err = 0;
655 u8 flags = pp->cpb[cpb_num].resp_flags;
657 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
659 if (flags & NV_CPB_RESP_DONE) {
660 VPRINTK("CPB flags done, flags=0x%x\n", flags);
661 complete = 1;
663 if (flags & NV_CPB_RESP_ATA_ERR) {
664 ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
665 have_err = 1;
666 complete = 1;
668 if (flags & NV_CPB_RESP_CMD_ERR) {
669 ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
670 have_err = 1;
671 complete = 1;
673 if (flags & NV_CPB_RESP_CPB_ERR) {
674 ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
675 have_err = 1;
676 complete = 1;
678 if(complete || force_err)
680 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
681 if(likely(qc)) {
682 u8 ata_status = 0;
683 /* Only use the ATA port status for non-NCQ commands.
684 For NCQ commands the current status may have nothing to do with
685 the command just completed. */
686 if(qc->tf.protocol != ATA_PROT_NCQ)
687 ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
689 if(have_err || force_err)
690 ata_status |= ATA_ERR;
692 qc->err_mask |= ac_err_mask(ata_status);
693 DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
694 qc->err_mask);
695 ata_qc_complete(qc);
700 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
702 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
703 int handled;
705 /* freeze if hotplugged */
706 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
707 ata_port_freeze(ap);
708 return 1;
711 /* bail out if not our interrupt */
712 if (!(irq_stat & NV_INT_DEV))
713 return 0;
715 /* DEV interrupt w/ no active qc? */
716 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
717 ata_check_status(ap);
718 return 1;
721 /* handle interrupt */
722 handled = ata_host_intr(ap, qc);
723 if (unlikely(!handled)) {
724 /* spurious, clear it */
725 ata_check_status(ap);
728 return 1;
731 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
733 struct ata_host *host = dev_instance;
734 int i, handled = 0;
735 u32 notifier_clears[2];
737 spin_lock(&host->lock);
739 for (i = 0; i < host->n_ports; i++) {
740 struct ata_port *ap = host->ports[i];
741 notifier_clears[i] = 0;
743 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
744 struct nv_adma_port_priv *pp = ap->private_data;
745 void __iomem *mmio = nv_adma_ctl_block(ap);
746 u16 status;
747 u32 gen_ctl;
748 int have_global_err = 0;
749 u32 notifier, notifier_error;
751 /* if in ATA register mode, use standard ata interrupt handler */
752 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
753 u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
754 >> (NV_INT_PORT_SHIFT * i);
755 handled += nv_host_intr(ap, irq_stat);
756 continue;
759 notifier = readl(mmio + NV_ADMA_NOTIFIER);
760 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
761 notifier_clears[i] = notifier | notifier_error;
763 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
765 if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
766 !notifier_error)
767 /* Nothing to do */
768 continue;
770 status = readw(mmio + NV_ADMA_STAT);
772 /* Clear status. Ensure the controller sees the clearing before we start
773 looking at any of the CPB statuses, so that any CPB completions after
774 this point in the handler will raise another interrupt. */
775 writew(status, mmio + NV_ADMA_STAT);
776 readw(mmio + NV_ADMA_STAT); /* flush posted write */
777 rmb();
779 /* freeze if hotplugged */
780 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
781 ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
782 ata_port_freeze(ap);
783 handled++;
784 continue;
787 if (status & NV_ADMA_STAT_TIMEOUT) {
788 ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
789 have_global_err = 1;
791 if (status & NV_ADMA_STAT_CPBERR) {
792 ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
793 have_global_err = 1;
795 if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
796 /** Check CPBs for completed commands */
798 if(ata_tag_valid(ap->active_tag))
799 /* Non-NCQ command */
800 nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
801 (notifier_error & (1 << ap->active_tag)));
802 else {
803 int pos;
804 u32 active = ap->sactive;
805 while( (pos = ffs(active)) ) {
806 pos--;
807 nv_adma_check_cpb(ap, pos, have_global_err ||
808 (notifier_error & (1 << pos)) );
809 active &= ~(1 << pos );
814 handled++; /* irq handled if we got here */
818 if(notifier_clears[0] || notifier_clears[1]) {
819 /* Note: Both notifier clear registers must be written
820 if either is set, even if one is zero, according to NVIDIA. */
821 writel(notifier_clears[0],
822 nv_adma_notifier_clear_block(host->ports[0]));
823 writel(notifier_clears[1],
824 nv_adma_notifier_clear_block(host->ports[1]));
827 spin_unlock(&host->lock);
829 return IRQ_RETVAL(handled);
832 static void nv_adma_irq_clear(struct ata_port *ap)
834 void __iomem *mmio = nv_adma_ctl_block(ap);
835 u16 status = readw(mmio + NV_ADMA_STAT);
836 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
837 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
838 unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
840 /* clear ADMA status */
841 writew(status, mmio + NV_ADMA_STAT);
842 writel(notifier | notifier_error,
843 nv_adma_notifier_clear_block(ap));
845 /** clear legacy status */
846 outb(inb(dma_stat_addr), dma_stat_addr);
849 static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
851 struct ata_port *ap = qc->ap;
852 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
853 struct nv_adma_port_priv *pp = ap->private_data;
854 u8 dmactl;
856 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
857 WARN_ON(1);
858 return;
861 /* load PRD table addr. */
862 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
864 /* specify data direction, triple-check start bit is clear */
865 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
866 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
867 if (!rw)
868 dmactl |= ATA_DMA_WR;
870 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
872 /* issue r/w command */
873 ata_exec_command(ap, &qc->tf);
876 static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
878 struct ata_port *ap = qc->ap;
879 struct nv_adma_port_priv *pp = ap->private_data;
880 u8 dmactl;
882 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
883 WARN_ON(1);
884 return;
887 /* start host DMA transaction */
888 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
889 outb(dmactl | ATA_DMA_START,
890 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
893 static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
895 struct ata_port *ap = qc->ap;
896 struct nv_adma_port_priv *pp = ap->private_data;
898 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
899 return;
901 /* clear start/stop bit */
902 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
903 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
905 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
906 ata_altstatus(ap); /* dummy read */
909 static u8 nv_adma_bmdma_status(struct ata_port *ap)
911 struct nv_adma_port_priv *pp = ap->private_data;
913 WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));
915 return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
918 static int nv_adma_port_start(struct ata_port *ap)
920 struct device *dev = ap->host->dev;
921 struct nv_adma_port_priv *pp;
922 int rc;
923 void *mem;
924 dma_addr_t mem_dma;
925 void __iomem *mmio = nv_adma_ctl_block(ap);
926 u16 tmp;
928 VPRINTK("ENTER\n");
930 rc = ata_port_start(ap);
931 if (rc)
932 return rc;
934 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
935 if (!pp) {
936 rc = -ENOMEM;
937 goto err_out;
940 mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
941 &mem_dma, GFP_KERNEL);
943 if (!mem) {
944 rc = -ENOMEM;
945 goto err_out_kfree;
947 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
950 * First item in chunk of DMA memory:
951 * 128-byte command parameter block (CPB)
952 * one for each command tag
954 pp->cpb = mem;
955 pp->cpb_dma = mem_dma;
957 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
958 writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
960 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
961 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
964 * Second item: block of ADMA_SGTBL_LEN s/g entries
966 pp->aprd = mem;
967 pp->aprd_dma = mem_dma;
969 ap->private_data = pp;
971 /* clear any outstanding interrupt conditions */
972 writew(0xffff, mmio + NV_ADMA_STAT);
974 /* initialize port variables */
975 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
977 /* clear CPB fetch count */
978 writew(0, mmio + NV_ADMA_CPB_COUNT);
980 /* clear GO for register mode */
981 tmp = readw(mmio + NV_ADMA_CTL);
982 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
984 tmp = readw(mmio + NV_ADMA_CTL);
985 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
986 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
987 udelay(1);
988 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
989 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
991 return 0;
993 err_out_kfree:
994 kfree(pp);
995 err_out:
996 ata_port_stop(ap);
997 return rc;
1000 static void nv_adma_port_stop(struct ata_port *ap)
1002 struct device *dev = ap->host->dev;
1003 struct nv_adma_port_priv *pp = ap->private_data;
1004 void __iomem *mmio = nv_adma_ctl_block(ap);
1006 VPRINTK("ENTER\n");
1008 writew(0, mmio + NV_ADMA_CTL);
1010 ap->private_data = NULL;
1011 dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
1012 kfree(pp);
1013 ata_port_stop(ap);
1017 static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
1019 void __iomem *mmio = probe_ent->mmio_base;
1020 struct ata_ioports *ioport = &probe_ent->port[port];
1022 VPRINTK("ENTER\n");
1024 mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
1026 ioport->cmd_addr = (unsigned long) mmio;
1027 ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
1028 ioport->error_addr =
1029 ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
1030 ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
1031 ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
1032 ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
1033 ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
1034 ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
1035 ioport->status_addr =
1036 ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
1037 ioport->altstatus_addr =
1038 ioport->ctl_addr = (unsigned long) mmio + 0x20;
/* One-time, host-wide ADMA bring-up: enable ADMA in PCI config space for
 * both ports, point each port's taskfile at its ADMA register window,
 * then enable per-channel interrupts.  Always returns 0. */
1041 static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
1043 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1044 unsigned int i;
1045 u32 tmp32;
1047 VPRINTK("ENTER\n");
1049 /* enable ADMA on the ports */
1050 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1051 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1052 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1053 NV_MCP_SATA_CFG_20_PORT1_EN |
1054 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1056 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
/* Route each port's I/O addresses into its ADMA register window. */
1058 for (i = 0; i < probe_ent->n_ports; i++)
1059 nv_adma_setup_port(probe_ent, i);
1061 for (i = 0; i < probe_ent->n_ports; i++) {
1062 void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
1063 u16 tmp;
1065 /* enable the ADMA interrupt; the read-modify-write leaves all other CTL bits untouched */
1066 tmp = readw(mmio + NV_ADMA_CTL);
1067 writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1070 return 0;
1073 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1074 struct scatterlist *sg,
1075 int idx,
1076 struct nv_adma_prd *aprd)
1078 u8 flags;
1080 memset(aprd, 0, sizeof(struct nv_adma_prd));
1082 flags = 0;
1083 if (qc->tf.flags & ATA_TFLAG_WRITE)
1084 flags |= NV_APRD_WRITE;
1085 if (idx == qc->n_elem - 1)
1086 flags |= NV_APRD_END;
1087 else if (idx != 4)
1088 flags |= NV_APRD_CONT;
1090 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1091 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1092 aprd->flags = flags;
/* Translate the command's scatter/gather list into ADMA PRD entries.
 * The first 5 entries live inline in the CPB itself; any further
 * entries go into this tag's slice of the per-port external APRD
 * table, which the CPB then points at via next_aprd. */
1095 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1097 struct nv_adma_port_priv *pp = qc->ap->private_data;
1098 unsigned int idx;
1099 struct nv_adma_prd *aprd;
1100 struct scatterlist *sg;
1102 VPRINTK("ENTER\n");
1104 idx = 0;
1106 ata_for_each_sg(sg, qc) {
/* entries 0..4 inline in the CPB, 5.. in the external per-tag table */
1107 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
1108 nv_adma_fill_aprd(qc, sg, idx, aprd);
1109 idx++;
/* only hook up the external table if we actually spilled into it */
1111 if (idx > 5)
1112 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
/* Build the ADMA Command Parameter Block for a queued command.
 * Non-DMA-mapped commands (and ATAPI setup) cannot go through ADMA;
 * those fall back to register mode and the generic ata_qc_prep(). */
1115 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1117 struct nv_adma_port_priv *pp = qc->ap->private_data;
1118 struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1119 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1120 NV_CPB_CTL_APRD_VALID |
1121 NV_CPB_CTL_IEN;
1123 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
/* legacy path: switch the channel to register mode and use stock prep */
1125 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1126 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1127 nv_adma_register_mode(qc->ap);
1128 ata_qc_prep(qc);
1129 return;
1132 memset(cpb, 0, sizeof(struct nv_adma_cpb));
/* fixed CPB length of 3; meaning is hardware-defined — no public docs */
1134 cpb->len = 3;
1135 cpb->tag = qc->tag;
1136 cpb->next_cpb_idx = 0;
1138 /* turn on NCQ flags for NCQ commands */
1139 if (qc->tf.protocol == ATA_PROT_NCQ)
1140 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1142 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1144 nv_adma_fill_sg(qc, cpb);
1146 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1147 finished filling in all of the contents */
1148 wmb();
1149 cpb->ctl_flags = ctl_flags;
/* Issue a prepared command.  Commands that bypassed ADMA prep are
 * issued through the legacy register path; ADMA commands are kicked
 * off by writing the tag to the APPEND register. */
1152 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1154 struct nv_adma_port_priv *pp = qc->ap->private_data;
1155 void __iomem *mmio = nv_adma_ctl_block(qc->ap);
1157 VPRINTK("ENTER\n");
1159 if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
1160 (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
1161 /* use ATA register mode */
1162 VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
1163 nv_adma_register_mode(qc->ap);
1164 return ata_qc_issue_prot(qc);
1165 } else
1166 nv_adma_mode(qc->ap);
1168 /* write append register, command tag in lower 8 bits
1169 and (number of cpbs to append -1) in top 8 bits */
/* the wmb() orders the CPB memory writes (see nv_adma_qc_prep) before
 * the MMIO write that makes the hardware fetch them */
1170 wmb();
1171 writew(qc->tag, mmio + NV_ADMA_APPEND);
1173 DPRINTK("Issued tag %u\n",qc->tag);
1175 return 0;
1178 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1180 struct ata_host *host = dev_instance;
1181 unsigned int i;
1182 unsigned int handled = 0;
1183 unsigned long flags;
1185 spin_lock_irqsave(&host->lock, flags);
1187 for (i = 0; i < host->n_ports; i++) {
1188 struct ata_port *ap;
1190 ap = host->ports[i];
1191 if (ap &&
1192 !(ap->flags & ATA_FLAG_DISABLED)) {
1193 struct ata_queued_cmd *qc;
1195 qc = ata_qc_from_tag(ap, ap->active_tag);
1196 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1197 handled += ata_host_intr(ap, qc);
1198 else
1199 // No request pending? Clear interrupt status
1200 // anyway, in case there's one pending.
1201 ap->ops->check_status(ap);
1206 spin_unlock_irqrestore(&host->lock, flags);
1208 return IRQ_RETVAL(handled);
1211 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1213 int i, handled = 0;
1215 for (i = 0; i < host->n_ports; i++) {
1216 struct ata_port *ap = host->ports[i];
1218 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1219 handled += nv_host_intr(ap, irq_stat);
1221 irq_stat >>= NV_INT_PORT_SHIFT;
1224 return IRQ_RETVAL(handled);
1227 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1229 struct ata_host *host = dev_instance;
1230 u8 irq_stat;
1231 irqreturn_t ret;
1233 spin_lock(&host->lock);
1234 irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1235 ret = nv_do_interrupt(host, irq_stat);
1236 spin_unlock(&host->lock);
1238 return ret;
1241 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1243 struct ata_host *host = dev_instance;
1244 u8 irq_stat;
1245 irqreturn_t ret;
1247 spin_lock(&host->lock);
1248 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
1249 ret = nv_do_interrupt(host, irq_stat);
1250 spin_unlock(&host->lock);
1252 return ret;
1255 static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
1257 if (sc_reg > SCR_CONTROL)
1258 return 0xffffffffU;
1260 return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
1263 static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
1265 if (sc_reg > SCR_CONTROL)
1266 return;
1268 iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
1271 static void nv_nf2_freeze(struct ata_port *ap)
1273 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1274 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1275 u8 mask;
1277 mask = inb(scr_addr + NV_INT_ENABLE);
1278 mask &= ~(NV_INT_ALL << shift);
1279 outb(mask, scr_addr + NV_INT_ENABLE);
1282 static void nv_nf2_thaw(struct ata_port *ap)
1284 unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1285 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1286 u8 mask;
1288 outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1290 mask = inb(scr_addr + NV_INT_ENABLE);
1291 mask |= (NV_INT_MASK << shift);
1292 outb(mask, scr_addr + NV_INT_ENABLE);
1295 static void nv_ck804_freeze(struct ata_port *ap)
1297 void __iomem *mmio_base = ap->host->mmio_base;
1298 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1299 u8 mask;
1301 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1302 mask &= ~(NV_INT_ALL << shift);
1303 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1306 static void nv_ck804_thaw(struct ata_port *ap)
1308 void __iomem *mmio_base = ap->host->mmio_base;
1309 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1310 u8 mask;
1312 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1314 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1315 mask |= (NV_INT_MASK << shift);
1316 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int discard;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers, so deliberately throw the classification
	 * away instead of storing it through *class.  For more info,
	 * see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &discard);
}
/* Error handler for the non-ADMA flavours: standard BMDMA EH with the
 * classification-suppressing nv_hardreset plugged in. */
1330 static void nv_error_handler(struct ata_port *ap)
1332 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1333 nv_hardreset, ata_std_postreset);
/* ADMA error handler.  If the channel is still in ADMA mode, dump the
 * notifier/status registers and every in-flight CPB for diagnosis, drop
 * back to register mode, invalidate all CPBs, and pulse the channel
 * reset before running the standard BMDMA EH. */
1336 static void nv_adma_error_handler(struct ata_port *ap)
1338 struct nv_adma_port_priv *pp = ap->private_data;
1339 if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1340 void __iomem *mmio = nv_adma_ctl_block(ap);
1341 int i;
1342 u16 tmp;
1344 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1345 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1346 u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
1347 u32 status = readw(mmio + NV_ADMA_STAT);
1349 ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1350 "notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
1351 notifier, notifier_error, gen_ctl, status);
/* log every CPB that looks active (either side has flags set) */
1353 for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
1354 struct nv_adma_cpb *cpb = &pp->cpb[i];
1355 if( cpb->ctl_flags || cpb->resp_flags )
1356 ata_port_printk(ap, KERN_ERR,
1357 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1358 i, cpb->ctl_flags, cpb->resp_flags);
1361 /* Push us back into port register mode for error handling. */
1362 nv_adma_register_mode(ap);
1364 ata_port_printk(ap, KERN_ERR, "Resetting port\n");
1366 /* Mark all of the CPBs as invalid to prevent them from being executed */
1367 for( i=0;i<NV_ADMA_MAX_CPBS;i++)
1368 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1370 /* clear CPB fetch count */
1371 writew(0, mmio + NV_ADMA_CPB_COUNT);
1373 /* Reset channel */
/* pulse CHANNEL_RESET: assert, flush, brief delay, deassert, flush */
1374 tmp = readw(mmio + NV_ADMA_CTL);
1375 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1376 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
1377 udelay(1);
1378 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1379 readl( mmio + NV_ADMA_CTL ); /* flush posted write */
/* finally run the stock BMDMA error handler */
1382 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1383 nv_hardreset, ata_std_postreset);
/* PCI probe entry point.  Enables the device, sets DMA masks (64-bit
 * when ADMA is usable, 32-bit otherwise), maps BAR 5 for the SCR/ADMA
 * registers, optionally enables CK804 SATA space and ADMA, and
 * registers both ports with libata.  Uses goto-chain cleanup; returns
 * 0 on success or a negative errno. */
1386 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1388 static int printed_version = 0;
1389 struct ata_port_info *ppi[2];
1390 struct ata_probe_ent *probe_ent;
1391 int pci_dev_busy = 0;
1392 int rc;
1393 u32 bar;
1394 unsigned long base;
1395 unsigned long type = ent->driver_data;
1396 int mask_set = 0;
1398 // Make sure this is a SATA controller by counting the number of bars
1399 // (NVIDIA SATA controllers will always have six bars). Otherwise,
1400 // it's an IDE controller and we ignore it.
1401 for (bar=0; bar<6; bar++)
1402 if (pci_resource_start(pdev, bar) == 0)
1403 return -ENODEV;
1405 if ( !printed_version++)
1406 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1408 rc = pci_enable_device(pdev);
1409 if (rc)
1410 goto err_out;
1412 rc = pci_request_regions(pdev, DRV_NAME);
1413 if (rc) {
/* regions busy: remember not to disable the device on the way out */
1414 pci_dev_busy = 1;
1415 goto err_out_disable;
/* CK804 or newer with the module parameter set: try ADMA with 64-bit
 * DMA masks; fall back to the 32-bit ATA mask below if that fails */
1418 if(type >= CK804 && adma_enabled) {
1419 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
1420 type = ADMA;
1421 if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
1422 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
1423 mask_set = 1;
1426 if(!mask_set) {
1427 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1428 if (rc)
1429 goto err_out_regions;
1430 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1431 if (rc)
1432 goto err_out_regions;
/* default error code for the allocation/registration failures below */
1435 rc = -ENOMEM;
1437 ppi[0] = ppi[1] = &nv_port_info[type];
1438 probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
1439 if (!probe_ent)
1440 goto err_out_regions;
/* BAR 5 holds the SCR registers (and the ADMA registers on CK804+) */
1442 probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
1443 if (!probe_ent->mmio_base) {
1444 rc = -EIO;
1445 goto err_out_free_ent;
1448 base = (unsigned long)probe_ent->mmio_base;
1450 probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
1451 probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1453 /* enable SATA space for CK804 */
1454 if (type >= CK804) {
1455 u8 regval;
1457 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1458 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1459 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1462 pci_set_master(pdev);
1464 if (type == ADMA) {
1465 rc = nv_adma_host_init(probe_ent);
1466 if (rc)
1467 goto err_out_iounmap;
/* ata_device_add returns the number of ports it registered; anything
 * short of NV_PORTS is treated as failure (rc still holds -ENOMEM) */
1470 rc = ata_device_add(probe_ent);
1471 if (rc != NV_PORTS)
1472 goto err_out_iounmap;
/* probe_ent is only a carrier for ata_device_add; free it on success */
1474 kfree(probe_ent);
1476 return 0;
1478 err_out_iounmap:
1479 pci_iounmap(pdev, probe_ent->mmio_base);
1480 err_out_free_ent:
1481 kfree(probe_ent);
1482 err_out_regions:
1483 pci_release_regions(pdev);
1484 err_out_disable:
1485 if (!pci_dev_busy)
1486 pci_disable_device(pdev);
1487 err_out:
1488 return rc;
1491 static void nv_ck804_host_stop(struct ata_host *host)
1493 struct pci_dev *pdev = to_pci_dev(host->dev);
1494 u8 regval;
1496 /* disable SATA space for CK804 */
1497 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
1498 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
1499 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
1501 ata_pci_host_stop(host);
/* ADMA host teardown: mirror of nv_adma_host_init.  Disable per-channel
 * interrupts, turn ADMA off in PCI config space, then run the CK804
 * host-stop (which also hides the SATA register space). */
1504 static void nv_adma_host_stop(struct ata_host *host)
1506 struct pci_dev *pdev = to_pci_dev(host->dev);
1507 int i;
1508 u32 tmp32;
1510 for (i = 0; i < host->n_ports; i++) {
1511 void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
1512 u16 tmp;
1514 /* disable interrupt */
1515 tmp = readw(mmio + NV_ADMA_CTL);
1516 writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
1519 /* disable ADMA on the ports */
1520 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1521 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
1522 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1523 NV_MCP_SATA_CFG_20_PORT1_EN |
1524 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
1526 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1528 nv_ck804_host_stop(host);
/* Module init: register the PCI driver with the core. */
1531 static int __init nv_init(void)
1533 return pci_register_driver(&nv_pci_driver);
/* Module exit: unregister the PCI driver. */
1536 static void __exit nv_exit(void)
1538 pci_unregister_driver(&nv_pci_driver);
/* Module entry/exit hookup and the "adma" module parameter that gates
 * ADMA mode in nv_init_one() (read-only at 0444). */
1541 module_init(nv_init);
1542 module_exit(nv_exit);
1543 module_param_named(adma, adma_enabled, bool, 0444);
1544 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");