sata_nv: allow changing queue depth
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.4"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};
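/* Worked example of the ADMA buffer arithmetic above (illustrative only,
   not used by the driver): NV_ADMA_SGTBL_LEN = (1024 - 128) / 16 = 56 APRDs
   in the external table, NV_ADMA_SGTBL_TOTAL_LEN = 56 + 5 = 61 total S/G
   entries per command (5 APRDs live inside the CPB itself), and
   NV_ADMA_PORT_PRIV_DMA_SZ = 32 * (128 + 56 * 16) = 32 KB of coherent DMA
   memory per port, i.e. one CPB plus one external APRD table for each of
   the 32 command tags. */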
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block.
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
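/* Size check (illustrative): the fields above add up to exactly
   NV_ADMA_CPB_SZ - 8 bytes of header, 24 bytes of taskfile (twelve 16-bit
   entries, i.e. three 64-bit words, which is why nv_adma_qc_prep() sets
   cpb->len = 3), 5 * 16 = 80 bytes of inline APRDs, and 16 bytes for
   next_aprd plus trailing reserved space = 128 bytes. */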
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
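/* Example expansion (illustrative): NV_ADMA_CHECK_INTR(gen_ctl, 0) tests
   bit 19 of the ADMA general control/status word, NV_ADMA_CHECK_INTR(gen_ctl, 1)
   tests bit 19 + 12 = 31; each port's interrupt flag sits 12 bits apart in
   NV_ADMA_GEN_CTL. */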
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
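/* With .change_queue_depth wired to ata_scsi_change_queue_depth in both
   templates above, the effective queue depth of an NCQ-capable disk can be
   tuned from userspace at runtime, e.g. (hypothetical device node):

	echo 8 > /sys/block/sda/device/queue_depth

   libata clamps the requested value to what the device and .can_queue
   (NV_ADMA_MAX_CPBS, i.e. 32 tags, in ADMA mode) allow. */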
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Since commands where a result TF is requested are not
	   executed in ADMA mode, the only time this function will be called
	   in ADMA mode will be if a command fails. In this case we
	   don't care about going into register mode with ADMA commands
	   pending, as the commands will all shortly be aborted anyway. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
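/* Illustrative encoding (not driver code): each CPB taskfile entry packs a
   shadow register index in the high byte and the register value in the low
   byte, plus control bits.  A non-NCQ READ DMA EXT would therefore end with
   something like

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | 0x25 | CMDEND);

   with WNB set on the first entry so the controller waits for not-BSY
   before programming the rest, and any unused slots of the 12-entry array
   padded with IGN. */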
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands;
				int pos, error = 0;

				if (ata_tag_valid(ap->active_tag))
					check_commands = 1 << ap->active_tag;
				else
					check_commands = ap->sactive;

				/* Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL); /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL); /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL); /* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL); /* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL); /* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL); /* flush posted write */

	return 0;
}
#endif
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING) ||
	    (qc->flags & ATA_QCFLAG_RESULT_TF))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
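/* Illustrative classification (not driver code): an ATAPI packet command,
   a polled PIO command, or an internal command that wants the result
   taskfile back (ATA_QCFLAG_RESULT_TF) all return 1 above and go through
   the legacy register interface; a DMA-mapped read/write or an
   interrupt-driven no-data command (ATA_PROT_NODATA) returns 0 and may be
   issued through the ADMA engine. */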
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
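/* Illustrative note on the append write above: the low byte of
   NV_ADMA_APPEND holds the CPB tag and the high byte holds (number of CPBs
   to append - 1), so the plain 16-bit write of qc->tag (high byte zero)
   appends exactly one CPB; e.g. writew(0x0005, mmio + NV_ADMA_APPEND)
   would kick tag 5 as a single-CPB append. */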
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL); /* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL); /* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	ppi[0] = &nv_port_info[type];
	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* set 64bit dma masks, may fail */
	if (type == ADMA) {
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
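/* Usage note (illustrative): ADMA is on by default and can be disabled at
   module load time, e.g.

	modprobe sata_nv adma=0

   which keeps CK804/MCP04 parts on the generic BMDMA path and therefore
   also disables NCQ. */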