sata_nv: kill old private BMDMA helper functions
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.3"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, // 0 = INT, 1 = SMI

        // For PCI config register 20
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                          (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),
};
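/*
 * Layout arithmetic for the ADMA constants above: each command tag owns a
 * 1024-byte slice of the port's DMA area -- a 128-byte CPB followed by
 * (1024 - 128) / 16 = 56 externally-stored APRDs.  Together with the 5
 * APRDs embedded in the CPB itself, that yields NV_ADMA_SGTBL_TOTAL_LEN =
 * 61 SG entries per command, and NV_ADMA_PORT_PRIV_DMA_SZ works out to
 * 32 * 1024 = 32KB of coherent DMA memory per port.
 */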
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};
enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};
struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
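/* Each port's ADMA interrupt-pending flag lives in the NV_ADMA_GEN_CTL
 * register at bit (19 + 12 * port); the NV_ADMA_CHECK_INTR() macro above
 * simply tests that bit in a GEN_CTL snapshot. */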
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one(struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA
};
static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

        { } /* terminate list */
};
static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
        .remove                 = nv_remove_one,
};
static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
        .suspend                = ata_scsi_device_suspend,
        .resume                 = ata_scsi_device_resume,
};
static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
        .suspend                = ata_scsi_device_suspend,
        .resume                 = ata_scsi_device_resume,
};
static const struct ata_port_operations nv_generic_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_handler            = nv_generic_interrupt,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};
static const struct ata_port_operations nv_nf2_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_handler            = nv_nf2_interrupt,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};
static const struct ata_port_operations nv_ck804_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_handler            = nv_ck804_interrupt,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};
static const struct ata_port_operations nv_adma_ops = {
        .port_disable           = ata_port_disable,
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_handler            = nv_adma_interrupt,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .irq_ack                = ata_irq_ack,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
        .host_stop              = nv_adma_host_stop,
};
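/* Note that nv_adma_ops reuses the CK804 freeze/thaw hooks: the legacy
 * per-port interrupt enables at NV_INT_ENABLE_CK804 still gate interrupt
 * delivery in ADMA mode, in particular the NV_INT_STATUS_CK804 path that
 * nv_adma_interrupt() falls back on while a port sits in register mode. */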
static struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_HRST_TO_RESUME |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
        },
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
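/*
 * The ADMA engine has two modes of operation.  In register (legacy) mode
 * the port behaves like an ordinary BMDMA taskfile interface; in ADMA
 * mode it executes commands from the CPB list.  The two helpers below
 * switch between them by toggling NV_ADMA_CTL_GO and then polling
 * NV_ADMA_STAT (up to 20 tries, 50ns apart) for the expected IDLE/LEGACY
 * indication, warning on timeout rather than failing outright.
 */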
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                        status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
               !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
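/*
 * Each __le16 slot in the CPB's tf[] array encodes a single taskfile
 * register write: the data byte in bits 0-7, the register address shifted
 * into the DA0-DA2/CS1n positions from nv_adma_regbits (hence the "<< 8"
 * below), plus optional control bits -- WNB to wait for not-BSY first,
 * CMDEND to mark the final entry.  Leftover slots are padded with IGN.
 */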
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, ": ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, ": CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, ": CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (flags & NV_CPB_RESP_DONE) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        /* Grab the ATA port status for non-NCQ commands.
                           For NCQ commands the current status may have nothing to do with
                           the command just completed. */
                        if (qc->tf.protocol != ATA_PROT_NCQ) {
                                u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
                                qc->err_mask |= ac_err_mask(ata_status);
                        }
                        DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
                                qc->err_mask);
                        ata_qc_complete(qc);
                }
        }
        return 0;
}
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}
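/*
 * Main ADMA interrupt handler.  Per port: if the port has dropped back to
 * register mode, delegate to nv_host_intr() above (forcing NV_INT_DEV on
 * when a command is active, since that bit is unreliable in this mode).
 * Otherwise read NOTIFIER/NOTIFIER_ERROR to learn which CPBs finished,
 * clear NV_ADMA_STAT *before* inspecting any CPB so later completions
 * re-raise the interrupt, freeze on hotplug/timeout/SError, and complete
 * the qc for every tag flagged in the notifiers.
 */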
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if in ATA register mode, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->active_tag))
                                        /** NV_INT_DEV indication seems unreliable at times
                                            at least in ADMA mode. Force it on always when a
                                            command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->eh_info;

                                ata_ehi_clear_desc(ehi);
                                ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, ": timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, ": hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, ": hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, ": SError");
                                }
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR)) {
                                u32 check_commands = notifier | notifier_error;
                                int pos, error = 0;
                                /** Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 status = readw(mmio + NV_ADMA_STAT);
        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
        void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

        /* clear ADMA status */
        writew(status, mmio + NV_ADMA_STAT);
        writel(notifier | notifier_error,
               pp->notifier_clear_block);

        /* clear legacy status */
        iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}
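/*
 * Per-port setup.  The coherent DMA chunk allocated below is carved up as
 * described above the enum block: 32 CPBs (one per tag) at the base --
 * whose bus address is programmed into NV_ADMA_CPB_BASE_LOW/HIGH -- then
 * the external APRD tables, one NV_ADMA_SGTBL_SZ slab per tag.
 */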
static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
               NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}
static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
               NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
        void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
        struct ata_ioports *ioport = &probe_ent->port[port];

        VPRINTK("ENTER\n");

        mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

        ioport->cmd_addr        = mmio;
        ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
        ioport->error_addr      =
        ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
        ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
        ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
        ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
        ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
        ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
        ioport->status_addr     =
        ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
        ioport->altstatus_addr  =
        ioport->ctl_addr        = mmio + 0x20;
}
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
        struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
        unsigned int i;
        u32 tmp32;

        VPRINTK("ENTER\n");

        /* enable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
                 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                 NV_MCP_SATA_CFG_20_PORT1_EN |
                 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        for (i = 0; i < probe_ent->n_ports; i++)
                nv_adma_setup_port(probe_ent, i);

        return 0;
}
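/*
 * APRD flag handling below: the last SG element of a command gets
 * NV_APRD_END, and every other element gets NV_APRD_CONT except entry 4
 * (the fifth and final APRD embedded in the CPB), presumably because the
 * jump to the external table is described by next_aprd rather than a
 * CONT flag.
 */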
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
                              struct scatterlist *sg,
                              int idx,
                              struct nv_adma_prd *aprd)
{
        u8 flags = 0;
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                flags |= NV_APRD_WRITE;
        if (idx == qc->n_elem - 1)
                flags |= NV_APRD_END;
        else if (idx != 4)
                flags |= NV_APRD_CONT;

        aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
        aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
        aprd->flags = flags;
        aprd->packet_len = 0;
}
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        unsigned int idx;
        struct nv_adma_prd *aprd;
        struct scatterlist *sg;

        VPRINTK("ENTER\n");

        idx = 0;

        ata_for_each_sg(sg, qc) {
                aprd = (idx < 5) ? &cpb->aprd[idx] :
                       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
                nv_adma_fill_aprd(qc, sg, idx, aprd);
                idx++;
        }
        if (idx > 5)
                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
        else
                cpb->next_aprd = cpu_to_le64(0);
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        /* ADMA engine can only be used for non-ATAPI DMA commands,
           or interrupt-driven no-data commands. */
        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (qc->tf.flags & ATA_TFLAG_POLLING))
                return 1;

        if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
            (qc->tf.protocol == ATA_PROT_NODATA))
                return 0;

        return 1;
}
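/*
 * CPB construction ordering in nv_adma_qc_prep() is deliberate: resp_flags
 * is primed with NV_CPB_RESP_DONE and ctl_flags zeroed (each behind a
 * wmb()) before the body is filled in, and NV_CPB_CTL_CPB_VALID is only
 * set -- again behind a wmb() -- once the CPB is fully written, so the
 * controller can never fetch a half-built CPB.
 */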
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
        u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
                       NV_CPB_CTL_IEN;

        if (nv_adma_use_reg_mode(qc)) {
                nv_adma_register_mode(qc->ap);
                ata_qc_prep(qc);
                return;
        }

        cpb->resp_flags = NV_CPB_RESP_DONE;
        wmb();
        cpb->ctl_flags = 0;
        wmb();

        cpb->len                = 3;
        cpb->tag                = qc->tag;
        cpb->next_cpb_idx       = 0;

        /* turn on NCQ flags for NCQ commands */
        if (qc->tf.protocol == ATA_PROT_NCQ)
                ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

        VPRINTK("qc->flags = 0x%lx\n", qc->flags);

        nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

        if (qc->flags & ATA_QCFLAG_DMAMAP) {
                nv_adma_fill_sg(qc, cpb);
                ctl_flags |= NV_CPB_CTL_APRD_VALID;
        } else
                memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

        /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
           finished filling in all of the contents */
        wmb();
        cpb->ctl_flags = ctl_flags;
        wmb();
        cpb->resp_flags = 0;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

        VPRINTK("ENTER\n");

        if (nv_adma_use_reg_mode(qc)) {
                /* use ATA register mode */
                VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
                nv_adma_register_mode(qc->ap);
                return ata_qc_issue_prot(qc);
        } else
                nv_adma_mode(qc->ap);

        /* write append register, command tag in lower 8 bits
           and (number of cpbs to append -1) in top 8 bits */
        wmb();

        if (curr_ncq != pp->last_issue_ncq) {
                /* Seems to need some delay before switching between NCQ and non-NCQ
                   commands, else we get command timeouts and such. */
                udelay(20);
                pp->last_issue_ncq = curr_ncq;
        }

        writew(qc->tag, mmio + NV_ADMA_APPEND);

        DPRINTK("Issued tag %u\n", qc->tag);

        return 0;
}
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        unsigned int i;
        unsigned int handled = 0;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;

                ap = host->ports[i];
                if (ap &&
                    !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct ata_queued_cmd *qc;

                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
                                handled += ata_host_intr(ap, qc);
                        else
                                // No request pending?  Clear interrupt status
                                // anyway, in case there's one pending.
                                ap->ops->check_status(ap);
                }
        }

        spin_unlock_irqrestore(&host->lock, flags);

        return IRQ_RETVAL(handled);
}
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
        int i, handled = 0;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                if (ap && !(ap->flags & ATA_FLAG_DISABLED))
                        handled += nv_host_intr(ap, irq_stat);

                irq_stat >>= NV_INT_PORT_SHIFT;
        }

        return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        u8 irq_stat;
        irqreturn_t ret;

        spin_lock(&host->lock);
        irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
        ret = nv_do_interrupt(host, irq_stat);
        spin_unlock(&host->lock);

        return ret;
}
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
        if (sc_reg > SCR_CONTROL)
                return 0xffffffffU;

        return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return;

        iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}
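/*
 * freeze/thaw simply mask and unmask a port's 4-bit slice of the shared
 * interrupt-enable register (NV_INT_ENABLE for nForce2/3, the CK804
 * variant below), shifted by port_no * NV_INT_PORT_SHIFT; thaw also acks
 * anything pending in the matching status slice first.
 */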
static void nv_nf2_freeze(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask &= ~(NV_INT_ALL << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
        void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

        mask = ioread8(scr_addr + NV_INT_ENABLE);
        mask |= (NV_INT_MASK << shift);
        iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_ck804_freeze(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask &= ~(NV_INT_ALL << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
        void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
        int shift = ap->port_no * NV_INT_PORT_SHIFT;
        u8 mask;

        writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

        mask = readb(mmio_base + NV_INT_ENABLE_CK804);
        mask |= (NV_INT_MASK << shift);
        writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
        unsigned int dummy;

        /* SATA hardreset fails to retrieve proper device signature on
         * some controllers.  Don't classify on hardreset.  For more
         * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
         */
        return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
                void __iomem *mmio = pp->ctl_block;
                int i;
                u16 tmp;

                if (ata_tag_valid(ap->active_tag) || ap->sactive) {
                        u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
                        u32 status = readw(mmio + NV_ADMA_STAT);
                        u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
                        u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

                        ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
                                "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
                                "next cpb count 0x%X next cpb idx 0x%x\n",
                                notifier, notifier_error, gen_ctl, status,
                                cpb_count, next_cpb_idx);

                        for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
                                struct nv_adma_cpb *cpb = &pp->cpb[i];
                                if ((ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
                                    ap->sactive & (1 << i))
                                        ata_port_printk(ap, KERN_ERR,
                                                "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
                                                i, cpb->ctl_flags, cpb->resp_flags);
                        }
                }

                /* Push us back into port register mode for error handling. */
                nv_adma_register_mode(ap);

                /* Mark all of the CPBs as invalid to prevent them from being executed */
                for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
                        pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

                /* clear CPB fetch count */
                writew(0, mmio + NV_ADMA_CPB_COUNT);

                /* Reset channel */
                tmp = readw(mmio + NV_ADMA_CTL);
                writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
                udelay(1);
                writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
                readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        }

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
                           nv_hardreset, ata_std_postreset);
}
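/*
 * Probe.  Everything is managed-resource based: the pcim_/devm_
 * allocations below are released automatically on failure or removal.
 * ADMA is only attempted on CK804 or newer parts, and only if the module
 * parameter allows it; in that case a 64-bit DMA mask is tried first,
 * falling back to the 32-bit ATA_DMA_MASK used by the legacy BMDMA engine.
 */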
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version = 0;
        struct ata_port_info *ppi[2];
        struct ata_probe_ent *probe_ent;
        struct nv_host_priv *hpriv;
        int rc;
        u32 bar;
        void __iomem *base;
        unsigned long type = ent->driver_data;
        int mask_set = 0;

        // Make sure this is a SATA controller by counting the number of bars
        // (NVIDIA SATA controllers will always have six bars).  Otherwise,
        // it's an IDE controller and we ignore it.
        for (bar = 0; bar < 6; bar++)
                if (pci_resource_start(pdev, bar) == 0)
                        return -ENODEV;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                pcim_pin_device(pdev);
                return rc;
        }

        if (type >= CK804 && adma_enabled) {
                dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
                type = ADMA;
                if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
                    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
                        mask_set = 1;
        }

        if (!mask_set) {
                rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
                if (rc)
                        return rc;
                rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
                if (rc)
                        return rc;
        }

        rc = -ENOMEM;

        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;

        ppi[0] = ppi[1] = &nv_port_info[type];
        probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent)
                return -ENOMEM;

        if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
                return -EIO;
        probe_ent->iomap = pcim_iomap_table(pdev);

        probe_ent->private_data = hpriv;
        hpriv->type = type;

        base = probe_ent->iomap[NV_MMIO_BAR];
        probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
        probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

        /* enable SATA space for CK804 */
        if (type >= CK804) {
                u8 regval;

                pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
        }

        pci_set_master(pdev);

        if (type == ADMA) {
                rc = nv_adma_host_init(probe_ent);
                if (rc)
                        return rc;
        }

        rc = ata_device_add(probe_ent);
        if (rc != NV_PORTS)
                return -ENODEV;

        devm_kfree(&pdev->dev, probe_ent);
        return 0;
}
static void nv_remove_one(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;

        ata_pci_remove_one(pdev);
        kfree(hpriv);
}
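/*
 * On resume from suspend the MCP_SATA_CFG_20 register has lost its state,
 * so re-enable the SATA register space for CK804+ parts and re-apply the
 * per-port ADMA enables according to each port's recorded
 * NV_ADMA_ATAPI_SETUP_COMPLETE flag before resuming the host.
 */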
static int nv_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        struct nv_host_priv *hpriv = host->private_data;
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                if (hpriv->type >= CK804) {
                        u8 regval;

                        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
                        regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
                        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
                }
                if (hpriv->type == ADMA) {
                        u32 tmp32;
                        struct nv_adma_port_priv *pp;
                        /* enable/disable ADMA on the ports appropriately */
                        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

                        pp = host->ports[0]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        else
                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
                                           NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
                        pp = host->ports[1]->private_data;
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                                tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
                        else
                                tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
                                           NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

                        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
                }
        }

        ata_host_resume(host);

        return 0;
}
static void nv_ck804_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u8 regval;

        /* disable SATA space for CK804 */
        pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
        regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
        pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        u32 tmp32;

        /* disable ADMA on the ports */
        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
        tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
                   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
                   NV_MCP_SATA_CFG_20_PORT1_EN |
                   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

        pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

        nv_ck804_host_stop(host);
}
static int __init nv_init(void)
{
        return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
        pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");