drivers/ata/sata_nv.c
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.2"
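
/* The ADMA engine tolerates s/g segments crossing any boundary below 4GB,
   so the DMA boundary mask is 0xffffffff rather than the 64KB boundary
   (ATA_DMA_BOUNDARY) that legacy BMDMA PRD entries must respect. */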
#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	u8			flags;
};
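
/* Each port's ADMA interrupt status lives in the general control register
   at bit 19 + 12 * port; this tests whether that port's interrupt is
   asserted. */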
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VDEVICE(NVIDIA, 0x045c), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), GENERIC }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), GENERIC }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), GENERIC }, /* MCP67 */
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ }	/* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.remove			= ata_pci_remove_one,
};
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= nv_ck804_host_stop,
};
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.host_stop		= nv_adma_host_stop,
};
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
						unsigned int port_no)
{
	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
	return mmio;
}

static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
{
	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
}

static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
{
	return (ap->host->mmio_base + NV_ADMA_GEN);
}

static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
{
	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
}
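
/* The port runs in one of two modes: register mode, where it behaves like a
   conventional BMDMA channel, and ADMA mode, where CPBs are fetched and
   executed.  The two helpers below switch modes by toggling the GO bit in
   NV_ADMA_CTL and track the current mode in pp->flags. */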
static void nv_adma_register_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
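
/* Encode an ATA taskfile as the sequence of 16-bit (register << 8 | value)
   entries that the ADMA engine executes from the CPB: WNB on the first entry
   makes the engine wait for not-BSY, and CMDEND marks the final write to the
   command register.  Returns the number of entries written. */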
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	return idx;
}
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if(complete || force_err)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if(likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if(qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));

			if(have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	int handled;

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	handled = ata_host_intr(ap, qc);
	if (unlikely(!handled)) {
		/* spurious, clear it */
		ata_check_status(ap);
	}

	return 1;
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = nv_adma_ctl_block(ap);
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/** Check CPBs for completed commands */

				if(ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					while( (pos = ffs(active)) ) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)) );
						active &= ~(1 << pos );
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		writel(notifier_clears[0],
			nv_adma_notifier_clear_block(host->ports[0]));
		writel(notifier_clears[1],
			nv_adma_notifier_clear_block(host->ports[1]));
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       nv_adma_notifier_clear_block(ap));

	/* clear legacy status */
	outb(inb(dma_stat_addr), dma_stat_addr);
}
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				 &mem_dma, GFP_KERNEL);

	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL );	/* flush posted write */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(ap);

	VPRINTK("ENTER\n");

	writew(0, mmio + NV_ADMA_CTL);

	ap->private_data = NULL;
	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
	kfree(pp);
	ata_port_stop(ap);
}
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= (unsigned long) mmio;
	ioport->data_addr	= (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= (unsigned long) mmio + 0x20;
}
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	for (i = 0; i < probe_ent->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
		u16 tmp;

		/* enable interrupt, clear reset if not already clear */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	return 0;
}
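
/* Scatter/gather entries are ADMA Physical Region Descriptors (APRDs): the
   first five fit inside the CPB itself, and any remainder go into this tag's
   slot in the external APRD table, which the CPB points at via next_aprd. */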
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending? Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
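
/* nForce2/3 and CK804 pack both ports' interrupt bits into one status byte;
   walk the ports, shifting the byte right by NV_INT_PORT_SHIFT per port so
   each nv_host_intr() call sees its own port's 4-bit field. */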
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;

		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if( cpb->ctl_flags || cpb->resp_flags )
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	int pci_dev_busy = 0;
	int rc;
	u32 bar;
	unsigned long base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if ( !printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out_disable;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
	}

	rc = -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		goto err_out_regions;

	probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
	if (!probe_ent->mmio_base) {
		rc = -EIO;
		goto err_out_free_ent;
	}

	base = (unsigned long)probe_ent->mmio_base;

	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			goto err_out_iounmap;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		goto err_out_iounmap;

	kfree(probe_ent);

	return 0;

err_out_iounmap:
	pci_iounmap(pdev, probe_ent->mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
err_out:
	return rc;
}
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host);
}
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");