Committer: Michael Beasley <mike@snafu.setup>
[mikesnafu-overlay.git] / drivers / ata / sata_nv.c
blobed5473bf7a0a2005ea5e156cc1eff7cc723ca54d
1 /*
2 * sata_nv.c - NVIDIA nForce SATA
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
/* Driver identity and the DMA segment boundary used in ADMA mode (32-bit). */
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
/* Register offsets, bit masks and sizing constants for the nForce SATA
 * controllers (legacy BMDMA, CK804/MCP04 ADMA and MCP51/55 SWNCQ). */
enum {
	NV_MMIO_BAR			= 5,	/* BAR holding MMIO registers */

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */

	/* For PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	/* APRDs that fit in the remainder of a 1K slot after the CPB */
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	/* plus the 5 APRDs embedded in the CPB itself */
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,
};
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* DMA address of the segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};
/* Control bits encoded in the high byte of each CPB taskfile word;
 * the low byte carries the register value. */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 — NV_CPB_RESP_* bits */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 — NV_CPB_CTL_* bits */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 — encoded taskfile words */
	struct nv_adma_prd	aprd[5];       /* 32-111 — embedded SG segments */
	__le64			next_aprd;     /* 112-119 — DMA addr of overflow APRDs */
	__le64			reserved3;     /* 120-127 */
};
/* Per-port private state for ADMA-capable controllers. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring (CPU view) */
	dma_addr_t		cpb_dma;	/* CPB ring (device view) */
	struct nv_adma_prd	*aprd;		/* overflow APRD area (CPU view) */
	dma_addr_t		aprd_dma;	/* overflow APRD area (device view) */
	void __iomem		*ctl_block;	/* per-port ADMA registers */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* DMA mask used while in ADMA mode */
	u8			flags;		/* NV_ADMA_PORT_* / ATAPI_SETUP flags */
	int			last_issue_ncq;
};
/* Per-host private state: which nv_host_type this controller is. */
struct nv_host_priv {
	unsigned long		type;
};
/* FIFO of deferred command tags for software NCQ. */
struct defer_queue {
	u32		defer_bits;	/* bitmap of tags currently queued */
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
/* FIS types observed during software-NCQ interrupt analysis. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),	/* D2H register FIS seen */
	ncq_saw_dmas	= (1U << 1),	/* DMA setup FIS seen */
	ncq_saw_sdb	= (1U << 2),	/* set-device-bits FIS seen */
	ncq_saw_backout	= (1U << 3),	/* backout condition seen */
};
/* Per-port private state for software-NCQ (MCP51/MCP55) operation. */
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;	/* tags with commands outstanding */

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;	/* tags that saw a D2H register FIS */
	u32		dmafis_bits;	/* tags that saw a DMA setup FIS */
	u32		sdbfis_bits;	/* tags completed via SDB FIS */

	unsigned int	ncq_flags;	/* ncq_saw_flag_list bits */
};
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
/* Forward declarations for the driver entry points and the per-flavour
 * (generic / nf2 / ck804 / ADMA / SWNCQ) port operations defined below. */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
/* Controller flavour; doubles as the index into nv_port_info[]. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
/* PCI IDs handled by this driver; driver_data is the nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
/* PCI driver registration; suspend uses the generic libata helper while
 * resume needs a driver-specific hook to restore controller state. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
/* SCSI host template for the legacy (non-NCQ) BMDMA flavours. */
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
/* SCSI host template for ADMA: NCQ-capable queue depth, larger SG table
 * and a custom slave_configure to juggle ADMA/legacy DMA restrictions. */
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
/* SCSI host template for software NCQ on MCP51/MCP55. */
static struct scsi_host_template nv_swncq_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= ATA_MAX_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
/* Port ops for the generic flavour: stock BMDMA handlers everywhere except
 * error handling and SCR access, which need NV register offsets. */
static const struct ata_port_operations nv_generic_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
/* Port ops for nForce2/3: as generic, but with NF2-specific freeze/thaw
 * that manage the NF2 interrupt enable register. */
static const struct ata_port_operations nv_nf2_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};
/* Port ops for CK804/MCP04 in legacy mode: CK804 freeze/thaw plus a
 * host_stop hook to restore register state on teardown. */
static const struct ata_port_operations nv_ck804_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};
/* Port ops for ADMA mode: custom qc prep/issue, interrupt handling,
 * port lifetime and (optionally) suspend/resume; tf_read goes through a
 * wrapper that drops back to register mode first. */
static const struct ata_port_operations nv_adma_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
/* Port ops for software NCQ: MCP55 freeze/thaw, SWNCQ qc prep/issue and
 * error handling; data transfer still uses the standard BMDMA helpers. */
static const struct ata_port_operations nv_swncq_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,
	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
/* Per-flavour port configuration, indexed by enum nv_host_type. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
	/* SWNCQ */
	{
		.sht		= &nv_swncq_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.link_flags	= ATA_LFLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.irq_handler	= nv_swncq_interrupt,
	},
};
/* Module metadata and the PCI device table for module autoloading. */
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
/* Feature switches: ADMA is on by default, software NCQ defaults to off. */
static int adma_enabled = 1;
static int swncq_enabled;
/* Switch a port from ADMA mode back to legacy register mode.  Waits
 * (bounded, 20 * 50ns polls) for the controller to report IDLE, clears
 * the GO bit, then waits for LEGACY status; timeouts are only warned
 * about, the mode flag is set regardless. */
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	/* already in register mode — nothing to do */
	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	/* clear GO to stop the ADMA engine */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
/* Switch a port from legacy register mode into ADMA mode: set the GO bit
 * and wait (bounded) for LEGACY to clear and IDLE to assert.  Must not be
 * called while ATAPI setup has disabled ADMA (WARN_ON guards this). */
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	/* already in ADMA mode — nothing to do */
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
/* slave_configure hook for ADMA ports.  ATAPI devices cannot use ADMA, so
 * when one is attached the port is dropped to legacy mode and the DMA
 * boundary/SG-table/DMA-mask restrictions of the legacy engine are applied;
 * since both ports share one PCI function, a 32-bit mask on either port
 * forces it on both.  Returns the result of ata_scsi_slave_config(). */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* per-port enable + posted-write bits in PCI config reg 0x50 */
	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
/* libata check_atapi_dma hook: ATAPI DMA is only allowed once the port has
 * completed ATAPI setup (i.e. ADMA is disabled and legacy DMA is active).
 * Returns nonzero to reject DMA for this qc. */
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
/* tf_read hook for ADMA ports: drop to register mode before reading the
 * taskfile via the standard helper. */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}
/* Encode an ATA taskfile into the 12 CPB taskfile words.  Each word holds
 * a register index in the high byte and its value in the low byte, plus
 * control bits (WNB on the first word, CMDEND on the command word).
 * Unused slots are padded with IGN.  Returns the number of words written
 * (always 12 after padding). */
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	/* command word terminates the list */
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
/* Examine one CPB's response flags after a notifier interrupt.  On error
 * (or when force_err is set) push an EH description and freeze or abort the
 * port; on clean completion, complete the matching qc.  A DONE flag with no
 * qc is treated as a host-state-machine violation.  Returns nonzero if EH
 * was invoked for this CPB, 0 otherwise. */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
/* Handle one port's share of a legacy-mode interrupt status byte.
 * Freezes the port on hotplug events, ignores interrupts that are not
 * ours, clears a spurious device interrupt by reading status, and
 * otherwise dispatches to the standard libata interrupt handler.
 * Returns nonzero if the interrupt was handled. */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
/*
 * Interrupt handler for CK804/MCP04 in ADMA mode.
 *
 * Walks every port on the host: ports that have fallen back to legacy
 * operation (ATAPI setup complete, or register mode with no ADMA
 * notifications pending) are routed through nv_host_intr(); otherwise
 * the ADMA status/notifier registers are examined, error conditions
 * freeze the port, and completed CPBs are reaped via nv_adma_check_cpb().
 * Notifier clear registers for both ports are written at the end, as
 * required by the hardware.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->
							link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
/*
 * ->freeze for ADMA ports: mask CK804 interrupts, then (unless the port
 * has permanently fallen back to legacy ATAPI operation) clear pending
 * notifications and disable the ADMA interrupt enables.
 */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
/*
 * ->thaw for ADMA ports: re-enable CK804 interrupts, then re-enable the
 * ADMA interrupt enables unless the port is in legacy ATAPI operation.
 */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
/*
 * ->irq_clear for ADMA ports.  In legacy ATAPI operation this degrades
 * to the standard BMDMA clear; otherwise it clears the CK804 status
 * bits, the ADMA status register, and the notifier registers.  Per
 * NVIDIA, both ports' notifier clear registers must always be written
 * together, so the other port gets an explicit zero.
 */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1166 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1168 struct nv_adma_port_priv *pp = qc->ap->private_data;
1170 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1171 ata_bmdma_post_internal_cmd(qc);
/*
 * ->port_start for ADMA ports.
 *
 * Allocates per-port private data and the coherent CPB/APRD block,
 * maps the per-port control/notifier register windows, and brings the
 * ADMA engine up in register mode with interrupts enabled.
 *
 * Returns 0 on success or a negative errno.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to put the engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
/*
 * ->port_stop for ADMA ports: shut the engine down by clearing the
 * control register.  All memory was devm/dmam allocated, so no explicit
 * frees are needed here.
 */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}
1281 #ifdef CONFIG_PM
/*
 * PM suspend hook: drop back to register mode (clears GO), zero the CPB
 * fetch count, and disable the whole ADMA channel.
 */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
/*
 * PM resume hook: reprogram the CPB base address and re-run the same
 * register-mode bring-up sequence as nv_adma_port_start().
 */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to put the engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1332 #endif
/*
 * Point the port's taskfile I/O addresses at the MMIO shadow registers
 * inside this port's ADMA register window (registers are spaced 4 bytes
 * apart; the control/altstatus register sits at fixed offset 0x20).
 */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
/*
 * Host-wide ADMA init: turn on ADMA for both ports via PCI config space
 * and set up each port's MMIO taskfile addresses.
 */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
/*
 * Fill one ADMA PRD entry from a scatterlist element.
 *
 * @qc:   command the entry belongs to (direction, element count)
 * @sg:   scatterlist element to translate
 * @idx:  index of this element within the command's s/g list
 * @aprd: PRD entry to fill
 *
 * CONT links an entry to the next one in the same table; it is omitted
 * on the last element and on idx 4, the final in-CPB slot, whose
 * successor is reached through the CPB's next_aprd pointer instead.
 */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
/*
 * Build the PRD entries for a command: the first 5 go into the CPB
 * itself, any overflow goes into this tag's slice of the external APRD
 * table, which the CPB's next_aprd then points at.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
/*
 * Decide whether a command must be issued through legacy ATA register
 * mode (returns 1) instead of the ADMA engine (returns 0).
 */
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
/*
 * ->qc_prep for ADMA ports.  Register-mode commands fall through to the
 * standard prep; ADMA commands get their CPB filled in.  The memory
 * barriers around the ctl_flags/resp_flags updates are required so the
 * controller never observes a half-built CPB marked valid.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
/*
 * ->qc_issue for ADMA ports.  Rejects NCQ + RESULT_TF (reading the
 * taskfile would force a mode switch and kill in-flight commands),
 * routes register-mode commands to the standard issue path, and kicks
 * ADMA commands by writing the tag to the APPEND register.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
/*
 * Interrupt handler for the generic (nForce2/3-era) flavor: walk all
 * enabled ports and hand any active non-polled command to the standard
 * libata interrupt service; otherwise just clear device status.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				/* No request pending?  Clear interrupt status
				   anyway, in case there's one pending. */
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/*
 * Common body for the NF2/CK804 interrupt handlers: dispatch each
 * port's slice of the packed status byte, shifting it down between
 * ports.  Caller holds host->lock.
 */
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
1578 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1580 struct ata_host *host = dev_instance;
1581 u8 irq_stat;
1582 irqreturn_t ret;
1584 spin_lock(&host->lock);
1585 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1586 ret = nv_do_interrupt(host, irq_stat);
1587 spin_unlock(&host->lock);
1589 return ret;
1592 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1594 struct ata_host *host = dev_instance;
1595 u8 irq_stat;
1596 irqreturn_t ret;
1598 spin_lock(&host->lock);
1599 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1600 ret = nv_do_interrupt(host, irq_stat);
1601 spin_unlock(&host->lock);
1603 return ret;
1606 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1608 if (sc_reg > SCR_CONTROL)
1609 return -EINVAL;
1611 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1612 return 0;
1615 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1617 if (sc_reg > SCR_CONTROL)
1618 return -EINVAL;
1620 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1621 return 0;
/*
 * ->freeze for nForce2/3: mask this port's bits in the shared interrupt
 * enable register (located in port 0's SCR window).
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
/*
 * ->thaw for nForce2/3: acknowledge any pending status for this port,
 * then unmask its bits in the shared interrupt enable register.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
/*
 * ->freeze for CK804: mask this port's bits in the MMIO interrupt
 * enable register.
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
/*
 * ->thaw for CK804: acknowledge pending status for this port, then
 * unmask its bits in the MMIO interrupt enable register.
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
/*
 * ->freeze for MCP55: clear pending status, mask this port's interrupt
 * enables (32-bit registers with a wider per-port shift), then do the
 * generic BMDMA freeze.
 */
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_freeze(ap);
}
/*
 * ->thaw for MCP55: clear pending status, unmask this port's interrupt
 * enables, then do the generic BMDMA thaw.
 */
static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_thaw(ap);
}
/*
 * Hardreset that deliberately discards the device classification result
 * (passes a dummy instead of @class), because the signature read after
 * hardreset is unreliable on some of these controllers.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(link, &dummy, deadline);
}
/*
 * Default ->error_handler: standard BMDMA EH with the non-classifying
 * nv_hardreset substituted for the stock hardreset.
 */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
/*
 * ->error_handler for ADMA ports.  If the port is still in ADMA mode,
 * dump diagnostic state for any active commands, drop back to register
 * mode, invalidate every CPB so nothing can be fetched, and pulse a
 * channel reset — then run the common BMDMA EH.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
/*
 * Push a queued command onto the SWNCQ defer queue (ring buffer of
 * tags, ATA_MAX_QUEUE deep) for later issue.
 */
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}
/*
 * Pop the oldest deferred command off the SWNCQ defer queue, or return
 * NULL if the queue is empty.  The consumed slot is poisoned to catch
 * double-pops.
 */
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
/*
 * Reset the per-tag FIS bookkeeping bitmaps used by the SWNCQ state
 * machine.
 */
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}
/*
 * Reset all SWNCQ per-port state: the defer queue, the active-command
 * bookkeeping, and the FIS bitmaps.
 */
static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}
/*
 * Acknowledge SWNCQ interrupt bits by writing @fis to this port's slice
 * of the MCP55 interrupt status register.
 */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
/*
 * Stop the BMDMA engine without having a real qc in hand:
 * ata_bmdma_stop() only dereferences qc->ap, so a stack dummy with the
 * port filled in is sufficient.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1841 static void nv_swncq_ncq_stop(struct ata_port *ap)
1843 struct nv_swncq_port_priv *pp = ap->private_data;
1844 unsigned int i;
1845 u32 sactive;
1846 u32 done_mask;
1848 ata_port_printk(ap, KERN_ERR,
1849 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1850 ap->qc_active, ap->link.sactive);
1851 ata_port_printk(ap, KERN_ERR,
1852 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1853 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1854 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1855 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1857 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1858 ap->ops->check_status(ap),
1859 ioread8(ap->ioaddr.error_addr));
1861 sactive = readl(pp->sactive_block);
1862 done_mask = pp->qc_active ^ sactive;
1864 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1865 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1866 u8 err = 0;
1867 if (pp->qc_active & (1 << i))
1868 err = 0;
1869 else if (done_mask & (1 << i))
1870 err = 1;
1871 else
1872 continue;
1874 ata_port_printk(ap, KERN_ERR,
1875 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1876 (pp->dhfis_bits >> i) & 0x1,
1877 (pp->dmafis_bits >> i) & 0x1,
1878 (pp->sdbfis_bits >> i) & 0x1,
1879 (sactive >> i) & 0x1,
1880 (err ? "error! tag doesn't exit" : " "));
1883 nv_swncq_pp_reinit(ap);
1884 ap->ops->irq_clear(ap);
1885 __ata_bmdma_stop(ap);
1886 nv_swncq_irq_clear(ap, 0xffff);
/*
 * ->error_handler for SWNCQ ports: if NCQ commands are outstanding,
 * dump state / stop NCQ and force a hardreset, then run the common
 * BMDMA EH.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
1902 #ifdef CONFIG_PM
/*
 * PM suspend hook for SWNCQ: ack and mask MCP55 interrupts and turn off
 * the SWNCQ enable bits for both channels.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
/*
 * PM resume hook for SWNCQ: ack stale interrupts, restore the interrupt
 * enable mask, and turn the SWNCQ enable bits back on.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1939 #endif
/*
 * Host-wide SWNCQ init: disable the ECO 398 workaround bit in PCI
 * config space, enable SWNCQ on both channels, unmask the interrupt
 * enables, and clear any stale port interrupts.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
/*
 * ->slave_configure for SWNCQ: after standard libata configuration,
 * drop the queue depth to 1 (effectively disabling NCQ) for Maxtor
 * drives on MCP51, and on MCP55 revisions <= 0xa2, which are known
 * problem combinations.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
/*
 * ->port_start for SWNCQ ports: standard port start plus private data
 * with one PRD table per possible tag, and cached pointers to this
 * port's sactive/irq/tag MMIO locations.
 *
 * Returns 0 on success or a negative errno.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
/*
 * ->qc_prep for SWNCQ: non-NCQ commands use the standard prep; NCQ
 * commands with mapped DMA get their per-tag PRD table filled in.
 */
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_qc_prep(qc);
		return;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	nv_swncq_fill_sg(qc);
}
/*
 * Build the legacy-format PRD table for an NCQ command in this tag's
 * slice of the per-port PRD block, splitting any scatterlist element
 * that would cross a 64KB boundary (hardware PRD entries are limited
 * to 64KB and may not span one).
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final entry as end-of-table */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/*
 * Actually issue one NCQ command to the device: set its bit in the
 * SActive shadow, update the SWNCQ bookkeeping, and write the taskfile.
 * A NULL @qc is a no-op.  Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
/*
 * ->qc_issue for SWNCQ: non-NCQ commands go down the standard path;
 * NCQ commands are issued immediately when the port is idle, otherwise
 * parked on the defer queue.
 */
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_qc_issue_prot(qc);

	DPRINTK("Enter\n");

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}
/*
 * Handle a SWNCQ hotplug/unplug notification: clear SError, record a
 * hotplug event with a matching description, and freeze the port so EH
 * takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
/*
 * Handle a Set Device Bits FIS: complete every NCQ command whose tag the
 * device has cleared from SActive, and (re)issue a command when the
 * controller state requires it.
 *
 * Returns the number of commands completed, or a negative errno when
 * error handling must take over (the caller then freezes the port).
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_SOFTRESET;
		return -EINVAL;
	}

	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* tags we track as active but the device has cleared are done */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		/* a tag turned active that we never issued — HSM violation */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_HARDRESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			/* retire the tag from all per-port tracking masks */
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		/* queue fully drained — reset the per-port SWNCQ state */
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	/* some issued command still awaits its D2H register FIS — wait */
	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device-to-host register FIS,
		 * the driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* reissue the most recently issued command */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2250 static inline u32 nv_swncq_tag(struct ata_port *ap)
2252 struct nv_swncq_port_priv *pp = ap->private_data;
2253 u32 tag;
2255 tag = readb(pp->tag_block) >> 2;
2256 return (tag & 0x1f);
2259 static int nv_swncq_dmafis(struct ata_port *ap)
2261 struct ata_queued_cmd *qc;
2262 unsigned int rw;
2263 u8 dmactl;
2264 u32 tag;
2265 struct nv_swncq_port_priv *pp = ap->private_data;
2267 __ata_bmdma_stop(ap);
2268 tag = nv_swncq_tag(ap);
2270 DPRINTK("dma setup tag 0x%x\n", tag);
2271 qc = ata_qc_from_tag(ap, tag);
2273 if (unlikely(!qc))
2274 return 0;
2276 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2278 /* load PRD table addr. */
2279 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2280 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2282 /* specify data direction, triple-check start bit is clear */
2283 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2284 dmactl &= ~ATA_DMA_WR;
2285 if (!rw)
2286 dmactl |= ATA_DMA_WR;
2288 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2290 return 1;
/*
 * Per-port interrupt handling for SWNCQ mode.  @fis is this port's
 * slice of the MCP55 interrupt status register.  The order of checks
 * below (hotplug, device error, backout, SDB, D2H, DMA setup) is
 * deliberate; do not reorder.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	/* latch device status before acknowledging the interrupt */
	ata_stat = ap->ops->check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* frozen port belongs to EH — ignore further interrupts */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	/* read-and-clear SError; bail out if SCR access fails */
	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error — freeze and let EH recover */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_SOFTRESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		/* complete finished commands; rc < 0 means EH is needed */
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* D2H arriving after SDB/backout without a FIS
			 * reinit is treated as a protocol violation */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_HARDRESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			/* drive accepted the command but no DMA phase is
			 * pending yet — safe to push the next deferred qc */
			ata_stat = ap->ops->check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2394 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2396 struct ata_host *host = dev_instance;
2397 unsigned int i;
2398 unsigned int handled = 0;
2399 unsigned long flags;
2400 u32 irq_stat;
2402 spin_lock_irqsave(&host->lock, flags);
2404 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2406 for (i = 0; i < host->n_ports; i++) {
2407 struct ata_port *ap = host->ports[i];
2409 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2410 if (ap->link.sactive) {
2411 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2412 handled = 1;
2413 } else {
2414 if (irq_stat) /* reserve Hotplug */
2415 nv_swncq_irq_clear(ap, 0xfff0);
2417 handled += nv_host_intr(ap, (u8)irq_stat);
2420 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2423 spin_unlock_irqrestore(&host->lock, flags);
2425 return IRQ_RETVAL(handled);
/*
 * PCI probe: decide the operating flavor (GENERIC/ADMA/SWNCQ per device
 * id and module parameters), prepare the SFF host, map the MMIO BAR for
 * SCR access, and activate the host with the flavor's IRQ handler.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;	/* SWNCQ disabled by module param */
	}

	ppi[0] = &nv_port_info[type];
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* devm allocation — freed automatically on detach */
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}
2510 #ifdef CONFIG_PM
/*
 * PM resume: restore the controller's PCI config state that a real
 * suspend (not just a freeze) loses — SATA register space enable and
 * the per-port ADMA enable bits — then resume the libata host.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* only a full suspend wipes the config bits we set at probe */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			/* re-enable SATA register space */
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			/* port 0: ADMA off while ATAPI setup is complete */
			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			/* port 1: same rule */
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
2558 #endif
2560 static void nv_ck804_host_stop(struct ata_host *host)
2562 struct pci_dev *pdev = to_pci_dev(host->dev);
2563 u8 regval;
2565 /* disable SATA space for CK804 */
2566 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2567 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2568 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2571 static void nv_adma_host_stop(struct ata_host *host)
2573 struct pci_dev *pdev = to_pci_dev(host->dev);
2574 u32 tmp32;
2576 /* disable ADMA on the ports */
2577 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2578 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2579 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2580 NV_MCP_SATA_CFG_20_PORT1_EN |
2581 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2583 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2585 nv_ck804_host_stop(host);
/* Module entry point: register the PCI driver. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
module_init(nv_init);
module_exit(nv_exit);

/* Module parameters, read-only after load (perm 0444). */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");