/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					  NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,
};
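/*
 * Layout of the per-port DMA area implied by the sizes above: the first
 * NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ bytes hold one Command Parameter Block
 * per tag, followed by one scatter/gather table of NV_ADMA_SGTBL_LEN APRD
 * entries per tag, used when a command's S/G list does not fit in the five
 * APRDs embedded in the CPB itself.  NV_ADMA_PORT_PRIV_DMA_SZ is the total
 * amount allocated per port in nv_adma_port_start().
 */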
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;
	int			last_issue_ncq;
};
struct nv_host_priv {
	unsigned long		type;
};

struct defer_queue {
	u32			defer_bits;
	unsigned int		head;
	unsigned int		tail;
	unsigned int		tag[ATA_MAX_QUEUE];
};
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;
};
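/*
 * NV_ADMA_CHECK_INTR tests the ADMA interrupt flag for a port in the ADMA
 * general control/status register: each port's status is offset by 12 bits,
 * so the flag sits at bit 19 for port 0 and bit 31 for port 1.
 */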
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
enum nv_host_type {
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	SWNCQ,
};
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= ata_pci_remove_one,
};
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
static struct ata_port_operations nv_common_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};
/* OSDL bz11195 reports that link doesn't come online after hardreset
 * on generic nv's and there have been several other similar reports
 * on linux-ide.  Disable hardreset for generic nv's.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &nv_common_ops,
	.hardreset		= ATA_OP_NULL,
};
/* OSDL bz3352 reports that nf2/3 controllers can't determine device
 * signature reliably.  Also, the following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * Debounce with hotplug timing and request follow-up SRST.
 */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_common_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.hardreset		= nv_nf2_hardreset,
};
/* CK804 finally gets hardreset right */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_common_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,

	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,

	.host_stop		= nv_adma_host_stop,
};
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,

	.port_start		= nv_swncq_port_start,
};
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};
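/*
 * NV_PI_PRIV builds a pointer to an anonymous struct nv_pi_priv (a compound
 * literal), so each nv_port_info entry below can carry its interrupt handler
 * and SCSI host template in ->private_data without a separately named object.
 */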
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
static int adma_enabled;
static int swncq_enabled = 1;
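/*
 * The ADMA engine has two modes: register mode, where the port behaves like
 * a plain BMDMA/taskfile port, and ADMA mode, where commands are fetched
 * from the CPB ring.  nv_adma_register_mode() and nv_adma_mode() switch
 * between them by clearing or setting NV_ADMA_CTL_GO and polling the status
 * register for the corresponding LEGACY/IDLE state.
 */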
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	       !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping.  If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke.  If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails.  In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway.  We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
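/*
 * The CPB taskfile is an array of 16-bit words: the high byte selects the
 * shadow register (ATA_REG_*), the low byte carries the value, and control
 * bits such as WNB (wait for not-BSY) on the first entry and CMDEND on the
 * command entry are OR'd in.  Unused slots are marked IGN.
 */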
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands.  EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving.  Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return -1;
		}
	}
	return 0;
}
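/*
 * For the legacy/CK804 interrupt path each port owns NV_INT_PORT_SHIFT (4)
 * bits of the shared status register; callers shift the register down so
 * that nv_host_intr() always sees its own port's DEV/ADDED/REMOVED bits in
 * the low nibble.
 */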
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_sff_host_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode.  Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status.  Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");

				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
	       mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_sff_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
	       ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_sff_post_internal_cmd(qc);
}
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers. */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	writew(0, mmio + NV_ADMA_CTL);
}
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
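/*
 * The first five S/G segments are written into the APRDs embedded in the
 * CPB; any remaining segments go into this tag's slot of the external APRD
 * table, which the hardware reaches through cpb->next_aprd.
 */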
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_sff_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_sff_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_sff_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	bool online;
	int rc;

	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				 &online, NULL);
	return online ? -EAGAIN : rc;
}
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_sff_freeze(ap);
}
static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_sff_thaw(ap);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_sff_error_handler(ap);
}
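/*
 * SWNCQ deferral: new NCQ commands can only be sent to the device at certain
 * points in the protocol, so commands that cannot be issued immediately are
 * parked in a small FIFO (struct defer_queue) and re-issued later.  head and
 * tail are free-running indices masked with ATA_MAX_QUEUE - 1, which relies
 * on ATA_MAX_QUEUE being a power of two.
 */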
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}
static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_printk(ap, KERN_ERR,
			"EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
			ap->qc_active, ap->link.sactive);
	ata_port_printk(ap, KERN_ERR,
		"SWNCQ: qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
			ap->ops->sff_check_status(ap),
			ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;
		else if (done_mask & (1 << i))
			err = 1;
		else
			continue;

		ata_port_printk(ap, KERN_ERR,
				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
				(pp->dhfis_bits >> i) & 0x1,
				(pp->dmafis_bits >> i) & 0x1,
				(pp->sdbfis_bits >> i) & 0x1,
				(sactive >> i) & 0x1,
				(err ? "error! tag doesn't exist" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_sff_error_handler(ap);
}
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_sff_qc_prep(qc);
		return;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	nv_swncq_fill_sg(qc);
}
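/*
 * SWNCQ reuses the legacy BMDMA engine, so each queued command gets its own
 * PRD table (pp->prd + ATA_MAX_PRD * tag).  A PRD entry must not cross a
 * 64KB boundary, hence segments are split whenever offset + length would
 * exceed 0x10000.
 */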
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
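/*
 * Per-tag bookkeeping: dhfis_bits, dmafis_bits and sdbfis_bits record which
 * tags have seen a Device-to-Host Register FIS, a DMA Setup FIS and a Set
 * Device Bits FIS respectively, while qc_active mirrors the tags the driver
 * believes are outstanding.  Issuing a command clears its D2H/DMA bits and
 * sets its bit in qc_active.
 */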
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_sff_qc_issue(qc);

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * the driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	u32 tag;

	tag = readb(pp->tag_block) >> 2;
	return (tag & 0x1f);
}
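/*
 * Service a DMA Setup FIS: read the tag the drive wants to transfer
 * from the hardware tag register, point the BMDMA engine at that tag's
 * PRD table, set the transfer direction and start the engine.
 */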
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
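/*
 * Per-port SWNCQ interrupt handler.  Clears the port's FIS interrupt
 * bits, then dispatches on them: hotplug events, device errors, SDB FIS
 * completions, D2H register FIS acknowledgements and DMA Setup FIS
 * requests.  Illegal FIS transitions freeze the port for EH.
 */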
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, the driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			if (ap->link.sactive) {
				nv_swncq_host_interrupt(ap, (u16)irq_stat);
				handled = 1;
			} else {
				if (irq_stat)	/* reserve Hotplug */
					nv_swncq_irq_clear(ap, 0xfff0);

				handled += nv_host_intr(ap, (u8)irq_stat);
			}
		}
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int rc;
	u32 bar;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
				 IRQF_SHARED, ipriv->sht);
}
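/*
 * After a resume from suspend, re-enable the CK804+ SATA register space
 * and restore the per-port ADMA enables in PCI config register 0x20
 * before the ports are resumed.
 */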
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");