/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.2"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
enum {
	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),
};
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	u8			flags;
};
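/* Per-port interrupt pending bit in the ADMA general control/status
   dword: bit 19 for port 0, bit 31 for port 1 (12-bit stride). */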
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VDEVICE(NVIDIA, 0x045c), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), GENERIC },	/* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), GENERIC },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), GENERIC },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), GENERIC },	/* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), GENERIC },	/* MCP67 */
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ }	/* terminate list */
};
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
	.remove			= ata_pci_remove_one,
};
static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_generic_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_nf2_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_pci_host_stop,
};
static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_pio_data_xfer,
	.irq_handler		= nv_ck804_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= nv_ck804_host_stop,
};
static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= nv_adma_bmdma_setup,
	.bmdma_start		= nv_adma_bmdma_start,
	.bmdma_stop		= nv_adma_bmdma_stop,
	.bmdma_status		= nv_adma_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_bmdma_stop,
	.data_xfer		= ata_mmio_data_xfer,
	.irq_handler		= nv_adma_interrupt,
	.irq_clear		= nv_adma_irq_clear,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
	.host_stop		= nv_adma_host_stop,
};
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
	},
};
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
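/* ADMA is used by default on controllers that support it; the "adma"
   module parameter (registered at the bottom of this file) turns it off. */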
static int adma_enabled = 1;
static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
						unsigned int port_no)
{
	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
	return mmio;
}
static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
{
	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
}
static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
{
	return (ap->host->mmio_base + NV_ADMA_GEN);
}
static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
{
	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
}
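/* A port runs in one of two modes: legacy "register" mode, where the
 * shadow taskfile registers behave like a plain BMDMA channel, and ADMA
 * mode, where the controller fetches commands from the CPB list.  The
 * two helpers below switch between them by toggling NV_ADMA_CTL_GO and
 * track the current mode in the port's private flags. */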
static void nv_adma_register_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
static void nv_adma_mode(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	struct nv_adma_port_priv *pp = ap->private_data;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);

	return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}
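/* Encode a taskfile as CPB entries: each __le16 holds a shadow register
 * index in the upper byte and the value in the lower byte, plus control
 * bits from enum nv_adma_regbits (WNB on the first entry, CMDEND on the
 * last).  Non-LBA48 commands get IGN placeholders for the HOB slots. */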
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
	}
	cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

	return idx;
}
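/* Examine the response flags the controller wrote back into a CPB and,
 * if the command completed (or an error is being forced), finish the
 * matching qc with an appropriate err_mask. */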
static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (complete || force_err)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);

		if (likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));

			if (have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
	int handled;

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	handled = ata_host_intr(ap, qc);
	if (unlikely(!handled)) {
		/* spurious, clear it */
		ata_check_status(ap);
	}

	return 1;
}
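/* ADMA interrupt handler.  Ports still in register mode are serviced
 * through nv_host_intr() like the plain CK804 path.  Otherwise sample
 * the notifier registers, clear NV_ADMA_STAT (so CPB completions after
 * this point raise a fresh interrupt), and check the CPBs named by
 * active_tag (non-NCQ) or sactive (NCQ).  Both notifier clear registers
 * are written at the end, as NVIDIA says the hardware requires. */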
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = nv_adma_ctl_block(ap);
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/* Check CPBs for completed commands */

				if (ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					while ((pos = ffs(active))) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)));
						active &= ~(1 << pos);
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		writel(notifier_clears[0],
			nv_adma_notifier_clear_block(host->ports[0]));
		writel(notifier_clears[1],
			nv_adma_notifier_clear_block(host->ports[1]));
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       nv_adma_notifier_clear_block(ap));

	/* clear legacy status */
	outb(inb(dma_stat_addr), dma_stat_addr);
}
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}
static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
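/* Port startup carves one coherent DMA allocation of
 * NV_ADMA_PORT_PRIV_DMA_SZ bytes into two regions: an array of
 * NV_ADMA_MAX_CPBS command parameter blocks (one per tag), followed by
 * one APRD scatter/gather table per tag for commands whose s/g lists
 * overflow the five APRDs embedded in the CPB itself. */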
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio = nv_adma_ctl_block(ap);
	u16 tmp;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp) {
		rc = -ENOMEM;
		goto err_out;
	}

	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				 &mem_dma, GFP_KERNEL);
	if (!mem) {
		rc = -ENOMEM;
		goto err_out_kfree;
	}
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;

err_out_kfree:
	kfree(pp);
err_out:
	ata_port_stop(ap);
	return rc;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(ap);

	writew(0, mmio + NV_ADMA_CTL);

	ap->private_data = NULL;
	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
	kfree(pp);
	ata_port_stop(ap);
}
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= (unsigned long) mmio;
	ioport->data_addr	= (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= (unsigned long) mmio + 0x20;
}
static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	for (i = 0; i < probe_ent->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
		u16 tmp;

		/* enable interrupt, clear reset if not already clear */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	return 0;
}
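/* s/g handling: the first five segments of a command live in the APRDs
 * embedded in the CPB; any further segments go into the per-tag APRD
 * table that the CPB points at through next_aprd. */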
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg)));	/* len in bytes */
	aprd->flags = flags;
}
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx = 0;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = nv_adma_ctl_block(qc->ap);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}
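/* Each port owns a 4-bit field (NV_INT_PORT_SHIFT) in the interrupt
 * status/enable registers: freeze masks that field off, thaw clears any
 * latched status bits and unmasks it.  nForce2/3 reach these registers
 * via I/O ports, CK804 via BAR5 MMIO. */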
static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = nv_adma_ctl_block(ap);
		int i;
		u16 tmp;
		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if (cpb->ctl_flags || cpb->resp_flags)
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
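/* Probe: reject IDE-mode controllers (they expose fewer than six BARs),
 * select the port_info for this chip generation, switch CK804/MCP04 to
 * ADMA when enabled (using a 64-bit DMA mask where available), map BAR5
 * for the SCR and ADMA register blocks, and register with libata. */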
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	int pci_dev_busy = 0;
	int rc;
	u32 bar;
	unsigned long base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out_disable;
	}

	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if (!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			goto err_out_regions;
	}

	rc = -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		goto err_out_regions;

	probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
	if (!probe_ent->mmio_base) {
		rc = -EIO;
		goto err_out_free_ent;
	}

	base = (unsigned long)probe_ent->mmio_base;

	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			goto err_out_iounmap;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		goto err_out_iounmap;

	kfree(probe_ent);

	return 0;

err_out_iounmap:
	pci_iounmap(pdev, probe_ent->mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	if (!pci_dev_busy)
		pci_disable_device(pdev);
err_out:
	return rc;
}
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);

	ata_pci_host_stop(host);
}
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int i;
	u32 tmp32;

	for (i = 0; i < host->n_ports; i++) {
		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
		u16 tmp;

		/* disable interrupt */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
	}

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");