/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.7"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
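/*
 * Note on the BASE_LO masks: they mirror the alignment of the rings they
 * point at.  The 1KB-aligned CRQB ring leaves bits 9:0 of its bus address
 * free, so the same register also holds the ring index at
 * EDMA_REQ_Q_PTR_SHIFT (bits 9:5 with 32-byte entries); likewise the
 * 256B-aligned CRPB ring shares its register with an index at
 * EDMA_RSP_Q_PTR_SHIFT (bits 7:3 with 8-byte entries).
 */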
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host *host);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
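/*
 * writelfl() is used below wherever a register write must actually reach
 * the chip before the driver proceeds (e.g. when bumping the EDMA request
 * queue in-pointer): the dummy readl() forces any posted PCI write out to
 * the device before the next step.
 */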
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
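/*
 * Worked example of the address arithmetic above, assuming the usual
 * 4-ports-per-HC layout implied by MV_PORT_HC_SHIFT/MV_PORT_MASK: for
 * port 5, hc = 5 >> 2 = 1 and hard port = 5 & 3 = 1, so
 *
 *   mv_port_base(base, 5) = base + 0x20000	SATAHC0 register base
 *			   + (1 * 0x10000)	HC1 block
 *			   + 0x2000		skip the arbiter regs
 *			   + (1 * 0x2000)	hard port 1
 *			   = base + 0x34000
 */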
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *	mv_stop_dma - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev,b,&dw);
			printk("%08x ",dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
/**
 *	mv_host_stop - Host specific cleanup/stop routine.
 *	@host: host data structure
 *
 *	Disable ints, cleanup host memory, call general purpose
 *	host_stop.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_stop(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host);
}
static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}
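/*
 * Resulting layout of the single coherent allocation carved up above
 * (sizes follow from the enum constants; the offsets are a worked
 * example, not values read back from the hardware):
 *
 *   mem_dma + 0x000  CRQB ring:   32 entries * 32B =  1KB  (1KB aligned)
 *   mem_dma + 0x400  CRPB ring:   32 entries *  8B =  256B (256B aligned)
 *   mem_dma + 0x500  ePRD table: 176 entries * 16B = 2816B (16B aligned)
 *                    total == MV_PORT_PRIV_DMA_SZ == 4KB
 */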
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |=
					cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}
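/*
 * Example of the 64KB split above (hypothetical addresses, purely for
 * illustration): an SG segment of 0x400 bytes at bus address 0x1000ff00
 * has offset 0xff00, so the first ePRD gets len = 0x10000 - 0xff00 = 0x100
 * and a second ePRD covers the remaining 0x300 bytes starting at
 * 0x10010000.
 */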
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
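/*
 * The request/response rings wrap at MV_MAX_Q_DEPTH entries, so with a
 * depth of 32 the mask is 0x1f and mv_inc_q_index(31) == 0.  The same
 * masking recovers an entry index from the hardware pointer registers,
 * as in (ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK below.
 */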
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
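/*
 * Layout of the packed command word, per the CRQB_CMD_* constants above
 * (shown for a typical call such as mv_crqb_pack_cmd(cw, tf->command,
 * ATA_REG_CMD, 1)):
 *
 *   bits  7:0  data		taskfile register value
 *   bits 10:8  addr		shadow register index (CRQB_CMD_ADDR_SHIFT)
 *   bit  12    CRQB_CMD_CS	(0x2 << 11)
 *   bit  15    CRQB_CMD_LAST	set only on the final word of the sequence
 */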
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			      >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 *	mv_get_crpb_status - get status from most recently completed cmd
 *	@ap: ATA channel to manipulate
 *
 *	This routine is for use when the port is in DMA mode, when it
 *	will be using the CRPB (command response block) method of
 *	returning command completion information.  We check indices
 *	are good, grab status, and bump the response consumer index to
 *	prove that we're up to date.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			       >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@reset_allowed: bool: 0 == don't trigger from reset here
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which is done right before
 *	the COMRESET in mv_phy_reset().  The SERR case requires a
 *	clear of pending errors in the SATA SERROR register.  Finally,
 *	if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
/**
 *	mv_interrupt -
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->mmio_base;
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
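/*
 * Main-cause register layout assumed by the HC loop above, per the
 * PORT0_ERR/PORT0_DONE/HC0_IRQ_PEND/HC_SHIFT constants: bits 0-8 carry
 * HC0's per-port err/done pairs (plus the coalescing bit), bits 9-17
 * the same for HC1, so the per-HC mask is HC0_IRQ_PEND << (hc * 9),
 * e.g. 0x1ff << 9 for hc == 1.
 */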
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 *	__mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *
 *	Part of this is taken from __sata_phy_reset and modified to
 *	not sleep since this routine gets called from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  This is coded to safe to call at
 *	interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	{
		int retry2 = 20;
		while (1) {
			u8 drv_stat = ata_check_status(ap);
			if ((drv_stat != 0x80) && (drv_stat != 0x7f))
				break;

			__msleep(500, can_sleep);

			if (retry2-- <= 0)
				break;
		}
	}

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
/**
 *	mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *	@ap: ATA channel to manipulate
 *
 *	Intent is to clear all pending error conditions, reset the
 *	chip/bus, fail the command, and move on.
 *
 *	LOCKING:
 *	This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
			 to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch(board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@pdev: host PCI device
 *	@probe_ent: early data struct representing the host
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->port_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@probe_ent: early data struct representing the host
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
	       "%u slots %u ports %s mode IRQ via %s\n",
	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}
	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->port_flags = mv_port_info[board_idx].flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	} else {
		pci_intx(pdev, 1);
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);