/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.7"
enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)	IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
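/* Note: MV_DMA_BOUNDARY = 0xffff means a single ePRD entry may describe
 * at most 64KB and must not cross a 64KB-aligned boundary; mv_fill_sg()
 * below splits any S/G segment that would.
 */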
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host *host);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_mmio_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
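/* msi is consulted once, at probe time in mv_init_one() below; the 0444
 * mode on the module_param() at the bottom of this file makes it
 * read-only through sysfs after load.
 */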
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
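/* A worked example of the address math above: port 5 is hard port 1
 * (5 & 3) on HC 1 (5 >> 2), so its register block starts at
 * base + 0x20000 + 1*0x10000 (HC base) + 0x2000 (arbiter) + 1*0x2000,
 * i.e. base + 0x34000.
 */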
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->mmio_base, ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
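/* While EDMA_EN is set, the port's shadow (taskfile) registers must not
 * be touched; that is why mv_qc_issue() below calls mv_stop_dma() before
 * handing any non-DMA command to the generic PIO path.
 */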
/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev,b,&dw);
			printk("%08x ",dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
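/* e.g. libata's SCR_ERROR index is 1, so SError decodes to port base +
 * 0x304, right after SStatus at 0x300, and SControl (index 2) to 0x308;
 * hence the "ctrl, err regs follow status" note on SATA_STATUS_OFS.
 * Only SActive (0x350) breaks the pattern.
 */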
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
/**
 *      mv_host_stop - Host specific cleanup/stop routine.
 *      @host: host data structure
 *
 *      Disable ints, cleanup host memory, call general purpose
 *      host_stop.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_stop(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host);
}
static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~0x1f;		/* clear queue depth */
	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* dis RX PM port mask */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab host q cache */
		cfg |= (1 << 22);	/* enab cutthrough */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}
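/* Resulting layout of the single 4KB coherent chunk per port:
 *   0x000-0x3ff  CRQB ring   (32 slots x 32B)
 *   0x400-0x4ff  CRPB ring   (32 slots x  8B)
 *   0x500-0xfff  ePRD table (176 entries x 16B)
 * which also satisfies the 1KB/256B/16B alignment rules noted in the
 * enum above, since dma_alloc_coherent() returns page-aligned memory.
 */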
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}
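/* Example of the 64KB boundary split above: a 0x2000-byte segment at
 * dma address 0x1234f000 has offset 0xf000, so the first ePRD gets
 * len 0x1000 and a second entry covers the remaining 0x1000 starting
 * at 0x12350000.  len can legitimately reach 0x10000, which masks to a
 * flags_size count of 0 -- presumably interpreted by the EDMA as a full
 * 64KB, or the masking here would be wrong.
 */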
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
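/* MV_MAX_Q_DEPTH is a power of two (32), so masking with depth - 1 wraps
 * the ring index for free: mv_inc_q_index(31) == 0.
 */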
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
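/* Layout of one packed CRQB command word, per the constants above:
 * bits 7:0 = data, bits 10:8 = the shadow register address (0-7),
 * bits 12:11 = 0x2 (CRQB_CMD_CS), bit 15 = last-word flag.  E.g. packing
 * the command register itself ORs in ATA_REG_CMD << 8 and CRQB_CMD_LAST.
 */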
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
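/* The producer index needs no separate register field: one CRQB is 32B,
 * so (in_index << EDMA_REQ_Q_PTR_SHIFT) with shift 5 is exactly the byte
 * offset of the slot, and it ORs cleanly into the 1KB-aligned base kept
 * by EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00).  The response side works the
 * same way with 8B CRPBs and a shift of 3.
 */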
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
					>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
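/* The CRPB flags word carries the device's ATA status in its high byte
 * (CRPB_FLAG_STATUS_SHIFT == 8), so no shadow-register read is needed
 * while the EDMA owns the port.
 */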
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
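/* Main-cause bit bookkeeping used above: each port owns an err/done bit
 * pair at (port * 2), and HC1's pairs additionally skip bit 8 (the
 * HC0_IRQ_PEND field is 9 bits wide), so e.g. port 5's error bit is
 * PORT0_ERR << 11.
 */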
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->mmio_base;
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;
	u8 rev_id;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
/**
 *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *      @ap: ATA channel to manipulate
 *
 *      Intent is to clear all pending error conditions, reset the
 *      chip/bus, fail the command, and move on.
 *
 *      LOCKING:
 *      This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
			 to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
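/* With SHD_BLK_OFS = 0x100 and 32-bit register spacing, the shadow block
 * decodes as: data at +0x100, error/feature +0x104, nsect +0x108,
 * lbal/lbam/lbah +0x10c/+0x110/+0x114, device +0x118, status/command
 * +0x11c, and control/altstatus at +0x120 (SHD_CTL_AST_OFS).
 */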
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @pdev: host PCI device
 *      @probe_ent: early data struct representing the host
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->port_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @probe_ent: early data struct representing the host
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
	       "%u slots %u ports %s mode IRQ via %s\n",
	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}
	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->port_flags = mv_port_info[board_idx].flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	} else {
		pci_intx(pdev, 1);
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);