/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
  probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the added
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv) IS_50XX(hpriv)
#define IS_GEN_II(hpriv) IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;		/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
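
/* Note: pci_go_64() prefers a full 64-bit DMA mask and falls back to
 * 32-bit streaming/consistent masks if the platform rejects it, so the
 * driver can still load on hosts without 64-bit DMA support.
 */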
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}
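
/* The polling loop above waits for the EDMA_EN bit to drop after writing
 * EDMA_DS; if it never clears, the port is left running and only the
 * error message (plus the FIXME about a recovery reset) records the fact.
 */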
774 static void mv_dump_mem(void __iomem
*start
, unsigned bytes
)
777 for (b
= 0; b
< bytes
; ) {
778 DPRINTK("%p: ", start
+ b
);
779 for (w
= 0; b
< bytes
&& w
< 4; w
++) {
780 printk("%08x ",readl(start
+ b
));
788 static void mv_dump_pci_cfg(struct pci_dev
*pdev
, unsigned bytes
)
793 for (b
= 0; b
< bytes
; ) {
794 DPRINTK("%02x: ", b
);
795 for (w
= 0; b
< bytes
&& w
< 4; w
++) {
796 (void) pci_read_config_dword(pdev
,b
,&dw
);
804 static void mv_dump_all_regs(void __iomem
*mmio_base
, int port
,
805 struct pci_dev
*pdev
)
808 void __iomem
*hc_base
= mv_hc_base(mmio_base
,
809 port
>> MV_PORT_HC_SHIFT
);
810 void __iomem
*port_base
;
811 int start_port
, num_ports
, p
, start_hc
, num_hcs
, hc
;
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
818 start_hc
= port
>> MV_PORT_HC_SHIFT
;
820 num_ports
= num_hcs
= 1;
822 DPRINTK("All registers for port(s) %u-%u:\n", start_port
,
823 num_ports
> 1 ? num_ports
- 1 : start_port
);
826 DPRINTK("PCI config space regs:\n");
827 mv_dump_pci_cfg(pdev
, 0x68);
829 DPRINTK("PCI regs:\n");
830 mv_dump_mem(mmio_base
+0xc00, 0x3c);
831 mv_dump_mem(mmio_base
+0xd00, 0x34);
832 mv_dump_mem(mmio_base
+0xf00, 0x4);
833 mv_dump_mem(mmio_base
+0x1d00, 0x6c);
834 for (hc
= start_hc
; hc
< start_hc
+ num_hcs
; hc
++) {
835 hc_base
= mv_hc_base(mmio_base
, hc
);
836 DPRINTK("HC regs (HC %i):\n", hc
);
837 mv_dump_mem(hc_base
, 0x1c);
839 for (p
= start_port
; p
< start_port
+ num_ports
; p
++) {
840 port_base
= mv_port_base(mmio_base
, p
);
841 DPRINTK("EDMA regs (port %i):\n",p
);
842 mv_dump_mem(port_base
, 0x54);
843 DPRINTK("SATA regs (port %i):\n",p
);
844 mv_dump_mem(port_base
+0x300, 0x60);
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}
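
/* SCR access note: SStatus, SError and SControl are laid out contiguously
 * at SATA_STATUS_OFS and indexed by register number, while SActive sits
 * apart at SATA_ACTIVE_OFS; unsupported SCR numbers map to 0xffffffffU so
 * reads and writes of them are silently dropped.
 */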
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
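
/* Summary: mv_edma_cfg() always programs a non-NCQ configuration.  Gen I
 * parts get the burst-size workaround, Gen II parts get extended read
 * bursts and 512B write buffering, and Gen IIE parts additionally enable
 * the host queue cache, early completion and cut-through.
 */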
919 * mv_port_start - Port specific init/start routine.
920 * @ap: ATA channel to manipulate
922 * Allocate and point to DMA memory, init port private memory,
926 * Inherited from caller.
928 static int mv_port_start(struct ata_port
*ap
)
930 struct device
*dev
= ap
->host
->dev
;
931 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
932 struct mv_port_priv
*pp
;
933 void __iomem
*port_mmio
= mv_ap_base(ap
);
938 pp
= devm_kzalloc(dev
, sizeof(*pp
), GFP_KERNEL
);
942 mem
= dmam_alloc_coherent(dev
, MV_PORT_PRIV_DMA_SZ
, &mem_dma
,
946 memset(mem
, 0, MV_PORT_PRIV_DMA_SZ
);
948 rc
= ata_pad_alloc(ap
, dev
);
952 /* First item in chunk of DMA memory:
953 * 32-slot command request table (CRQB), 32 bytes each in size
956 pp
->crqb_dma
= mem_dma
;
958 mem_dma
+= MV_CRQB_Q_SZ
;
961 * 32-slot command response table (CRPB), 8 bytes each in size
964 pp
->crpb_dma
= mem_dma
;
966 mem_dma
+= MV_CRPB_Q_SZ
;
969 * Table of scatter-gather descriptors (ePRD), 16 bytes each
972 pp
->sg_tbl_dma
= mem_dma
;
974 mv_edma_cfg(hpriv
, port_mmio
);
976 writel((pp
->crqb_dma
>> 16) >> 16, port_mmio
+ EDMA_REQ_Q_BASE_HI_OFS
);
977 writelfl(pp
->crqb_dma
& EDMA_REQ_Q_BASE_LO_MASK
,
978 port_mmio
+ EDMA_REQ_Q_IN_PTR_OFS
);
980 if (hpriv
->hp_flags
& MV_HP_ERRATA_XX42A0
)
981 writelfl(pp
->crqb_dma
& 0xffffffff,
982 port_mmio
+ EDMA_REQ_Q_OUT_PTR_OFS
);
984 writelfl(0, port_mmio
+ EDMA_REQ_Q_OUT_PTR_OFS
);
986 writel((pp
->crpb_dma
>> 16) >> 16, port_mmio
+ EDMA_RSP_Q_BASE_HI_OFS
);
988 if (hpriv
->hp_flags
& MV_HP_ERRATA_XX42A0
)
989 writelfl(pp
->crpb_dma
& 0xffffffff,
990 port_mmio
+ EDMA_RSP_Q_IN_PTR_OFS
);
992 writelfl(0, port_mmio
+ EDMA_RSP_Q_IN_PTR_OFS
);
994 writelfl(pp
->crpb_dma
& EDMA_RSP_Q_BASE_LO_MASK
,
995 port_mmio
+ EDMA_RSP_Q_OUT_PTR_OFS
);
997 /* Don't turn on EDMA here...do it before DMA commands only. Else
998 * we'll be unable to send non-data, PIO, etc due to restricted access
1001 ap
->private_data
= pp
;
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
1024 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1025 * @qc: queued command whose SG list to source from
1027 * Populate the SG list and mark the last entry.
1030 * Inherited from caller.
1032 static unsigned int mv_fill_sg(struct ata_queued_cmd
*qc
)
1034 struct mv_port_priv
*pp
= qc
->ap
->private_data
;
1035 unsigned int n_sg
= 0;
1036 struct scatterlist
*sg
;
1037 struct mv_sg
*mv_sg
;
1040 ata_for_each_sg(sg
, qc
) {
1041 dma_addr_t addr
= sg_dma_address(sg
);
1042 u32 sg_len
= sg_dma_len(sg
);
1044 mv_sg
->addr
= cpu_to_le32(addr
& 0xffffffff);
1045 mv_sg
->addr_hi
= cpu_to_le32((addr
>> 16) >> 16);
1046 mv_sg
->flags_size
= cpu_to_le32(sg_len
& 0xffff);
1048 if (ata_sg_is_last(sg
, qc
))
1049 mv_sg
->flags_size
|= cpu_to_le32(EPRD_FLAG_END_OF_TBL
);
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}

static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
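
/* Each packed CRQB command word carries an 8-bit data byte, the shadow
 * register address, the fixed CRQB_CMD_CS control bits, and optionally
 * CRQB_CMD_LAST on the final word of the register sequence.
 */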
1071 * mv_qc_prep - Host specific command preparation.
1072 * @qc: queued command to prepare
1074 * This routine simply redirects to the general purpose routine
1075 * if command is not DMA. Else, it handles prep of the CRQB
1076 * (command request block), does some sanity checking, and calls
1077 * the SG load routine.
1080 * Inherited from caller.
1082 static void mv_qc_prep(struct ata_queued_cmd
*qc
)
1084 struct ata_port
*ap
= qc
->ap
;
1085 struct mv_port_priv
*pp
= ap
->private_data
;
1087 struct ata_taskfile
*tf
;
1091 if (ATA_PROT_DMA
!= qc
->tf
.protocol
)
1094 /* Fill in command request block
1096 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
1097 flags
|= CRQB_FLAG_READ
;
1098 WARN_ON(MV_MAX_Q_DEPTH
<= qc
->tag
);
1099 flags
|= qc
->tag
<< CRQB_TAG_SHIFT
;
1101 /* get current queue index from hardware */
1102 in_index
= (readl(mv_ap_base(ap
) + EDMA_REQ_Q_IN_PTR_OFS
)
1103 >> EDMA_REQ_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
;
1105 pp
->crqb
[in_index
].sg_addr
=
1106 cpu_to_le32(pp
->sg_tbl_dma
& 0xffffffff);
1107 pp
->crqb
[in_index
].sg_addr_hi
=
1108 cpu_to_le32((pp
->sg_tbl_dma
>> 16) >> 16);
1109 pp
->crqb
[in_index
].ctrl_flags
= cpu_to_le16(flags
);
1111 cw
= &pp
->crqb
[in_index
].ata_cmd
[0];
	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
1120 switch (tf
->command
) {
1122 case ATA_CMD_READ_EXT
:
1124 case ATA_CMD_WRITE_EXT
:
1125 case ATA_CMD_WRITE_FUA_EXT
:
1126 mv_crqb_pack_cmd(cw
++, tf
->hob_nsect
, ATA_REG_NSECT
, 0);
1128 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1129 case ATA_CMD_FPDMA_READ
:
1130 case ATA_CMD_FPDMA_WRITE
:
1131 mv_crqb_pack_cmd(cw
++, tf
->hob_feature
, ATA_REG_FEATURE
, 0);
1132 mv_crqb_pack_cmd(cw
++, tf
->feature
, ATA_REG_FEATURE
, 0);
1134 #endif /* FIXME: remove this line when NCQ added */
1136 /* The only other commands EDMA supports in non-queued and
1137 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1138 * of which are defined/used by Linux. If we get here, this
1139 * driver needs work.
1141 * FIXME: modify libata to give qc_prep a return value and
1142 * return error here.
1144 BUG_ON(tf
->command
);
1147 mv_crqb_pack_cmd(cw
++, tf
->nsect
, ATA_REG_NSECT
, 0);
1148 mv_crqb_pack_cmd(cw
++, tf
->hob_lbal
, ATA_REG_LBAL
, 0);
1149 mv_crqb_pack_cmd(cw
++, tf
->lbal
, ATA_REG_LBAL
, 0);
1150 mv_crqb_pack_cmd(cw
++, tf
->hob_lbam
, ATA_REG_LBAM
, 0);
1151 mv_crqb_pack_cmd(cw
++, tf
->lbam
, ATA_REG_LBAM
, 0);
1152 mv_crqb_pack_cmd(cw
++, tf
->hob_lbah
, ATA_REG_LBAH
, 0);
1153 mv_crqb_pack_cmd(cw
++, tf
->lbah
, ATA_REG_LBAH
, 0);
1154 mv_crqb_pack_cmd(cw
++, tf
->device
, ATA_REG_DEVICE
, 0);
1155 mv_crqb_pack_cmd(cw
++, tf
->command
, ATA_REG_CMD
, 1); /* last */
1157 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
1163 * mv_qc_prep_iie - Host specific command preparation.
1164 * @qc: queued command to prepare
1166 * This routine simply redirects to the general purpose routine
1167 * if command is not DMA. Else, it handles prep of the CRQB
1168 * (command request block), does some sanity checking, and calls
1169 * the SG load routine.
1172 * Inherited from caller.
1174 static void mv_qc_prep_iie(struct ata_queued_cmd
*qc
)
1176 struct ata_port
*ap
= qc
->ap
;
1177 struct mv_port_priv
*pp
= ap
->private_data
;
1178 struct mv_crqb_iie
*crqb
;
1179 struct ata_taskfile
*tf
;
1183 if (ATA_PROT_DMA
!= qc
->tf
.protocol
)
1186 /* Fill in Gen IIE command request block
1188 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
1189 flags
|= CRQB_FLAG_READ
;
1191 WARN_ON(MV_MAX_Q_DEPTH
<= qc
->tag
);
1192 flags
|= qc
->tag
<< CRQB_TAG_SHIFT
;
1194 /* get current queue index from hardware */
1195 in_index
= (readl(mv_ap_base(ap
) + EDMA_REQ_Q_IN_PTR_OFS
)
1196 >> EDMA_REQ_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
;
1198 crqb
= (struct mv_crqb_iie
*) &pp
->crqb
[in_index
];
1199 crqb
->addr
= cpu_to_le32(pp
->sg_tbl_dma
& 0xffffffff);
1200 crqb
->addr_hi
= cpu_to_le32((pp
->sg_tbl_dma
>> 16) >> 16);
1201 crqb
->flags
= cpu_to_le32(flags
);
1204 crqb
->ata_cmd
[0] = cpu_to_le32(
1205 (tf
->command
<< 16) |
1208 crqb
->ata_cmd
[1] = cpu_to_le32(
1214 crqb
->ata_cmd
[2] = cpu_to_le32(
1215 (tf
->hob_lbal
<< 0) |
1216 (tf
->hob_lbam
<< 8) |
1217 (tf
->hob_lbah
<< 16) |
1218 (tf
->hob_feature
<< 24)
1220 crqb
->ata_cmd
[3] = cpu_to_le32(
1222 (tf
->hob_nsect
<< 8)
1225 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
1231 * mv_qc_issue - Initiate a command to the host
1232 * @qc: queued command to start
1234 * This routine simply redirects to the general purpose routine
1235 * if command is not DMA. Else, it sanity checks our local
1236 * caches of the request producer/consumer indices then enables
1237 * DMA and bumps the request producer index.
1240 * Inherited from caller.
1242 static unsigned int mv_qc_issue(struct ata_queued_cmd
*qc
)
1244 void __iomem
*port_mmio
= mv_ap_base(qc
->ap
);
1245 struct mv_port_priv
*pp
= qc
->ap
->private_data
;
1249 if (ATA_PROT_DMA
!= qc
->tf
.protocol
) {
1250 /* We're about to send a non-EDMA capable command to the
1251 * port. Turn off EDMA so there won't be problems accessing
1252 * shadow block, etc registers.
1254 mv_stop_dma(qc
->ap
);
1255 return ata_qc_issue_prot(qc
);
1258 in_ptr
= readl(port_mmio
+ EDMA_REQ_Q_IN_PTR_OFS
);
1259 in_index
= (in_ptr
>> EDMA_REQ_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
;
1261 /* until we do queuing, the queue should be empty at this point */
1262 WARN_ON(in_index
!= ((readl(port_mmio
+ EDMA_REQ_Q_OUT_PTR_OFS
)
1263 >> EDMA_REQ_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
));
1265 in_index
= mv_inc_q_index(in_index
); /* now incr producer index */
1267 mv_start_dma(port_mmio
, pp
);
1269 /* and write the request in pointer to kick the EDMA to life */
1270 in_ptr
&= EDMA_REQ_Q_BASE_LO_MASK
;
1271 in_ptr
|= in_index
<< EDMA_REQ_Q_PTR_SHIFT
;
1272 writelfl(in_ptr
, port_mmio
+ EDMA_REQ_Q_IN_PTR_OFS
);
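
/* Queueing note: with only a single outstanding command the request
 * in/out pointers must match before issue (hence the WARN_ON above); the
 * producer index is then bumped and written to EDMA_REQ_Q_IN_PTR_OFS, and
 * the matching consumer update happens in mv_get_crpb_status().
 */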
1278 * mv_get_crpb_status - get status from most recently completed cmd
1279 * @ap: ATA channel to manipulate
1281 * This routine is for use when the port is in DMA mode, when it
1282 * will be using the CRPB (command response block) method of
1283 * returning command completion information. We check indices
1284 * are good, grab status, and bump the response consumer index to
1285 * prove that we're up to date.
1288 * Inherited from caller.
1290 static u8
mv_get_crpb_status(struct ata_port
*ap
)
1292 void __iomem
*port_mmio
= mv_ap_base(ap
);
1293 struct mv_port_priv
*pp
= ap
->private_data
;
1298 out_ptr
= readl(port_mmio
+ EDMA_RSP_Q_OUT_PTR_OFS
);
1299 out_index
= (out_ptr
>> EDMA_RSP_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
;
1301 ata_status
= le16_to_cpu(pp
->crpb
[out_index
].flags
)
1302 >> CRPB_FLAG_STATUS_SHIFT
;
1304 /* increment our consumer index... */
1305 out_index
= mv_inc_q_index(out_index
);
1307 /* and, until we do NCQ, there should only be 1 CRPB waiting */
1308 WARN_ON(out_index
!= ((readl(port_mmio
+ EDMA_RSP_Q_IN_PTR_OFS
)
1309 >> EDMA_RSP_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
));
1311 /* write out our inc'd consumer index so EDMA knows we're caught up */
1312 out_ptr
&= EDMA_RSP_Q_BASE_LO_MASK
;
1313 out_ptr
|= out_index
<< EDMA_RSP_Q_PTR_SHIFT
;
1314 writelfl(out_ptr
, port_mmio
+ EDMA_RSP_Q_OUT_PTR_OFS
);
1316 /* Return ATA status register for completed CRPB */
1321 * mv_err_intr - Handle error interrupts on the port
1322 * @ap: ATA channel to manipulate
1323 * @reset_allowed: bool: 0 == don't trigger from reset here
1325 * In most cases, just clear the interrupt and move on. However,
1326 * some cases require an eDMA reset, which is done right before
1327 * the COMRESET in mv_phy_reset(). The SERR case requires a
1328 * clear of pending errors in the SATA SERROR register. Finally,
1329 * if the port disabled DMA, update our cached copy to match.
1332 * Inherited from caller.
1334 static void mv_err_intr(struct ata_port
*ap
, int reset_allowed
)
1336 void __iomem
*port_mmio
= mv_ap_base(ap
);
1337 u32 edma_err_cause
, serr
= 0;
1339 edma_err_cause
= readl(port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
1341 if (EDMA_ERR_SERR
& edma_err_cause
) {
1342 sata_scr_read(ap
, SCR_ERROR
, &serr
);
1343 sata_scr_write_flush(ap
, SCR_ERROR
, serr
);
1345 if (EDMA_ERR_SELF_DIS
& edma_err_cause
) {
1346 struct mv_port_priv
*pp
= ap
->private_data
;
1347 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
1349 DPRINTK(KERN_ERR
"ata%u: port error; EDMA err cause: 0x%08x "
1350 "SERR: 0x%08x\n", ap
->print_id
, edma_err_cause
, serr
);
1352 /* Clear EDMA now that SERR cleanup done */
1353 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
1355 /* check for fatal here and recover if needed */
1356 if (reset_allowed
&& (EDMA_ERR_FATAL
& edma_err_cause
))
1357 mv_stop_and_reset(ap
);
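
/* Error-interrupt flow: SError-related causes are cleared through the SCR
 * error register before EDMA_ERR_IRQ_CAUSE is written back, the cached
 * EDMA-enabled flag is dropped on self-disable, and only causes covered
 * by EDMA_ERR_FATAL trigger the channel reset path.
 */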
1361 * mv_host_intr - Handle all interrupts on the given host controller
1362 * @host: host specific structure
1363 * @relevant: port error bits relevant to this host controller
1364 * @hc: which host controller we're to look at
1366 * Read then write clear the HC interrupt status then walk each
1367 * port connected to the HC and see if it needs servicing. Port
1368 * success ints are reported in the HC interrupt status reg, the
1369 * port error ints are reported in the higher level main
1370 * interrupt status register and thus are passed in via the
1371 * 'relevant' argument.
1374 * Inherited from caller.
1376 static void mv_host_intr(struct ata_host
*host
, u32 relevant
, unsigned int hc
)
1378 void __iomem
*mmio
= host
->iomap
[MV_PRIMARY_BAR
];
1379 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
1380 struct ata_queued_cmd
*qc
;
1382 int shift
, port
, port0
, hard_port
, handled
;
1383 unsigned int err_mask
;
1388 port0
= MV_PORTS_PER_HC
;
1390 /* we'll need the HC success int register in most cases */
1391 hc_irq_cause
= readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
);
1393 writelfl(~hc_irq_cause
, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
1395 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1396 hc
,relevant
,hc_irq_cause
);
1398 for (port
= port0
; port
< port0
+ MV_PORTS_PER_HC
; port
++) {
1400 struct ata_port
*ap
= host
->ports
[port
];
1401 struct mv_port_priv
*pp
= ap
->private_data
;
1403 hard_port
= mv_hardport_from_port(port
); /* range 0..3 */
1404 handled
= 0; /* ensure ata_status is set if handled++ */
1406 /* Note that DEV_IRQ might happen spuriously during EDMA,
1407 * and should be ignored in such cases.
1408 * The cause of this is still under investigation.
1410 if (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
) {
1411 /* EDMA: check for response queue interrupt */
1412 if ((CRPB_DMA_DONE
<< hard_port
) & hc_irq_cause
) {
1413 ata_status
= mv_get_crpb_status(ap
);
1417 /* PIO: check for device (drive) interrupt */
1418 if ((DEV_IRQ
<< hard_port
) & hc_irq_cause
) {
1419 ata_status
= readb(ap
->ioaddr
.status_addr
);
1421 /* ignore spurious intr if drive still BUSY */
1422 if (ata_status
& ATA_BUSY
) {
1429 if (ap
&& (ap
->flags
& ATA_FLAG_DISABLED
))
1432 err_mask
= ac_err_mask(ata_status
);
1434 shift
= port
<< 1; /* (port * 2) */
1435 if (port
>= MV_PORTS_PER_HC
) {
1436 shift
++; /* skip bit 8 in the HC Main IRQ reg */
1438 if ((PORT0_ERR
<< shift
) & relevant
) {
1440 err_mask
|= AC_ERR_OTHER
;
1445 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
1446 if (qc
&& (qc
->flags
& ATA_QCFLAG_ACTIVE
)) {
1447 VPRINTK("port %u IRQ found for qc, "
1448 "ata_status 0x%x\n", port
,ata_status
);
1449 /* mark qc status appropriately */
1450 if (!(qc
->tf
.flags
& ATA_TFLAG_POLLING
)) {
1451 qc
->err_mask
|= err_mask
;
1452 ata_qc_complete(qc
);
1463 * @dev_instance: private data; in this case the host structure
1466 * Read the read only register to determine if any host
1467 * controllers have pending interrupts. If so, call lower level
1468 * routine to handle. Also check for PCI errors which are only
1472 * This routine holds the host lock while processing pending
1475 static irqreturn_t
mv_interrupt(int irq
, void *dev_instance
)
1477 struct ata_host
*host
= dev_instance
;
1478 unsigned int hc
, handled
= 0, n_hcs
;
1479 void __iomem
*mmio
= host
->iomap
[MV_PRIMARY_BAR
];
1480 struct mv_host_priv
*hpriv
;
1483 irq_stat
= readl(mmio
+ HC_MAIN_IRQ_CAUSE_OFS
);
1485 /* check the cases where we either have nothing pending or have read
1486 * a bogus register value which can indicate HW removal or PCI fault
1488 if (!irq_stat
|| (0xffffffffU
== irq_stat
))
1491 n_hcs
= mv_get_hc_count(host
->ports
[0]->flags
);
1492 spin_lock(&host
->lock
);
1494 for (hc
= 0; hc
< n_hcs
; hc
++) {
1495 u32 relevant
= irq_stat
& (HC0_IRQ_PEND
<< (hc
* HC_SHIFT
));
1497 mv_host_intr(host
, relevant
, hc
);
1502 hpriv
= host
->private_data
;
1503 if (IS_60XX(hpriv
)) {
1504 /* deal with the interrupt coalescing bits */
1505 if (irq_stat
& (TRAN_LO_DONE
| TRAN_HI_DONE
| PORTS_0_7_COAL_DONE
)) {
1506 writelfl(0, mmio
+ MV_IRQ_COAL_CAUSE_LO
);
1507 writelfl(0, mmio
+ MV_IRQ_COAL_CAUSE_HI
);
1508 writelfl(0, mmio
+ MV_IRQ_COAL_CAUSE
);
1512 if (PCI_ERR
& irq_stat
) {
1513 printk(KERN_ERR DRV_NAME
": PCI ERROR; PCI IRQ cause=0x%08x\n",
1514 readl(mmio
+ PCI_IRQ_CAUSE_OFS
));
1516 DPRINTK("All regs @ PCI error\n");
1517 mv_dump_all_regs(mmio
, -1, to_pci_dev(host
->dev
));
1519 writelfl(0, mmio
+ PCI_IRQ_CAUSE_OFS
);
1522 spin_unlock(&host
->lock
);
1524 return IRQ_RETVAL(handled
);
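
/* Interrupt fan-out: the main cause register is read once, each host
 * controller's slice (HC0_IRQ_PEND shifted by HC_SHIFT) is handed to
 * mv_host_intr(), 6xxx coalescing causes are acknowledged, and PCI error
 * bits are reported separately after the per-HC loop.
 */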
1527 static void __iomem
*mv5_phy_base(void __iomem
*mmio
, unsigned int port
)
1529 void __iomem
*hc_mmio
= mv_hc_base_from_port(mmio
, port
);
1530 unsigned long ofs
= (mv_hardport_from_port(port
) + 1) * 0x100UL
;
1532 return hc_mmio
+ ofs
;
1535 static unsigned int mv5_scr_offset(unsigned int sc_reg_in
)
1539 switch (sc_reg_in
) {
1543 ofs
= sc_reg_in
* sizeof(u32
);
1552 static u32
mv5_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
)
1554 void __iomem
*mmio
= ap
->host
->iomap
[MV_PRIMARY_BAR
];
1555 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1556 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1558 if (ofs
!= 0xffffffffU
)
1559 return readl(addr
+ ofs
);
1564 static void mv5_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
)
1566 void __iomem
*mmio
= ap
->host
->iomap
[MV_PRIMARY_BAR
];
1567 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1568 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1570 if (ofs
!= 0xffffffffU
)
1571 writelfl(val
, addr
+ ofs
);
1574 static void mv5_reset_bus(struct pci_dev
*pdev
, void __iomem
*mmio
)
1579 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &rev_id
);
1581 early_5080
= (pdev
->device
== 0x5080) && (rev_id
== 0);
1584 u32 tmp
= readl(mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1586 writel(tmp
, mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1589 mv_reset_pci_bus(pdev
, mmio
);
1592 static void mv5_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1594 writel(0x0fcfffff, mmio
+ MV_FLASH_CTL
);
1597 static void mv5_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
1600 void __iomem
*phy_mmio
= mv5_phy_base(mmio
, idx
);
1603 tmp
= readl(phy_mmio
+ MV5_PHY_MODE
);
1605 hpriv
->signal
[idx
].pre
= tmp
& 0x1800; /* bits 12:11 */
1606 hpriv
->signal
[idx
].amps
= tmp
& 0xe0; /* bits 7:5 */
1609 static void mv5_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1613 writel(0, mmio
+ MV_GPIO_PORT_CTL
);
1615 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1617 tmp
= readl(mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1619 writel(tmp
, mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1622 static void mv5_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1625 void __iomem
*phy_mmio
= mv5_phy_base(mmio
, port
);
1626 const u32 mask
= (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1628 int fix_apm_sq
= (hpriv
->hp_flags
& MV_HP_ERRATA_50XXB0
);
1631 tmp
= readl(phy_mmio
+ MV5_LT_MODE
);
1633 writel(tmp
, phy_mmio
+ MV5_LT_MODE
);
1635 tmp
= readl(phy_mmio
+ MV5_PHY_CTL
);
1638 writel(tmp
, phy_mmio
+ MV5_PHY_CTL
);
1641 tmp
= readl(phy_mmio
+ MV5_PHY_MODE
);
1643 tmp
|= hpriv
->signal
[port
].pre
;
1644 tmp
|= hpriv
->signal
[port
].amps
;
1645 writel(tmp
, phy_mmio
+ MV5_PHY_MODE
);
1650 #define ZERO(reg) writel(0, port_mmio + (reg))
1651 static void mv5_reset_hc_port(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1654 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
1656 writelfl(EDMA_DS
, port_mmio
+ EDMA_CMD_OFS
);
1658 mv_channel_reset(hpriv
, mmio
, port
);
1660 ZERO(0x028); /* command */
1661 writel(0x11f, port_mmio
+ EDMA_CFG_OFS
);
1662 ZERO(0x004); /* timer */
1663 ZERO(0x008); /* irq err cause */
1664 ZERO(0x00c); /* irq err mask */
1665 ZERO(0x010); /* rq bah */
1666 ZERO(0x014); /* rq inp */
1667 ZERO(0x018); /* rq outp */
1668 ZERO(0x01c); /* respq bah */
1669 ZERO(0x024); /* respq outp */
1670 ZERO(0x020); /* respq inp */
1671 ZERO(0x02c); /* test control */
1672 writel(0xbc, port_mmio
+ EDMA_IORDY_TMOUT
);
1676 #define ZERO(reg) writel(0, hc_mmio + (reg))
1677 static void mv5_reset_one_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1680 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
1688 tmp
= readl(hc_mmio
+ 0x20);
1691 writel(tmp
, hc_mmio
+ 0x20);
1695 static int mv5_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1698 unsigned int hc
, port
;
1700 for (hc
= 0; hc
< n_hc
; hc
++) {
1701 for (port
= 0; port
< MV_PORTS_PER_HC
; port
++)
1702 mv5_reset_hc_port(hpriv
, mmio
,
1703 (hc
* MV_PORTS_PER_HC
) + port
);
1705 mv5_reset_one_hc(hpriv
, mmio
, hc
);
1712 #define ZERO(reg) writel(0, mmio + (reg))
1713 static void mv_reset_pci_bus(struct pci_dev
*pdev
, void __iomem
*mmio
)
1717 tmp
= readl(mmio
+ MV_PCI_MODE
);
1719 writel(tmp
, mmio
+ MV_PCI_MODE
);
1721 ZERO(MV_PCI_DISC_TIMER
);
1722 ZERO(MV_PCI_MSI_TRIGGER
);
1723 writel(0x000100ff, mmio
+ MV_PCI_XBAR_TMOUT
);
1724 ZERO(HC_MAIN_IRQ_MASK_OFS
);
1725 ZERO(MV_PCI_SERR_MASK
);
1726 ZERO(PCI_IRQ_CAUSE_OFS
);
1727 ZERO(PCI_IRQ_MASK_OFS
);
1728 ZERO(MV_PCI_ERR_LOW_ADDRESS
);
1729 ZERO(MV_PCI_ERR_HIGH_ADDRESS
);
1730 ZERO(MV_PCI_ERR_ATTRIBUTE
);
1731 ZERO(MV_PCI_ERR_COMMAND
);
1735 static void mv6_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1739 mv5_reset_flash(hpriv
, mmio
);
1741 tmp
= readl(mmio
+ MV_GPIO_PORT_CTL
);
1743 tmp
|= (1 << 5) | (1 << 6);
1744 writel(tmp
, mmio
+ MV_GPIO_PORT_CTL
);
1748 * mv6_reset_hc - Perform the 6xxx global soft reset
1749 * @mmio: base address of the HBA
1751 * This routine only applies to 6xxx parts.
1754 * Inherited from caller.
1756 static int mv6_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1759 void __iomem
*reg
= mmio
+ PCI_MAIN_CMD_STS_OFS
;
1763 /* Following procedure defined in PCI "main command and status
1767 writel(t
| STOP_PCI_MASTER
, reg
);
1769 for (i
= 0; i
< 1000; i
++) {
1772 if (PCI_MASTER_EMPTY
& t
) {
1776 if (!(PCI_MASTER_EMPTY
& t
)) {
1777 printk(KERN_ERR DRV_NAME
": PCI master won't flush\n");
1785 writel(t
| GLOB_SFT_RST
, reg
);
1788 } while (!(GLOB_SFT_RST
& t
) && (i
-- > 0));
1790 if (!(GLOB_SFT_RST
& t
)) {
1791 printk(KERN_ERR DRV_NAME
": can't set global reset\n");
1796 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
1799 writel(t
& ~(GLOB_SFT_RST
| STOP_PCI_MASTER
), reg
);
1802 } while ((GLOB_SFT_RST
& t
) && (i
-- > 0));
1804 if (GLOB_SFT_RST
& t
) {
1805 printk(KERN_ERR DRV_NAME
": can't clear global reset\n");
1812 static void mv6_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
1815 void __iomem
*port_mmio
;
1818 tmp
= readl(mmio
+ MV_RESET_CFG
);
1819 if ((tmp
& (1 << 0)) == 0) {
1820 hpriv
->signal
[idx
].amps
= 0x7 << 8;
1821 hpriv
->signal
[idx
].pre
= 0x1 << 5;
1825 port_mmio
= mv_port_base(mmio
, idx
);
1826 tmp
= readl(port_mmio
+ PHY_MODE2
);
1828 hpriv
->signal
[idx
].amps
= tmp
& 0x700; /* bits 10:8 */
1829 hpriv
->signal
[idx
].pre
= tmp
& 0xe0; /* bits 7:5 */
1832 static void mv6_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1834 writel(0x00000060, mmio
+ MV_GPIO_PORT_CTL
);
1837 static void mv6_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1840 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
1842 u32 hp_flags
= hpriv
->hp_flags
;
1844 hp_flags
& (MV_HP_ERRATA_60X1B2
| MV_HP_ERRATA_60X1C0
);
1846 hp_flags
& (MV_HP_ERRATA_60X1B2
| MV_HP_ERRATA_60X1C0
);
1849 if (fix_phy_mode2
) {
1850 m2
= readl(port_mmio
+ PHY_MODE2
);
1853 writel(m2
, port_mmio
+ PHY_MODE2
);
1857 m2
= readl(port_mmio
+ PHY_MODE2
);
1858 m2
&= ~((1 << 16) | (1 << 31));
1859 writel(m2
, port_mmio
+ PHY_MODE2
);
1864 /* who knows what this magic does */
1865 tmp
= readl(port_mmio
+ PHY_MODE3
);
1868 writel(tmp
, port_mmio
+ PHY_MODE3
);
1870 if (fix_phy_mode4
) {
1873 m4
= readl(port_mmio
+ PHY_MODE4
);
1875 if (hp_flags
& MV_HP_ERRATA_60X1B2
)
1876 tmp
= readl(port_mmio
+ 0x310);
1878 m4
= (m4
& ~(1 << 1)) | (1 << 0);
1880 writel(m4
, port_mmio
+ PHY_MODE4
);
1882 if (hp_flags
& MV_HP_ERRATA_60X1B2
)
1883 writel(tmp
, port_mmio
+ 0x310);
1886 /* Revert values of pre-emphasis and signal amps to the saved ones */
1887 m2
= readl(port_mmio
+ PHY_MODE2
);
1889 m2
&= ~MV_M2_PREAMP_MASK
;
1890 m2
|= hpriv
->signal
[port
].amps
;
1891 m2
|= hpriv
->signal
[port
].pre
;
1894 /* according to mvSata 3.6.1, some IIE values are fixed */
1895 if (IS_GEN_IIE(hpriv
)) {
1900 writel(m2
, port_mmio
+ PHY_MODE2
);
1903 static void mv_channel_reset(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1904 unsigned int port_no
)
1906 void __iomem
*port_mmio
= mv_port_base(mmio
, port_no
);
1908 writelfl(ATA_RST
, port_mmio
+ EDMA_CMD_OFS
);
1910 if (IS_60XX(hpriv
)) {
1911 u32 ifctl
= readl(port_mmio
+ SATA_INTERFACE_CTL
);
1912 ifctl
|= (1 << 7); /* enable gen2i speed */
1913 ifctl
= (ifctl
& 0xfff) | 0x9b1000; /* from chip spec */
1914 writelfl(ifctl
, port_mmio
+ SATA_INTERFACE_CTL
);
1917 udelay(25); /* allow reset propagation */
1919 /* Spec never mentions clearing the bit. Marvell's driver does
1920 * clear the bit, however.
1922 writelfl(0, port_mmio
+ EDMA_CMD_OFS
);
1924 hpriv
->ops
->phy_errata(hpriv
, mmio
, port_no
);
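
/* Reset sequencing: ATA_RST is asserted, Gen II (60xx) parts get the
 * SATA interface control fix-up, the bit is cleared after a short delay
 * (the Marvell vendor driver does this even though the spec is silent),
 * and the per-chip PHY errata hook is applied last.
 */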
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
1951 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
1952 * @ap: ATA channel to manipulate
1954 * Part of this is taken from __sata_phy_reset and modified to
1955 * not sleep since this routine gets called from interrupt level.
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
1961 static void __mv_phy_reset(struct ata_port
*ap
, int can_sleep
)
1963 struct mv_port_priv
*pp
= ap
->private_data
;
1964 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1965 void __iomem
*port_mmio
= mv_ap_base(ap
);
1966 struct ata_taskfile tf
;
1967 struct ata_device
*dev
= &ap
->device
[0];
1968 unsigned long timeout
;
1972 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap
->port_no
, port_mmio
);
1974 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
1975 "SCtrl 0x%08x\n", mv_scr_read(ap
, SCR_STATUS
),
1976 mv_scr_read(ap
, SCR_ERROR
), mv_scr_read(ap
, SCR_CONTROL
));
1978 /* Issue COMRESET via SControl */
1980 sata_scr_write_flush(ap
, SCR_CONTROL
, 0x301);
1981 __msleep(1, can_sleep
);
1983 sata_scr_write_flush(ap
, SCR_CONTROL
, 0x300);
1984 __msleep(20, can_sleep
);
1986 timeout
= jiffies
+ msecs_to_jiffies(200);
1988 sata_scr_read(ap
, SCR_STATUS
, &sstatus
);
1989 if (((sstatus
& 0x3) == 3) || ((sstatus
& 0x3) == 0))
1992 __msleep(1, can_sleep
);
1993 } while (time_before(jiffies
, timeout
));
1995 /* work around errata */
1996 if (IS_60XX(hpriv
) &&
1997 (sstatus
!= 0x0) && (sstatus
!= 0x113) && (sstatus
!= 0x123) &&
1999 goto comreset_retry
;
2001 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2002 "SCtrl 0x%08x\n", mv_scr_read(ap
, SCR_STATUS
),
2003 mv_scr_read(ap
, SCR_ERROR
), mv_scr_read(ap
, SCR_CONTROL
));
2005 if (ata_port_online(ap
)) {
2008 sata_scr_read(ap
, SCR_STATUS
, &sstatus
);
2009 ata_port_printk(ap
, KERN_INFO
,
2010 "no device found (phy stat %08x)\n", sstatus
);
2011 ata_port_disable(ap
);
2015 /* even after SStatus reflects that device is ready,
2016 * it seems to take a while for link to be fully
2017 * established (and thus Status no longer 0x80/0x7F),
2018 * so we poll a bit for that, here.
2022 u8 drv_stat
= ata_check_status(ap
);
2023 if ((drv_stat
!= 0x80) && (drv_stat
!= 0x7f))
2025 __msleep(500, can_sleep
);
2030 tf
.lbah
= readb(ap
->ioaddr
.lbah_addr
);
2031 tf
.lbam
= readb(ap
->ioaddr
.lbam_addr
);
2032 tf
.lbal
= readb(ap
->ioaddr
.lbal_addr
);
2033 tf
.nsect
= readb(ap
->ioaddr
.nsect_addr
);
2035 dev
->class = ata_dev_classify(&tf
);
2036 if (!ata_dev_enabled(dev
)) {
2037 VPRINTK("Port disabled post-sig: No device present.\n");
2038 ata_port_disable(ap
);
2041 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2043 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
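
/* __mv_phy_reset() drives COMRESET by writing SControl 0x301 then 0x300,
 * polls SStatus for up to ~200ms, and retries on 60xx parts that report
 * known-bad intermediate values; mv_phy_reset() is just the sleeping
 * wrapper used outside interrupt context.
 */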
2054 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
2055 * @ap: ATA channel to manipulate
2057 * Intent is to clear all pending error conditions, reset the
2058 * chip/bus, fail the command, and move on.
2061 * This routine holds the host lock while failing the command.
2063 static void mv_eng_timeout(struct ata_port
*ap
)
2065 void __iomem
*mmio
= ap
->host
->iomap
[MV_PRIMARY_BAR
];
2066 struct ata_queued_cmd
*qc
;
2067 unsigned long flags
;
2069 ata_port_printk(ap
, KERN_ERR
, "Entering mv_eng_timeout\n");
2070 DPRINTK("All regs @ start of eng_timeout\n");
2071 mv_dump_all_regs(mmio
, ap
->port_no
, to_pci_dev(ap
->host
->dev
));
2073 qc
= ata_qc_from_tag(ap
, ap
->active_tag
);
2074 printk(KERN_ERR
"mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
2075 mmio
, ap
, qc
, qc
->scsicmd
, &qc
->scsicmd
->cmnd
);
2077 spin_lock_irqsave(&ap
->host
->lock
, flags
);
2079 mv_stop_and_reset(ap
);
2080 spin_unlock_irqrestore(&ap
->host
->lock
, flags
);
2082 WARN_ON(!(qc
->flags
& ATA_QCFLAG_ACTIVE
));
2083 if (qc
->flags
& ATA_QCFLAG_ACTIVE
) {
2084 qc
->err_mask
|= AC_ERR_TIMEOUT
;
2085 ata_eh_qc_complete(qc
);
2090 * mv_port_init - Perform some early initialization on a single port.
2091 * @port: libata data structure storing shadow register addresses
2092 * @port_mmio: base address of the port
2094 * Initialize shadow register mmio addresses, clear outstanding
2095 * interrupts on the port, and unmask interrupts for the future
2096 * start of the port.
2099 * Inherited from caller.
2101 static void mv_port_init(struct ata_ioports
*port
, void __iomem
*port_mmio
)
2103 void __iomem
*shd_base
= port_mmio
+ SHD_BLK_OFS
;
2106 /* PIO related setup
2108 port
->data_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_DATA
);
2110 port
->feature_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_ERR
);
2111 port
->nsect_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_NSECT
);
2112 port
->lbal_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAL
);
2113 port
->lbam_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAM
);
2114 port
->lbah_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAH
);
2115 port
->device_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_DEVICE
);
2117 port
->command_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_STATUS
);
2118 /* special case: control/altstatus doesn't have ATA_REG_ address */
2119 port
->altstatus_addr
= port
->ctl_addr
= shd_base
+ SHD_CTL_AST_OFS
;
2122 port
->cmd_addr
= port
->bmdma_addr
= port
->scr_addr
= NULL
;
2124 /* Clear any currently outstanding port interrupt conditions */
2125 serr_ofs
= mv_scr_offset(SCR_ERROR
);
2126 writelfl(readl(port_mmio
+ serr_ofs
), port_mmio
+ serr_ofs
);
2127 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2129 /* unmask all EDMA error interrupts */
2130 writelfl(~0, port_mmio
+ EDMA_ERR_IRQ_MASK_OFS
);
2132 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2133 readl(port_mmio
+ EDMA_CFG_OFS
),
2134 readl(port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
),
2135 readl(port_mmio
+ EDMA_ERR_IRQ_MASK_OFS
));
2138 static int mv_chip_id(struct ata_host
*host
, unsigned int board_idx
)
2140 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2141 struct mv_host_priv
*hpriv
= host
->private_data
;
2143 u32 hp_flags
= hpriv
->hp_flags
;
2145 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &rev_id
);
2149 hpriv
->ops
= &mv5xxx_ops
;
2150 hp_flags
|= MV_HP_50XX
;
2154 hp_flags
|= MV_HP_ERRATA_50XXB0
;
2157 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2160 dev_printk(KERN_WARNING
, &pdev
->dev
,
2161 "Applying 50XXB2 workarounds to unknown rev\n");
2162 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2169 hpriv
->ops
= &mv5xxx_ops
;
2170 hp_flags
|= MV_HP_50XX
;
2174 hp_flags
|= MV_HP_ERRATA_50XXB0
;
2177 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2180 dev_printk(KERN_WARNING
, &pdev
->dev
,
2181 "Applying B2 workarounds to unknown rev\n");
2182 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2189 hpriv
->ops
= &mv6xxx_ops
;
2193 hp_flags
|= MV_HP_ERRATA_60X1B2
;
2196 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2199 dev_printk(KERN_WARNING
, &pdev
->dev
,
2200 "Applying B2 workarounds to unknown rev\n");
2201 hp_flags
|= MV_HP_ERRATA_60X1B2
;
2208 hpriv
->ops
= &mv6xxx_ops
;
2210 hp_flags
|= MV_HP_GEN_IIE
;
2214 hp_flags
|= MV_HP_ERRATA_XX42A0
;
2217 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2220 dev_printk(KERN_WARNING
, &pdev
->dev
,
2221 "Applying 60X1C0 workarounds to unknown rev\n");
2222 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2228 printk(KERN_ERR DRV_NAME
": BUG: invalid board index %u\n", board_idx
);
2232 hpriv
->hp_flags
= hp_flags
;
2238 * mv_init_host - Perform some early initialization of the host.
2239 * @host: ATA host to initialize
2240 * @board_idx: controller index
2242 * If possible, do an early global reset of the host. Then do
2243 * our port init and clear/unmask all/relevant host interrupts.
2246 * Inherited from caller.
2248 static int mv_init_host(struct ata_host
*host
, unsigned int board_idx
)
2250 int rc
= 0, n_hc
, port
, hc
;
2251 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2252 void __iomem
*mmio
= host
->iomap
[MV_PRIMARY_BAR
];
2253 struct mv_host_priv
*hpriv
= host
->private_data
;
2255 /* global interrupt mask */
2256 writel(0, mmio
+ HC_MAIN_IRQ_MASK_OFS
);
2258 rc
= mv_chip_id(host
, board_idx
);
2262 n_hc
= mv_get_hc_count(host
->ports
[0]->flags
);
2264 for (port
= 0; port
< host
->n_ports
; port
++)
2265 hpriv
->ops
->read_preamp(hpriv
, port
, mmio
);
2267 rc
= hpriv
->ops
->reset_hc(hpriv
, mmio
, n_hc
);
2271 hpriv
->ops
->reset_flash(hpriv
, mmio
);
2272 hpriv
->ops
->reset_bus(pdev
, mmio
);
2273 hpriv
->ops
->enable_leds(hpriv
, mmio
);
2275 for (port
= 0; port
< host
->n_ports
; port
++) {
2276 if (IS_60XX(hpriv
)) {
2277 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2279 u32 ifctl
= readl(port_mmio
+ SATA_INTERFACE_CTL
);
2280 ifctl
|= (1 << 7); /* enable gen2i speed */
2281 ifctl
= (ifctl
& 0xfff) | 0x9b1000; /* from chip spec */
2282 writelfl(ifctl
, port_mmio
+ SATA_INTERFACE_CTL
);
2285 hpriv
->ops
->phy_errata(hpriv
, mmio
, port
);
2288 for (port
= 0; port
< host
->n_ports
; port
++) {
2289 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2290 mv_port_init(&host
->ports
[port
]->ioaddr
, port_mmio
);
2293 for (hc
= 0; hc
< n_hc
; hc
++) {
2294 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
2296 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2297 "(before clear)=0x%08x\n", hc
,
2298 readl(hc_mmio
+ HC_CFG_OFS
),
2299 readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
));
2301 /* Clear any currently outstanding hc interrupt conditions */
2302 writelfl(0, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
2305 /* Clear any currently outstanding host interrupt conditions */
2306 writelfl(0, mmio
+ PCI_IRQ_CAUSE_OFS
);
2308 /* and unmask interrupt generation for host regs */
2309 writelfl(PCI_UNMASK_ALL_IRQS
, mmio
+ PCI_IRQ_MASK_OFS
);
2312 writelfl(~HC_MAIN_MASKED_IRQS_5
, mmio
+ HC_MAIN_IRQ_MASK_OFS
);
2314 writelfl(~HC_MAIN_MASKED_IRQS
, mmio
+ HC_MAIN_IRQ_MASK_OFS
);
2316 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2317 "PCI int cause/mask=0x%08x/0x%08x\n",
2318 readl(mmio
+ HC_MAIN_IRQ_CAUSE_OFS
),
2319 readl(mmio
+ HC_MAIN_IRQ_MASK_OFS
),
2320 readl(mmio
+ PCI_IRQ_CAUSE_OFS
),
2321 readl(mmio
+ PCI_IRQ_MASK_OFS
));
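
/* Bring-up order in mv_init_host(): mask the main cause register,
 * identify the chip and its errata via mv_chip_id(), read per-port PHY
 * preamp settings, globally reset the host controllers, apply per-port
 * errata and interface-control fix-ups, set up shadow register addresses,
 * then clear and unmask the HC and PCI interrupt sources.
 */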
2328 * mv_print_info - Dump key info to kernel log for perusal.
2329 * @host: ATA host to print info about
2331 * FIXME: complete this.
2334 * Inherited from caller.
2336 static void mv_print_info(struct ata_host
*host
)
2338 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2339 struct mv_host_priv
*hpriv
= host
->private_data
;
2343 /* Use this to determine the HW stepping of the chip so we know
2344 * what errata to workaround
2346 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &rev_id
);
2348 pci_read_config_byte(pdev
, PCI_CLASS_DEVICE
, &scc
);
2351 else if (scc
== 0x01)
2356 dev_printk(KERN_INFO
, &pdev
->dev
,
2357 "%u slots %u ports %s mode IRQ via %s\n",
2358 (unsigned)MV_MAX_Q_DEPTH
, host
->n_ports
,
2359 scc_s
, (MV_HP_FLAG_MSI
& hpriv
->hp_flags
) ? "MSI" : "INTx");
2363 * mv_init_one - handle a positive probe of a Marvell host
2364 * @pdev: PCI device found
2365 * @ent: PCI device ID entry for the matched host
2368 * Inherited from caller.
2370 static int mv_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
2372 static int printed_version
= 0;
2373 unsigned int board_idx
= (unsigned int)ent
->driver_data
;
2374 const struct ata_port_info
*ppi
[] = { &mv_port_info
[board_idx
], NULL
};
2375 struct ata_host
*host
;
2376 struct mv_host_priv
*hpriv
;
2379 if (!printed_version
++)
2380 dev_printk(KERN_INFO
, &pdev
->dev
, "version " DRV_VERSION
"\n");
2383 n_ports
= mv_get_hc_count(ppi
[0]->flags
) * MV_PORTS_PER_HC
;
2385 host
= ata_host_alloc_pinfo(&pdev
->dev
, ppi
, n_ports
);
2386 hpriv
= devm_kzalloc(&pdev
->dev
, sizeof(*hpriv
), GFP_KERNEL
);
2387 if (!host
|| !hpriv
)
2389 host
->private_data
= hpriv
;
2391 /* acquire resources */
2392 rc
= pcim_enable_device(pdev
);
2396 rc
= pcim_iomap_regions(pdev
, 1 << MV_PRIMARY_BAR
, DRV_NAME
);
2398 pcim_pin_device(pdev
);
2401 host
->iomap
= pcim_iomap_table(pdev
);
2403 rc
= pci_go_64(pdev
);
2407 /* initialize adapter */
2408 rc
= mv_init_host(host
, board_idx
);
2412 /* Enable interrupts */
2413 if (msi
&& pci_enable_msi(pdev
))
2416 mv_dump_pci_cfg(pdev
, 0x68);
2417 mv_print_info(host
);
2419 pci_set_master(pdev
);
2420 return ata_host_activate(host
, pdev
->irq
, mv_interrupt
, IRQF_SHARED
,
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);