/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signaled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is not worth the
 *       latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.25"
enum {
	/* BARs are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
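	/*
	 * Sizing sanity check (editor's note, derived from the values above):
	 * each CRQB is 32 bytes, so MV_CRQB_Q_SZ = 32 * 32 = 1KB, matching
	 * the 1KB alignment requirement; each CRPB is 8 bytes, so
	 * MV_CRPB_Q_SZ = 8 * 32 = 256B; each ePRD is 16 bytes, so
	 * MV_SG_TBL_SZ = 16 * 256 = 4KB per command tag.
	 */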
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
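	/*
	 * Worked example (editor's note): for port 6, hc = 6 >> 2 = 1 and
	 * hardport = 6 & 3 = 2, i.e. the third port on the second host
	 * controller.  Parts flagged MV_FLAG_DUAL_HC below have two such
	 * controllers of four ports each.
	 */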
	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | MV_FLAG_IRQ_COALESCE |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | ATA_FLAG_NO_ATAPI,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
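/*
 * Example (editor's note): WINDOW_CTRL(1) = 0x20030 + (1 << 4) = 0x20040,
 * i.e. each mbus window control/base register pair is 0x10 bytes apart.
 */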
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
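/*
 * Editor's note on the EDMA_*_BASE_LO_MASK values above: the CRQB queue is
 * 1KB-aligned, so the low 10 bits of its DMA address are zero (mask
 * 0xfffffc00); the CRPB queue is 256B-aligned, so its low 8 bits are zero
 * (mask 0xffffff00).  The hardware reuses those low bits of the IN/OUT
 * pointer registers to hold the queue index, which mv_set_edma_ptrs()
 * below relies upon.
 */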
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	unsigned int		delayed_eh_pmp_map;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
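/*
 * Editor's note: register writes may be posted (buffered) by the PCI
 * bridge; the dummy readl() above forces the write to reach the chip
 * before execution continues.  For example, mv_stop_edma_engine() below
 * issues writelfl(EDMA_DS, ...) so the disable request is known to have
 * arrived before it starts polling for confirmation.
 */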
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
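/*
 * Worked example (editor's note): with port = 6, mv_hc_from_port() gives
 * hc = 1, so shift = 1 * HC_SHIFT = 9; hardport = 6 & 3 = 2, so the final
 * shift = 9 + 2 * 2 = 13.  ERR_IRQ << shift and DONE_IRQ << shift then
 * select bits 13 and 14 of the main interrupt cause/mask registers,
 * inside the "bits 9-17 = HC1's ports" range noted in the enum above.
 */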
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
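/*
 * Editor's note: the IN/OUT pointer registers double as BASE_LO, which is
 * why the index is shifted before being merged in.  E.g. with req_idx = 3,
 * index = 3 << EDMA_REQ_Q_PTR_SHIFT = 0x60, and
 * (crqb_dma & 0xfffffc00) | 0x60 stores both the 1KB-aligned queue base
 * and the byte offset of the 32-byte CRQB slot 3 in a single register.
 */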
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}
static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}
/**
 *      mv_start_edma - Enable eDMA engine
 *      @port_mmio: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}
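/*
 * Editor's note on the arithmetic above: timeout = 15 * 1000 / 5 = 3000
 * iterations of udelay(5) each, i.e. roughly 15 milliseconds worst case.
 */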
/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
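/*
 * Editor's note: with SATA_STATUS_OFS = 0x300 this maps SCR_STATUS (0) to
 * 0x300, SCR_ERROR (1) to 0x304 and SCR_CONTROL (2) to 0x308, while
 * SCR_ACTIVE (3) lives apart at 0x350, matching the "ctrl, err regs follow
 * status" comment in the register enum above.
 */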
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}
static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
{
	u32 new_fiscfg, old_fiscfg;
	u32 new_ltmode, old_ltmode;
	u32 new_haltcond, old_haltcond;

	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
	old_ltmode   = readl(port_mmio + LTMODE_OFS);
	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);

	new_fiscfg   = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	new_ltmode   = old_ltmode & ~LTMODE_BIT8;
	new_haltcond = old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			new_haltcond &= ~EDMA_ERR_DEV;
		else
			new_fiscfg |=  FISCFG_WAIT_DEV_ERR;
	}

	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
	if (new_haltcond != old_haltcond)
		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
}
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(port_mmio, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	mv_port_free_dma_mem(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
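/*
 * Worked example of the 64K split above (editor's note): a segment with
 * dma address 0x12344000 and length 0xd000 has offset = 0x4000, so
 * offset + sg_len = 0x11000 exceeds 0x10000 and the first ePRD is capped
 * at len = 0x10000 - 0x4000 = 0xc000; the remaining 0x1000 bytes go into
 * a second ePRD at address 0x12350000.  This worst-case doubling is why
 * .sg_tablesize is MV_MAX_SG_CT / 2 in the templates above.
 */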
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
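/*
 * Worked example (editor's note): packing tf->command = 0xc8 (READ DMA)
 * as the last word gives 0xc8 | (ATA_REG_CMD << 8) | CRQB_CMD_CS |
 * CRQB_CMD_LAST = 0xc8 | 0x700 | 0x1000 | 0x8000 = 0x97c8, assuming
 * ATA_REG_CMD == 7 as defined in <linux/ata.h>.
 */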
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		static int limit_warnings = 10;
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_enable_port_irqs(ap, ERR_IRQ);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	return 0;
}
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		qc = NULL;
	return qc;
}
static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}
static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}
static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}
static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}
static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}
static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		when = "disabled";
	} else if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action   |= ATA_EH_RESET;
	ata_port_freeze(ap);
}
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      Most cases require a full reset of the chip's state machine,
 *      which also performs a COMRESET.
 *      Also, if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & SATA_FIS_IRQ_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}
static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process any new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
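
/*
 * Implementation note (a sketch of the arithmetic above, not from the
 * datasheet): the response queue is a power-of-two ring, so advancing
 * resp_idx is just a masked increment.  With a queue depth of 32
 * (mask 0x1f), for example, index 31 wraps back to 0:
 *
 *	next = (31 + 1) & MV_MAX_Q_DEPTH_MASK;		yields 0
 *
 * The same mask extracts the index field from the hardware pointer
 * register after shifting right by EDMA_RSP_Q_PTR_SHIFT.
 */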

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @main_irq_cause: Main interrupt cause register for the chip.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports' bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}
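
/*
 * Bit-layout sketch, derived from the shift/mask logic above: within
 * one hc's slice of main_irq_cause, hardport p owns the two bits
 * (DONE_IRQ | ERR_IRQ) << (p * 2), while the hc's own ack register is
 * addressed per-port as (DMA_IRQ | DEV_IRQ) << p.  Hence the loop that
 * builds ack_irqs from hc_cause before the single writelfl().
 */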

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read-only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI: block new interrupts while in here */
	if (using_msi)
		writel(0, hpriv->main_irq_mask_addr);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
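
/*
 * Note on the masking dance in mv_interrupt() above: MSI is
 * edge-triggered, so a cause bit that remains set will not re-assert
 * the interrupt by itself; restoring main_irq_mask on the way out
 * lets any still-pending cause bits generate a fresh message.
 */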

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
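
/*
 * For reference: libata numbers SCR_STATUS, SCR_ERROR and SCR_CONTROL
 * as 0, 1 and 2, so the offsets produced above are 0x0, 0x4 and 0x8.
 * Any other register yields the 0xffffffffU sentinel, which the
 * mv5_scr_read()/mv5_scr_write() callers below test for.
 */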

static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE_OFS);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE_OFS);

		tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO
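
/*
 * Illustration: ZERO() simply expands to a zeroing register write,
 * e.g. ZERO(0x028) becomes writel(0, port_mmio + 0x028).  The macro
 * is #define'd and #undef'd around each user because the base pointer
 * (port_mmio, hc_mmio, or mmio) differs from function to function.
 */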

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE_OFS);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE_OFS);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/*
 * TODO: use the generic LED interface to configure the SATA Presence
 * & Activity LEDs on the board.
 */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}
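
/*
 * Usage note: mv_reset_channel() calls mv_setup_ifcfg(port_mmio, 1)
 * to enable gen2i (3.0Gb/s) signalling, while mv_hardreset() calls it
 * with want_gen2i=0 to force 1.5Gb/s when link bring-up repeatedly
 * fails (part of the FEr SATA#10 workaround).
 */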

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}
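
/*
 * A sketch of what this achieves: the low four bits of SATA_IFCTL
 * select which port-multiplier device the interface addresses, so the
 * reset paths below call mv_pmp_select() with sata_srst_pmp(link)
 * before issuing a reset to the intended PMP link.
 */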

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
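
/*
 * Decoding the SStatus values tested above (standard SATA nibbles:
 * DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8): 0x0 means no
 * device; 0x113 and 0x123 mean device present with phy online at
 * Gen1/Gen2 speed; and 0x121 (presence detected, no communication)
 * is the stuck state that the force-1.5Gb/s retry targets.
 */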

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE_OFS);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + PCI_COMMAND_OFS);
		if (reg & PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}
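
/*
 * Summary of the check above: PCI-X mode is always treated as safe
 * for cut-through, while in conventional PCI mode cut-through is
 * only okay when the Master Read Trigger bit (PCI_COMMAND_MRDTRIG)
 * is clear.
 */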

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310)) {
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					      MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					      MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
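
/*
 * Pool geometry note: per the queue requirements described near the
 * top of this file (CRQB: 1KB-aligned, CRPB: 256B-aligned), each
 * dmam_pool_create() call above passes the queue size itself as the
 * alignment argument (the parameters are name, dev, size, align,
 * boundary).
 */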

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
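
/*
 * Worked example of the window control word composed above, assuming
 * a single 256MB chip-select: ((0x10000000 - 1) & 0xffff0000) yields
 * a size field of 0x0fff0000; the attribute is OR'd in at bits 15:8,
 * the DRAM target id at bits 7:4, and bit 0 enables the window.
 */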

/**
 *      mv_platform_probe - handle a positive probe of an soc Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/**
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable message-signalled interrupts, if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3477 MODULE_AUTHOR("Brett Russ");
3478 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3479 MODULE_LICENSE("GPL");
3480 MODULE_DEVICE_TABLE(pci
, mv_pci_tbl
);
3481 MODULE_VERSION(DRV_VERSION
);
3482 MODULE_ALIAS("platform:" DRV_NAME
);
3485 module_param(msi
, int, 0444);
3486 MODULE_PARM_DESC(msi
, "Enable use of PCI MSI (0=off, 1=on)");
3489 module_init(mv_init
);
3490 module_exit(mv_exit
);