sata_mv ncq: add want_ncq parameter for EDMA configuration
linux-2.6/mini2440.git: drivers/ata/sata_mv.c
1 /*
2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 sata_mv TODO list:
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember that a couple of workarounds (one related to PCI-X)
30 are still needed.
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
34 5) Investigate problems with PCI Message Signaled Interrupts (MSI).
36 6) Add port multiplier support (intermediate)
38 8) Develop a low-power-consumption strategy, and implement it.
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signaled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is not
47 worth the latency cost.
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
56 13) Verify that 7042 is fully supported. I only have a 6042.
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
75 #define DRV_NAME "sata_mv"
76 #define DRV_VERSION "1.01"
78 enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
95 MV_SATAHC0_REG_BASE = 0x20000,
96 MV_FLASH_CTL = 0x1046c,
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
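	/* Arithmetic check on the sizes above: MV_CRQB_Q_SZ = 32 * 32 = 1024B (1KB),
	 * MV_CRPB_Q_SZ = 8 * 32 = 256B, MV_SG_TBL_SZ = 16 * 176 = 2816B,
	 * so MV_PORT_PRIV_DMA_SZ = 4096B (4KB), matching the comment above.
	 */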
119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
123 MV_PORT_MASK = 3,
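	/* Example: port 5 maps to HC 1 (5 >> MV_PORT_HC_SHIFT) and
	 * hard port 1 (5 & MV_PORT_MASK).
	 */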
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
141 CRPB_FLAG_STATUS_SHIFT = 8,
142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
147 /* PCI interface registers */
149 PCI_COMMAND_OFS = 0xc00,
151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
173 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
181 PCI_ERR = (1 << 18),
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 HC_MAIN_RSVD),
195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 HC_MAIN_RSVD_5),
198 /* SATAHC registers */
199 HC_CFG_OFS = 0,
201 HC_IRQ_CAUSE_OFS = 0x14,
202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
206 /* Shadow block registers */
207 SHD_BLK_OFS = 0x100,
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
210 /* SATA registers */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
213 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
214 PHY_MODE3 = 0x310,
215 PHY_MODE4 = 0x314,
216 PHY_MODE2 = 0x330,
217 MV5_PHY_MODE = 0x74,
218 MV5_LT_MODE = 0x30,
219 MV5_PHY_CTL = 0x0C,
220 SATA_INTERFACE_CTL = 0x050,
222 MV_M2_PREAMP_MASK = 0x7e0,
224 /* Port registers */
225 EDMA_CFG_OFS = 0,
226 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
227 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
228 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
229 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
230 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
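	/* EDMA_CFG_NCQ is the bit set by mv_edma_cfg() when its want_ncq
	 * argument is non-zero; the burst/buffer bits above are selected
	 * per chip generation in the same function.
	 */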
232 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
233 EDMA_ERR_IRQ_MASK_OFS = 0xc,
234 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
235 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
236 EDMA_ERR_DEV = (1 << 2), /* device error */
237 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
238 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
239 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
240 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
241 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
242 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
243 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
244 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
245 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
246 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
247 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
249 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
250 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
251 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
252 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
253 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
255 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
257 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
258 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
259 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
260 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
261 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
262 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
264 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
266 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
267 EDMA_ERR_OVERRUN_5 = (1 << 5),
268 EDMA_ERR_UNDERRUN_5 = (1 << 6),
270 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
271 EDMA_ERR_LNK_CTRL_RX_1 |
272 EDMA_ERR_LNK_CTRL_RX_3 |
273 EDMA_ERR_LNK_CTRL_TX,
275 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
276 EDMA_ERR_PRD_PAR |
277 EDMA_ERR_DEV_DCON |
278 EDMA_ERR_DEV_CON |
279 EDMA_ERR_SERR |
280 EDMA_ERR_SELF_DIS |
281 EDMA_ERR_CRQB_PAR |
282 EDMA_ERR_CRPB_PAR |
283 EDMA_ERR_INTRL_PAR |
284 EDMA_ERR_IORDY |
285 EDMA_ERR_LNK_CTRL_RX_2 |
286 EDMA_ERR_LNK_DATA_RX |
287 EDMA_ERR_LNK_DATA_TX |
288 EDMA_ERR_TRANS_PROTO,
289 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
290 EDMA_ERR_PRD_PAR |
291 EDMA_ERR_DEV_DCON |
292 EDMA_ERR_DEV_CON |
293 EDMA_ERR_OVERRUN_5 |
294 EDMA_ERR_UNDERRUN_5 |
295 EDMA_ERR_SELF_DIS_5 |
296 EDMA_ERR_CRQB_PAR |
297 EDMA_ERR_CRPB_PAR |
298 EDMA_ERR_INTRL_PAR |
299 EDMA_ERR_IORDY,
301 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
302 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
304 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
305 EDMA_REQ_Q_PTR_SHIFT = 5,
307 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
308 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
309 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
310 EDMA_RSP_Q_PTR_SHIFT = 3,
312 EDMA_CMD_OFS = 0x28, /* EDMA command register */
313 EDMA_EN = (1 << 0), /* enable EDMA */
314 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
315 ATA_RST = (1 << 2), /* reset trans/link/phy */
317 EDMA_IORDY_TMOUT = 0x34,
318 EDMA_ARB_CFG = 0x38,
320 /* Host private flags (hp_flags) */
321 MV_HP_FLAG_MSI = (1 << 0),
322 MV_HP_ERRATA_50XXB0 = (1 << 1),
323 MV_HP_ERRATA_50XXB2 = (1 << 2),
324 MV_HP_ERRATA_60X1B2 = (1 << 3),
325 MV_HP_ERRATA_60X1C0 = (1 << 4),
326 MV_HP_ERRATA_XX42A0 = (1 << 5),
327 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
328 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
329 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
330 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
332 /* Port private flags (pp_flags) */
333 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
334 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
335 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
338 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
339 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
340 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
342 enum {
343 /* DMA boundary 0xffff is required by the s/g splitting
344 we need on /length/ in mv_fill_sg().
346 MV_DMA_BOUNDARY = 0xffffU,
348 /* mask of register bits containing lower 32 bits
349 * of EDMA request queue DMA address
351 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
353 /* ditto, for response queue */
354 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
357 enum chip_type {
358 chip_504x,
359 chip_508x,
360 chip_5080,
361 chip_604x,
362 chip_608x,
363 chip_6042,
364 chip_7042,
367 /* Command ReQuest Block: 32B */
368 struct mv_crqb {
369 __le32 sg_addr;
370 __le32 sg_addr_hi;
371 __le16 ctrl_flags;
372 __le16 ata_cmd[11];
375 struct mv_crqb_iie {
376 __le32 addr;
377 __le32 addr_hi;
378 __le32 flags;
379 __le32 len;
380 __le32 ata_cmd[4];
383 /* Command ResPonse Block: 8B */
384 struct mv_crpb {
385 __le16 id;
386 __le16 flags;
387 __le32 tmstmp;
390 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
391 struct mv_sg {
392 __le32 addr;
393 __le32 flags_size;
394 __le32 addr_hi;
395 __le32 reserved;
398 struct mv_port_priv {
399 struct mv_crqb *crqb;
400 dma_addr_t crqb_dma;
401 struct mv_crpb *crpb;
402 dma_addr_t crpb_dma;
403 struct mv_sg *sg_tbl;
404 dma_addr_t sg_tbl_dma;
406 unsigned int req_idx;
407 unsigned int resp_idx;
409 u32 pp_flags;
412 struct mv_port_signal {
413 u32 amps;
414 u32 pre;
417 struct mv_host_priv {
418 u32 hp_flags;
419 struct mv_port_signal signal[8];
420 const struct mv_hw_ops *ops;
421 u32 irq_cause_ofs;
422 u32 irq_mask_ofs;
423 u32 unmask_all_irqs;
426 struct mv_hw_ops {
427 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int port);
429 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
430 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
431 void __iomem *mmio);
432 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int n_hc);
434 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
435 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
438 static void mv_irq_clear(struct ata_port *ap);
439 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
440 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
441 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
442 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
443 static int mv_port_start(struct ata_port *ap);
444 static void mv_port_stop(struct ata_port *ap);
445 static void mv_qc_prep(struct ata_queued_cmd *qc);
446 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
447 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
448 static void mv_error_handler(struct ata_port *ap);
449 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
450 static void mv_eh_freeze(struct ata_port *ap);
451 static void mv_eh_thaw(struct ata_port *ap);
452 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
454 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
455 unsigned int port);
456 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
457 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
458 void __iomem *mmio);
459 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
460 unsigned int n_hc);
461 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
462 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
464 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
465 unsigned int port);
466 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
467 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
468 void __iomem *mmio);
469 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
470 unsigned int n_hc);
471 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
472 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
473 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
474 unsigned int port_no);
475 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
476 void __iomem *port_mmio, int want_ncq);
477 static int __mv_stop_dma(struct ata_port *ap);
479 static struct scsi_host_template mv5_sht = {
480 .module = THIS_MODULE,
481 .name = DRV_NAME,
482 .ioctl = ata_scsi_ioctl,
483 .queuecommand = ata_scsi_queuecmd,
484 .can_queue = ATA_DEF_QUEUE,
485 .this_id = ATA_SHT_THIS_ID,
486 .sg_tablesize = MV_MAX_SG_CT / 2,
487 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
488 .emulated = ATA_SHT_EMULATED,
489 .use_clustering = 1,
490 .proc_name = DRV_NAME,
491 .dma_boundary = MV_DMA_BOUNDARY,
492 .slave_configure = ata_scsi_slave_config,
493 .slave_destroy = ata_scsi_slave_destroy,
494 .bios_param = ata_std_bios_param,
497 static struct scsi_host_template mv6_sht = {
498 .module = THIS_MODULE,
499 .name = DRV_NAME,
500 .ioctl = ata_scsi_ioctl,
501 .queuecommand = ata_scsi_queuecmd,
502 .can_queue = ATA_DEF_QUEUE,
503 .this_id = ATA_SHT_THIS_ID,
504 .sg_tablesize = MV_MAX_SG_CT / 2,
505 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
506 .emulated = ATA_SHT_EMULATED,
507 .use_clustering = 1,
508 .proc_name = DRV_NAME,
509 .dma_boundary = MV_DMA_BOUNDARY,
510 .slave_configure = ata_scsi_slave_config,
511 .slave_destroy = ata_scsi_slave_destroy,
512 .bios_param = ata_std_bios_param,
515 static const struct ata_port_operations mv5_ops = {
516 .tf_load = ata_tf_load,
517 .tf_read = ata_tf_read,
518 .check_status = ata_check_status,
519 .exec_command = ata_exec_command,
520 .dev_select = ata_std_dev_select,
522 .cable_detect = ata_cable_sata,
524 .qc_prep = mv_qc_prep,
525 .qc_issue = mv_qc_issue,
526 .data_xfer = ata_data_xfer,
528 .irq_clear = mv_irq_clear,
529 .irq_on = ata_irq_on,
531 .error_handler = mv_error_handler,
532 .post_internal_cmd = mv_post_int_cmd,
533 .freeze = mv_eh_freeze,
534 .thaw = mv_eh_thaw,
536 .scr_read = mv5_scr_read,
537 .scr_write = mv5_scr_write,
539 .port_start = mv_port_start,
540 .port_stop = mv_port_stop,
543 static const struct ata_port_operations mv6_ops = {
544 .tf_load = ata_tf_load,
545 .tf_read = ata_tf_read,
546 .check_status = ata_check_status,
547 .exec_command = ata_exec_command,
548 .dev_select = ata_std_dev_select,
550 .cable_detect = ata_cable_sata,
552 .qc_prep = mv_qc_prep,
553 .qc_issue = mv_qc_issue,
554 .data_xfer = ata_data_xfer,
556 .irq_clear = mv_irq_clear,
557 .irq_on = ata_irq_on,
559 .error_handler = mv_error_handler,
560 .post_internal_cmd = mv_post_int_cmd,
561 .freeze = mv_eh_freeze,
562 .thaw = mv_eh_thaw,
564 .scr_read = mv_scr_read,
565 .scr_write = mv_scr_write,
567 .port_start = mv_port_start,
568 .port_stop = mv_port_stop,
571 static const struct ata_port_operations mv_iie_ops = {
572 .tf_load = ata_tf_load,
573 .tf_read = ata_tf_read,
574 .check_status = ata_check_status,
575 .exec_command = ata_exec_command,
576 .dev_select = ata_std_dev_select,
578 .cable_detect = ata_cable_sata,
580 .qc_prep = mv_qc_prep_iie,
581 .qc_issue = mv_qc_issue,
582 .data_xfer = ata_data_xfer,
584 .irq_clear = mv_irq_clear,
585 .irq_on = ata_irq_on,
587 .error_handler = mv_error_handler,
588 .post_internal_cmd = mv_post_int_cmd,
589 .freeze = mv_eh_freeze,
590 .thaw = mv_eh_thaw,
592 .scr_read = mv_scr_read,
593 .scr_write = mv_scr_write,
595 .port_start = mv_port_start,
596 .port_stop = mv_port_stop,
599 static const struct ata_port_info mv_port_info[] = {
600 { /* chip_504x */
601 .flags = MV_COMMON_FLAGS,
602 .pio_mask = 0x1f, /* pio0-4 */
603 .udma_mask = ATA_UDMA6,
604 .port_ops = &mv5_ops,
606 { /* chip_508x */
607 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
608 .pio_mask = 0x1f, /* pio0-4 */
609 .udma_mask = ATA_UDMA6,
610 .port_ops = &mv5_ops,
612 { /* chip_5080 */
613 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
614 .pio_mask = 0x1f, /* pio0-4 */
615 .udma_mask = ATA_UDMA6,
616 .port_ops = &mv5_ops,
618 { /* chip_604x */
619 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
620 .pio_mask = 0x1f, /* pio0-4 */
621 .udma_mask = ATA_UDMA6,
622 .port_ops = &mv6_ops,
624 { /* chip_608x */
625 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
626 MV_FLAG_DUAL_HC,
627 .pio_mask = 0x1f, /* pio0-4 */
628 .udma_mask = ATA_UDMA6,
629 .port_ops = &mv6_ops,
631 { /* chip_6042 */
632 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
633 .pio_mask = 0x1f, /* pio0-4 */
634 .udma_mask = ATA_UDMA6,
635 .port_ops = &mv_iie_ops,
637 { /* chip_7042 */
638 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
639 .pio_mask = 0x1f, /* pio0-4 */
640 .udma_mask = ATA_UDMA6,
641 .port_ops = &mv_iie_ops,
645 static const struct pci_device_id mv_pci_tbl[] = {
646 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
647 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
648 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
649 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
650 /* RocketRAID 1740/174x have different identifiers */
651 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
652 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
654 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
655 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
656 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
657 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
658 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
660 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
662 /* Adaptec 1430SA */
663 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
665 /* Marvell 7042 support */
666 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
668 /* Highpoint RocketRAID PCIe series */
669 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
670 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
672 { } /* terminate list */
675 static struct pci_driver mv_pci_driver = {
676 .name = DRV_NAME,
677 .id_table = mv_pci_tbl,
678 .probe = mv_init_one,
679 .remove = ata_pci_remove_one,
682 static const struct mv_hw_ops mv5xxx_ops = {
683 .phy_errata = mv5_phy_errata,
684 .enable_leds = mv5_enable_leds,
685 .read_preamp = mv5_read_preamp,
686 .reset_hc = mv5_reset_hc,
687 .reset_flash = mv5_reset_flash,
688 .reset_bus = mv5_reset_bus,
691 static const struct mv_hw_ops mv6xxx_ops = {
692 .phy_errata = mv6_phy_errata,
693 .enable_leds = mv6_enable_leds,
694 .read_preamp = mv6_read_preamp,
695 .reset_hc = mv6_reset_hc,
696 .reset_flash = mv6_reset_flash,
697 .reset_bus = mv_reset_pci_bus,
701 * module options
703 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
706 /* move to PCI layer or libata core? */
707 static int pci_go_64(struct pci_dev *pdev)
709 int rc;
711 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
712 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
713 if (rc) {
714 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
715 if (rc) {
716 dev_printk(KERN_ERR, &pdev->dev,
717 "64-bit DMA enable failed\n");
718 return rc;
721 } else {
722 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
723 if (rc) {
724 dev_printk(KERN_ERR, &pdev->dev,
725 "32-bit DMA enable failed\n");
726 return rc;
728 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
729 if (rc) {
730 dev_printk(KERN_ERR, &pdev->dev,
731 "32-bit consistent DMA enable failed\n");
732 return rc;
736 return rc;
740 * Functions
743 static inline void writelfl(unsigned long data, void __iomem *addr)
745 writel(data, addr);
746 (void) readl(addr); /* flush to avoid PCI posted write */
749 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
751 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
754 static inline unsigned int mv_hc_from_port(unsigned int port)
756 return port >> MV_PORT_HC_SHIFT;
759 static inline unsigned int mv_hardport_from_port(unsigned int port)
761 return port & MV_PORT_MASK;
764 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
765 unsigned int port)
767 return mv_hc_base(base, mv_hc_from_port(port));
770 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
772 return mv_hc_base_from_port(base, port) +
773 MV_SATAHC_ARBTR_REG_SZ +
774 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
777 static inline void __iomem *mv_ap_base(struct ata_port *ap)
779 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
782 static inline int mv_get_hc_count(unsigned long port_flags)
784 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
787 static void mv_irq_clear(struct ata_port *ap)
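/*
 * mv_set_edma_ptrs - Load EDMA queue base addresses and pointers
 * @port_mmio: port base address
 * @hpriv: host private data (checked for the XX42A0 errata flag)
 * @pp: port private data holding the queue DMA addresses and indices
 *
 * Programs the CRQB/CRPB base-high registers and converts the cached
 * software indices into the in/out pointer registers.  The PTR_SHIFT
 * values appear to turn a slot index into a byte offset, since CRQB
 * entries are 32B (shift 5) and CRPB entries are 8B (shift 3).
 *
 * LOCKING:
 * Inherited from caller.
 */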
791 static void mv_set_edma_ptrs(void __iomem *port_mmio,
792 struct mv_host_priv *hpriv,
793 struct mv_port_priv *pp)
795 u32 index;
798 * initialize request queue
800 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
802 WARN_ON(pp->crqb_dma & 0x3ff);
803 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
804 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
805 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
807 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
808 writelfl((pp->crqb_dma & 0xffffffff) | index,
809 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
810 else
811 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
814 * initialize response queue
816 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
818 WARN_ON(pp->crpb_dma & 0xff);
819 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
821 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
822 writelfl((pp->crpb_dma & 0xffffffff) | index,
823 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
824 else
825 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
827 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
828 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
832 * mv_start_dma - Enable eDMA engine
833 * @base: port base address
834 * @pp: port private data
836 * Verify the local cache of the eDMA state is accurate with a
837 * WARN_ON.
839 * LOCKING:
840 * Inherited from caller.
842 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
843 struct mv_port_priv *pp, u8 protocol)
845 int want_ncq = (protocol == ATA_PROT_NCQ);
847 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
848 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
849 if (want_ncq != using_ncq)
850 __mv_stop_dma(ap);
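	/* At this point EDMA is either already running in the right mode, or
	 * was stopped above because the NCQ setting changed (or was never
	 * started); the block below reconfigures it via mv_edma_cfg() with
	 * the requested want_ncq value and re-enables it.
	 */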
852 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
853 struct mv_host_priv *hpriv = ap->host->private_data;
854 int hard_port = mv_hardport_from_port(ap->port_no);
855 void __iomem *hc_mmio = mv_hc_base_from_port(
856 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
857 u32 hc_irq_cause, ipending;
859 /* clear EDMA event indicators, if any */
860 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
862 /* clear EDMA interrupt indicator, if any */
863 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
864 ipending = (DEV_IRQ << hard_port) |
865 (CRPB_DMA_DONE << hard_port);
866 if (hc_irq_cause & ipending) {
867 writelfl(hc_irq_cause & ~ipending,
868 hc_mmio + HC_IRQ_CAUSE_OFS);
871 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
873 /* clear FIS IRQ Cause */
874 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
876 mv_set_edma_ptrs(port_mmio, hpriv, pp);
878 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
879 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
881 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
885 * __mv_stop_dma - Disable eDMA engine
886 * @ap: ATA channel to manipulate
888 * Verify the local cache of the eDMA state is accurate with a
889 * WARN_ON.
891 * LOCKING:
892 * Inherited from caller.
894 static int __mv_stop_dma(struct ata_port *ap)
896 void __iomem *port_mmio = mv_ap_base(ap);
897 struct mv_port_priv *pp = ap->private_data;
898 u32 reg;
899 int i, err = 0;
901 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
902 /* Disable EDMA if active. The disable bit auto clears.
904 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
905 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
906 } else {
907 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
910 /* now properly wait for the eDMA to stop */
911 for (i = 1000; i > 0; i--) {
912 reg = readl(port_mmio + EDMA_CMD_OFS);
913 if (!(reg & EDMA_EN))
914 break;
916 udelay(100);
919 if (reg & EDMA_EN) {
920 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
921 err = -EIO;
924 return err;
927 static int mv_stop_dma(struct ata_port *ap)
929 unsigned long flags;
930 int rc;
932 spin_lock_irqsave(&ap->host->lock, flags);
933 rc = __mv_stop_dma(ap);
934 spin_unlock_irqrestore(&ap->host->lock, flags);
936 return rc;
939 #ifdef ATA_DEBUG
940 static void mv_dump_mem(void __iomem *start, unsigned bytes)
942 int b, w;
943 for (b = 0; b < bytes; ) {
944 DPRINTK("%p: ", start + b);
945 for (w = 0; b < bytes && w < 4; w++) {
946 printk("%08x ", readl(start + b));
947 b += sizeof(u32);
949 printk("\n");
952 #endif
954 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
956 #ifdef ATA_DEBUG
957 int b, w;
958 u32 dw;
959 for (b = 0; b < bytes; ) {
960 DPRINTK("%02x: ", b);
961 for (w = 0; b < bytes && w < 4; w++) {
962 (void) pci_read_config_dword(pdev, b, &dw);
963 printk("%08x ", dw);
964 b += sizeof(u32);
966 printk("\n");
968 #endif
970 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
971 struct pci_dev *pdev)
973 #ifdef ATA_DEBUG
974 void __iomem *hc_base = mv_hc_base(mmio_base,
975 port >> MV_PORT_HC_SHIFT);
976 void __iomem *port_base;
977 int start_port, num_ports, p, start_hc, num_hcs, hc;
979 if (0 > port) {
980 start_hc = start_port = 0;
981 num_ports = 8; /* should be benign for 4 port devs */
982 num_hcs = 2;
983 } else {
984 start_hc = port >> MV_PORT_HC_SHIFT;
985 start_port = port;
986 num_ports = num_hcs = 1;
988 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
989 num_ports > 1 ? num_ports - 1 : start_port);
991 if (NULL != pdev) {
992 DPRINTK("PCI config space regs:\n");
993 mv_dump_pci_cfg(pdev, 0x68);
995 DPRINTK("PCI regs:\n");
996 mv_dump_mem(mmio_base+0xc00, 0x3c);
997 mv_dump_mem(mmio_base+0xd00, 0x34);
998 mv_dump_mem(mmio_base+0xf00, 0x4);
999 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1000 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1001 hc_base = mv_hc_base(mmio_base, hc);
1002 DPRINTK("HC regs (HC %i):\n", hc);
1003 mv_dump_mem(hc_base, 0x1c);
1005 for (p = start_port; p < start_port + num_ports; p++) {
1006 port_base = mv_port_base(mmio_base, p);
1007 DPRINTK("EDMA regs (port %i):\n", p);
1008 mv_dump_mem(port_base, 0x54);
1009 DPRINTK("SATA regs (port %i):\n", p);
1010 mv_dump_mem(port_base+0x300, 0x60);
1012 #endif
1015 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1017 unsigned int ofs;
1019 switch (sc_reg_in) {
1020 case SCR_STATUS:
1021 case SCR_CONTROL:
1022 case SCR_ERROR:
1023 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1024 break;
1025 case SCR_ACTIVE:
1026 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1027 break;
1028 default:
1029 ofs = 0xffffffffU;
1030 break;
1032 return ofs;
1035 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1037 unsigned int ofs = mv_scr_offset(sc_reg_in);
1039 if (ofs != 0xffffffffU) {
1040 *val = readl(mv_ap_base(ap) + ofs);
1041 return 0;
1042 } else
1043 return -EINVAL;
1046 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1048 unsigned int ofs = mv_scr_offset(sc_reg_in);
1050 if (ofs != 0xffffffffU) {
1051 writelfl(val, mv_ap_base(ap) + ofs);
1052 return 0;
1053 } else
1054 return -EINVAL;
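/*
 * mv_edma_cfg - Write the EDMA configuration register for one port
 * @pp: port private data; MV_PP_FLAG_NCQ_EN is updated to match want_ncq
 * @hpriv: host private data, used to pick per-generation config bits
 * @port_mmio: port base address
 * @want_ncq: non-zero to set EDMA_CFG_NCQ for FPDMA (NCQ) operation
 *
 * This is the function the want_ncq parameter was added for: both callers
 * (mv_port_start and mv_start_dma) pass the queueing mode they need and the
 * register is written accordingly.
 *
 * LOCKING:
 * Inherited from caller.
 */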
1057 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1058 void __iomem *port_mmio, int want_ncq)
1060 u32 cfg;
1062 /* set up non-NCQ EDMA configuration */
1063 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1065 if (IS_GEN_I(hpriv))
1066 cfg |= (1 << 8); /* enab config burst size mask */
1068 else if (IS_GEN_II(hpriv))
1069 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1071 else if (IS_GEN_IIE(hpriv)) {
1072 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1073 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1074 cfg |= (1 << 18); /* enab early completion */
1075 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1078 if (want_ncq) {
1079 cfg |= EDMA_CFG_NCQ;
1080 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1081 } else
1082 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1084 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1088 * mv_port_start - Port specific init/start routine.
1089 * @ap: ATA channel to manipulate
1091 * Allocate and point to DMA memory, init port private memory,
1092 * zero indices.
1094 * LOCKING:
1095 * Inherited from caller.
1097 static int mv_port_start(struct ata_port *ap)
1099 struct device *dev = ap->host->dev;
1100 struct mv_host_priv *hpriv = ap->host->private_data;
1101 struct mv_port_priv *pp;
1102 void __iomem *port_mmio = mv_ap_base(ap);
1103 void *mem;
1104 dma_addr_t mem_dma;
1105 unsigned long flags;
1106 int rc;
1108 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1109 if (!pp)
1110 return -ENOMEM;
1112 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1113 GFP_KERNEL);
1114 if (!mem)
1115 return -ENOMEM;
1116 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1118 rc = ata_pad_alloc(ap, dev);
1119 if (rc)
1120 return rc;
1122 /* First item in chunk of DMA memory:
1123 * 32-slot command request table (CRQB), 32 bytes each in size
1125 pp->crqb = mem;
1126 pp->crqb_dma = mem_dma;
1127 mem += MV_CRQB_Q_SZ;
1128 mem_dma += MV_CRQB_Q_SZ;
1130 /* Second item:
1131 * 32-slot command response table (CRPB), 8 bytes each in size
1133 pp->crpb = mem;
1134 pp->crpb_dma = mem_dma;
1135 mem += MV_CRPB_Q_SZ;
1136 mem_dma += MV_CRPB_Q_SZ;
1138 /* Third item:
1139 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1141 pp->sg_tbl = mem;
1142 pp->sg_tbl_dma = mem_dma;
1144 spin_lock_irqsave(&ap->host->lock, flags);
1146 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1148 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1150 spin_unlock_irqrestore(&ap->host->lock, flags);
1152 /* Don't turn on EDMA here...do it before DMA commands only. Else
1153 * we'll be unable to send non-data, PIO, etc due to restricted access
1154 * to shadow regs.
1156 ap->private_data = pp;
1157 return 0;
1161 * mv_port_stop - Port specific cleanup/stop routine.
1162 * @ap: ATA channel to manipulate
1164 * Stop DMA, cleanup port memory.
1166 * LOCKING:
1167 * This routine uses the host lock to protect the DMA stop.
1169 static void mv_port_stop(struct ata_port *ap)
1171 mv_stop_dma(ap);
1175 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1176 * @qc: queued command whose SG list to source from
1178 * Populate the SG list and mark the last entry.
1180 * LOCKING:
1181 * Inherited from caller.
1183 static void mv_fill_sg(struct ata_queued_cmd *qc)
1185 struct mv_port_priv *pp = qc->ap->private_data;
1186 struct scatterlist *sg;
1187 struct mv_sg *mv_sg, *last_sg = NULL;
1188 unsigned int si;
1190 mv_sg = pp->sg_tbl;
1191 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1192 dma_addr_t addr = sg_dma_address(sg);
1193 u32 sg_len = sg_dma_len(sg);
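		/* The ePRD length field is 16 bits, so the loop below splits
		 * any segment that would cross a 64KB address boundary into
		 * multiple entries (see also MV_DMA_BOUNDARY).
		 */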
1195 while (sg_len) {
1196 u32 offset = addr & 0xffff;
1197 u32 len = sg_len;
1199 if ((offset + sg_len > 0x10000))
1200 len = 0x10000 - offset;
1202 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1203 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1204 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1206 sg_len -= len;
1207 addr += len;
1209 last_sg = mv_sg;
1210 mv_sg++;
1214 if (likely(last_sg))
1215 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1218 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1220 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1221 (last ? CRQB_CMD_LAST : 0);
1222 *cmdw = cpu_to_le16(tmp);
1226 * mv_qc_prep - Host specific command preparation.
1227 * @qc: queued command to prepare
1229 * This routine simply redirects to the general purpose routine
1230 * if command is not DMA. Else, it handles prep of the CRQB
1231 * (command request block), does some sanity checking, and calls
1232 * the SG load routine.
1234 * LOCKING:
1235 * Inherited from caller.
1237 static void mv_qc_prep(struct ata_queued_cmd *qc)
1239 struct ata_port *ap = qc->ap;
1240 struct mv_port_priv *pp = ap->private_data;
1241 __le16 *cw;
1242 struct ata_taskfile *tf;
1243 u16 flags = 0;
1244 unsigned in_index;
1246 if (qc->tf.protocol != ATA_PROT_DMA)
1247 return;
1249 /* Fill in command request block
1251 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1252 flags |= CRQB_FLAG_READ;
1253 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1254 flags |= qc->tag << CRQB_TAG_SHIFT;
1255 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
1257 /* get current queue index from software */
1258 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1260 pp->crqb[in_index].sg_addr =
1261 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1262 pp->crqb[in_index].sg_addr_hi =
1263 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1264 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1266 cw = &pp->crqb[in_index].ata_cmd[0];
1267 tf = &qc->tf;
1269 /* Sadly, the CRQB cannot accommodate all registers--there are
1270 * only 11 bytes...so we must pick and choose required
1271 * registers based on the command. So, we drop feature and
1272 * hob_feature for [RW] DMA commands, but they are needed for
1273 * NCQ. NCQ will drop hob_nsect.
1275 switch (tf->command) {
1276 case ATA_CMD_READ:
1277 case ATA_CMD_READ_EXT:
1278 case ATA_CMD_WRITE:
1279 case ATA_CMD_WRITE_EXT:
1280 case ATA_CMD_WRITE_FUA_EXT:
1281 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1282 break;
1283 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1284 case ATA_CMD_FPDMA_READ:
1285 case ATA_CMD_FPDMA_WRITE:
1286 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1287 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1288 break;
1289 #endif /* FIXME: remove this line when NCQ added */
1290 default:
1291 /* The only other commands EDMA supports in non-queued and
1292 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1293 * of which are defined/used by Linux. If we get here, this
1294 * driver needs work.
1296 * FIXME: modify libata to give qc_prep a return value and
1297 * return error here.
1299 BUG_ON(tf->command);
1300 break;
1302 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1303 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1304 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1305 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1306 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1307 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1308 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1309 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1310 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1312 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1313 return;
1314 mv_fill_sg(qc);
1318 * mv_qc_prep_iie - Host specific command preparation.
1319 * @qc: queued command to prepare
1321 * This routine simply redirects to the general purpose routine
1322 * if command is not DMA. Else, it handles prep of the CRQB
1323 * (command request block), does some sanity checking, and calls
1324 * the SG load routine.
1326 * LOCKING:
1327 * Inherited from caller.
1329 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1331 struct ata_port *ap = qc->ap;
1332 struct mv_port_priv *pp = ap->private_data;
1333 struct mv_crqb_iie *crqb;
1334 struct ata_taskfile *tf;
1335 unsigned in_index;
1336 u32 flags = 0;
1338 if (qc->tf.protocol != ATA_PROT_DMA)
1339 return;
1341 /* Fill in Gen IIE command request block
1343 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1344 flags |= CRQB_FLAG_READ;
1346 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1347 flags |= qc->tag << CRQB_TAG_SHIFT;
1348 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
1349 what we use as our tag */
1351 /* get current queue index from software */
1352 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1354 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1355 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1356 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1357 crqb->flags = cpu_to_le32(flags);
1359 tf = &qc->tf;
1360 crqb->ata_cmd[0] = cpu_to_le32(
1361 (tf->command << 16) |
1362 (tf->feature << 24)
1364 crqb->ata_cmd[1] = cpu_to_le32(
1365 (tf->lbal << 0) |
1366 (tf->lbam << 8) |
1367 (tf->lbah << 16) |
1368 (tf->device << 24)
1370 crqb->ata_cmd[2] = cpu_to_le32(
1371 (tf->hob_lbal << 0) |
1372 (tf->hob_lbam << 8) |
1373 (tf->hob_lbah << 16) |
1374 (tf->hob_feature << 24)
1376 crqb->ata_cmd[3] = cpu_to_le32(
1377 (tf->nsect << 0) |
1378 (tf->hob_nsect << 8)
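	/* Note: unlike the Gen-II path, where mv_crqb_pack_cmd() emits one
	 * 16-bit (value, register, last) word per shadow register, the
	 * Gen-IIE CRQB packs the whole taskfile into these four 32-bit words.
	 */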
1381 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1382 return;
1383 mv_fill_sg(qc);
1387 * mv_qc_issue - Initiate a command to the host
1388 * @qc: queued command to start
1390 * This routine simply redirects to the general purpose routine
1391 * if command is not DMA. Else, it sanity checks our local
1392 * caches of the request producer/consumer indices then enables
1393 * DMA and bumps the request producer index.
1395 * LOCKING:
1396 * Inherited from caller.
1398 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1400 struct ata_port *ap = qc->ap;
1401 void __iomem *port_mmio = mv_ap_base(ap);
1402 struct mv_port_priv *pp = ap->private_data;
1403 u32 in_index;
1405 if (qc->tf.protocol != ATA_PROT_DMA) {
1406 /* We're about to send a non-EDMA capable command to the
1407 * port. Turn off EDMA so there won't be problems accessing
1408 * shadow block, etc registers.
1410 __mv_stop_dma(ap);
1411 return ata_qc_issue_prot(qc);
1414 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1416 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1418 /* until we do queuing, the queue should be empty at this point */
1419 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1420 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1422 pp->req_idx++;
1424 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1426 /* and write the request in pointer to kick the EDMA to life */
1427 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1428 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1430 return 0;
1434 * mv_err_intr - Handle error interrupts on the port
1435 * @ap: ATA channel to manipulate
1436 * @reset_allowed: bool: 0 == don't trigger from reset here
1438 * In most cases, just clear the interrupt and move on. However,
1439 * some cases require an eDMA reset, which is done right before
1440 * the COMRESET in mv_phy_reset(). The SERR case requires a
1441 * clear of pending errors in the SATA SERROR register. Finally,
1442 * if the port disabled DMA, update our cached copy to match.
1444 * LOCKING:
1445 * Inherited from caller.
1447 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1449 void __iomem *port_mmio = mv_ap_base(ap);
1450 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1451 struct mv_port_priv *pp = ap->private_data;
1452 struct mv_host_priv *hpriv = ap->host->private_data;
1453 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1454 unsigned int action = 0, err_mask = 0;
1455 struct ata_eh_info *ehi = &ap->link.eh_info;
1457 ata_ehi_clear_desc(ehi);
1459 if (!edma_enabled) {
1460 /* just a guess: do we need to do this? should we
1461 * expand this, and do it in all cases?
1463 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1464 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1467 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1469 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1472 * all generations share these EDMA error cause bits
1475 if (edma_err_cause & EDMA_ERR_DEV)
1476 err_mask |= AC_ERR_DEV;
1477 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1478 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1479 EDMA_ERR_INTRL_PAR)) {
1480 err_mask |= AC_ERR_ATA_BUS;
1481 action |= ATA_EH_HARDRESET;
1482 ata_ehi_push_desc(ehi, "parity error");
1484 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1485 ata_ehi_hotplugged(ehi);
1486 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1487 "dev disconnect" : "dev connect");
1488 action |= ATA_EH_HARDRESET;
1491 if (IS_GEN_I(hpriv)) {
1492 eh_freeze_mask = EDMA_EH_FREEZE_5;
1494 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1495 struct mv_port_priv *pp = ap->private_data;
1496 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1497 ata_ehi_push_desc(ehi, "EDMA self-disable");
1499 } else {
1500 eh_freeze_mask = EDMA_EH_FREEZE;
1502 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1503 struct mv_port_priv *pp = ap->private_data;
1504 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1505 ata_ehi_push_desc(ehi, "EDMA self-disable");
1508 if (edma_err_cause & EDMA_ERR_SERR) {
1509 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1510 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1511 err_mask = AC_ERR_ATA_BUS;
1512 action |= ATA_EH_HARDRESET;
1516 /* Clear EDMA now that SERR cleanup done */
1517 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1519 if (!err_mask) {
1520 err_mask = AC_ERR_OTHER;
1521 action |= ATA_EH_HARDRESET;
1524 ehi->serror |= serr;
1525 ehi->action |= action;
1527 if (qc)
1528 qc->err_mask |= err_mask;
1529 else
1530 ehi->err_mask |= err_mask;
1532 if (edma_err_cause & eh_freeze_mask)
1533 ata_port_freeze(ap);
1534 else
1535 ata_port_abort(ap);
1538 static void mv_intr_pio(struct ata_port *ap)
1540 struct ata_queued_cmd *qc;
1541 u8 ata_status;
1543 /* ignore spurious intr if drive still BUSY */
1544 ata_status = readb(ap->ioaddr.status_addr);
1545 if (unlikely(ata_status & ATA_BUSY))
1546 return;
1548 /* get active ATA command */
1549 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1550 if (unlikely(!qc)) /* no active tag */
1551 return;
1552 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1553 return;
1555 /* and finally, complete the ATA command */
1556 qc->err_mask |= ac_err_mask(ata_status);
1557 ata_qc_complete(qc);
1560 static void mv_intr_edma(struct ata_port *ap)
1562 void __iomem *port_mmio = mv_ap_base(ap);
1563 struct mv_host_priv *hpriv = ap->host->private_data;
1564 struct mv_port_priv *pp = ap->private_data;
1565 struct ata_queued_cmd *qc;
1566 u32 out_index, in_index;
1567 bool work_done = false;
1569 /* get h/w response queue pointer */
1570 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1571 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1573 while (1) {
1574 u16 status;
1575 unsigned int tag;
1577 /* get s/w response queue last-read pointer, and compare */
1578 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1579 if (in_index == out_index)
1580 break;
1582 /* 50xx: get active ATA command */
1583 if (IS_GEN_I(hpriv))
1584 tag = ap->link.active_tag;
1586 /* Gen II/IIE: get active ATA command via tag, to enable
1587 * support for queueing. this works transparently for
1588 * queued and non-queued modes.
1590 else if (IS_GEN_II(hpriv))
1591 tag = (le16_to_cpu(pp->crpb[out_index].id)
1592 >> CRPB_IOID_SHIFT_6) & 0x3f;
1594 else /* IS_GEN_IIE */
1595 tag = (le16_to_cpu(pp->crpb[out_index].id)
1596 >> CRPB_IOID_SHIFT_7) & 0x3f;
1598 qc = ata_qc_from_tag(ap, tag);
1600 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1601 * bits (WARNING: might not necessarily be associated
1602 * with this command), which -should- be clear
1603 * if all is well
1605 status = le16_to_cpu(pp->crpb[out_index].flags);
1606 if (unlikely(status & 0xff)) {
1607 mv_err_intr(ap, qc);
1608 return;
1611 /* and finally, complete the ATA command */
1612 if (qc) {
1613 qc->err_mask |=
1614 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1615 ata_qc_complete(qc);
1618 /* advance software response queue pointer, to
1619 * indicate (after the loop completes) to hardware
1620 * that we have consumed a response queue entry.
1622 work_done = true;
1623 pp->resp_idx++;
1626 if (work_done)
1627 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1628 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1629 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1633 * mv_host_intr - Handle all interrupts on the given host controller
1634 * @host: host specific structure
1635 * @relevant: port error bits relevant to this host controller
1636 * @hc: which host controller we're to look at
1638 * Read then write clear the HC interrupt status then walk each
1639 * port connected to the HC and see if it needs servicing. Port
1640 * success ints are reported in the HC interrupt status reg, the
1641 * port error ints are reported in the higher level main
1642 * interrupt status register and thus are passed in via the
1643 * 'relevant' argument.
1645 * LOCKING:
1646 * Inherited from caller.
1648 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1650 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1651 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1652 u32 hc_irq_cause;
1653 int port, port0;
1655 if (hc == 0)
1656 port0 = 0;
1657 else
1658 port0 = MV_PORTS_PER_HC;
1660 /* we'll need the HC success int register in most cases */
1661 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1662 if (!hc_irq_cause)
1663 return;
1665 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1667 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1668 hc, relevant, hc_irq_cause);
1670 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1671 struct ata_port *ap = host->ports[port];
1672 struct mv_port_priv *pp = ap->private_data;
1673 int have_err_bits, hard_port, shift;
1675 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1676 continue;
1678 shift = port << 1; /* (port * 2) */
1679 if (port >= MV_PORTS_PER_HC) {
1680 shift++; /* skip bit 8 in the HC Main IRQ reg */
1682 have_err_bits = ((PORT0_ERR << shift) & relevant);
1684 if (unlikely(have_err_bits)) {
1685 struct ata_queued_cmd *qc;
1687 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1688 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1689 continue;
1691 mv_err_intr(ap, qc);
1692 continue;
1695 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1697 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1698 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1699 mv_intr_edma(ap);
1700 } else {
1701 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1702 mv_intr_pio(ap);
1705 VPRINTK("EXIT\n");
1708 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1710 struct mv_host_priv *hpriv = host->private_data;
1711 struct ata_port *ap;
1712 struct ata_queued_cmd *qc;
1713 struct ata_eh_info *ehi;
1714 unsigned int i, err_mask, printed = 0;
1715 u32 err_cause;
1717 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1719 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1720 err_cause);
1722 DPRINTK("All regs @ PCI error\n");
1723 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1725 writelfl(0, mmio + hpriv->irq_cause_ofs);
1727 for (i = 0; i < host->n_ports; i++) {
1728 ap = host->ports[i];
1729 if (!ata_link_offline(&ap->link)) {
1730 ehi = &ap->link.eh_info;
1731 ata_ehi_clear_desc(ehi);
1732 if (!printed++)
1733 ata_ehi_push_desc(ehi,
1734 "PCI err cause 0x%08x", err_cause);
1735 err_mask = AC_ERR_HOST_BUS;
1736 ehi->action = ATA_EH_HARDRESET;
1737 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1738 if (qc)
1739 qc->err_mask |= err_mask;
1740 else
1741 ehi->err_mask |= err_mask;
1743 ata_port_freeze(ap);
1749 * mv_interrupt - Main interrupt event handler
1750 * @irq: unused
1751 * @dev_instance: private data; in this case the host structure
1753 * Read the read only register to determine if any host
1754 * controllers have pending interrupts. If so, call lower level
1755 * routine to handle. Also check for PCI errors which are only
1756 * reported here.
1758 * LOCKING:
1759 * This routine holds the host lock while processing pending
1760 * interrupts.
1762 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1764 struct ata_host *host = dev_instance;
1765 unsigned int hc, handled = 0, n_hcs;
1766 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1767 u32 irq_stat, irq_mask;
1769 spin_lock(&host->lock);
1770 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1771 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
1773 /* check the cases where we either have nothing pending or have read
1774 * a bogus register value which can indicate HW removal or PCI fault
1776 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1777 goto out_unlock;
1779 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1781 if (unlikely(irq_stat & PCI_ERR)) {
1782 mv_pci_error(host, mmio);
1783 handled = 1;
1784 goto out_unlock; /* skip all other HC irq handling */
1787 for (hc = 0; hc < n_hcs; hc++) {
1788 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1789 if (relevant) {
1790 mv_host_intr(host, relevant, hc);
1791 handled = 1;
1795 out_unlock:
1796 spin_unlock(&host->lock);
1798 return IRQ_RETVAL(handled);
1801 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1803 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1804 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1806 return hc_mmio + ofs;
1809 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1811 unsigned int ofs;
1813 switch (sc_reg_in) {
1814 case SCR_STATUS:
1815 case SCR_ERROR:
1816 case SCR_CONTROL:
1817 ofs = sc_reg_in * sizeof(u32);
1818 break;
1819 default:
1820 ofs = 0xffffffffU;
1821 break;
1823 return ofs;
1826 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1828 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1829 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1830 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1832 if (ofs != 0xffffffffU) {
1833 *val = readl(addr + ofs);
1834 return 0;
1835 } else
1836 return -EINVAL;
1839 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1841 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1842 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1843 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1845 if (ofs != 0xffffffffU) {
1846 writelfl(val, addr + ofs);
1847 return 0;
1848 } else
1849 return -EINVAL;
1852 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1854 int early_5080;
1856 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1858 if (!early_5080) {
1859 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1860 tmp |= (1 << 0);
1861 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1864 mv_reset_pci_bus(pdev, mmio);
1867 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1869 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1872 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1873 void __iomem *mmio)
1875 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1876 u32 tmp;
1878 tmp = readl(phy_mmio + MV5_PHY_MODE);
1880 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1881 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1884 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1886 u32 tmp;
1888 writel(0, mmio + MV_GPIO_PORT_CTL);
1890 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1892 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1893 tmp |= ~(1 << 0);
1894 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1897 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1898 unsigned int port)
1900 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1901 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1902 u32 tmp;
1903 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1905 if (fix_apm_sq) {
1906 tmp = readl(phy_mmio + MV5_LT_MODE);
1907 tmp |= (1 << 19);
1908 writel(tmp, phy_mmio + MV5_LT_MODE);
1910 tmp = readl(phy_mmio + MV5_PHY_CTL);
1911 tmp &= ~0x3;
1912 tmp |= 0x1;
1913 writel(tmp, phy_mmio + MV5_PHY_CTL);
1916 tmp = readl(phy_mmio + MV5_PHY_MODE);
1917 tmp &= ~mask;
1918 tmp |= hpriv->signal[port].pre;
1919 tmp |= hpriv->signal[port].amps;
1920 writel(tmp, phy_mmio + MV5_PHY_MODE);
1924 #undef ZERO
1925 #define ZERO(reg) writel(0, port_mmio + (reg))
1926 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1927 unsigned int port)
1929 void __iomem *port_mmio = mv_port_base(mmio, port);
1931 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1933 mv_channel_reset(hpriv, mmio, port);
1935 ZERO(0x028); /* command */
1936 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1937 ZERO(0x004); /* timer */
1938 ZERO(0x008); /* irq err cause */
1939 ZERO(0x00c); /* irq err mask */
1940 ZERO(0x010); /* rq bah */
1941 ZERO(0x014); /* rq inp */
1942 ZERO(0x018); /* rq outp */
1943 ZERO(0x01c); /* respq bah */
1944 ZERO(0x024); /* respq outp */
1945 ZERO(0x020); /* respq inp */
1946 ZERO(0x02c); /* test control */
1947 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1949 #undef ZERO
1951 #define ZERO(reg) writel(0, hc_mmio + (reg))
1952 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1953 unsigned int hc)
1955 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1956 u32 tmp;
1958 ZERO(0x00c);
1959 ZERO(0x010);
1960 ZERO(0x014);
1961 ZERO(0x018);
1963 tmp = readl(hc_mmio + 0x20);
1964 tmp &= 0x1c1c1c1c;
1965 tmp |= 0x03030303;
1966 writel(tmp, hc_mmio + 0x20);
1968 #undef ZERO
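 * mv5_reset_hc - reset all ports and host controllers (50xx)
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers
 *
 * Reset every port of every host controller, then reset each host
 * controller itself.  Always returns 0.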
1970 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1971 unsigned int n_hc)
1973 unsigned int hc, port;
1975 for (hc = 0; hc < n_hc; hc++) {
1976 for (port = 0; port < MV_PORTS_PER_HC; port++)
1977 mv5_reset_hc_port(hpriv, mmio,
1978 (hc * MV_PORTS_PER_HC) + port);
1980 mv5_reset_one_hc(hpriv, mmio, hc);
1983 return 0;
1986 #undef ZERO
1987 #define ZERO(reg) writel(0, mmio + (reg))
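 * mv_reset_pci_bus - clear PCI-side error and interrupt state
 * @pdev: PCI device
 * @mmio: base address of the HBA
 *
 * Clear bits 23:16 of MV_PCI_MODE, zero the disconnect timer, MSI
 * trigger, main IRQ mask, PCI interrupt cause/mask, SERR mask and
 * PCI error reporting registers, and program the crossbar timeout.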
1988 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1990 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1991 struct mv_host_priv *hpriv = host->private_data;
1992 u32 tmp;
1994 tmp = readl(mmio + MV_PCI_MODE);
1995 tmp &= 0xff00ffff;
1996 writel(tmp, mmio + MV_PCI_MODE);
1998 ZERO(MV_PCI_DISC_TIMER);
1999 ZERO(MV_PCI_MSI_TRIGGER);
2000 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2001 ZERO(HC_MAIN_IRQ_MASK_OFS);
2002 ZERO(MV_PCI_SERR_MASK);
2003 ZERO(hpriv->irq_cause_ofs);
2004 ZERO(hpriv->irq_mask_ofs);
2005 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2006 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2007 ZERO(MV_PCI_ERR_ATTRIBUTE);
2008 ZERO(MV_PCI_ERR_COMMAND);
2010 #undef ZERO
2012 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2014 u32 tmp;
2016 mv5_reset_flash(hpriv, mmio);
2018 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2019 tmp &= 0x3;
2020 tmp |= (1 << 5) | (1 << 6);
2021 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2025 * mv6_reset_hc - Perform the 6xxx global soft reset
2026 * @mmio: base address of the HBA
2028 * This routine only applies to 6xxx parts.
2030 * LOCKING:
2031 * Inherited from caller.
2033 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2034 unsigned int n_hc)
2036 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2037 int i, rc = 0;
2038 u32 t;
2040 	/* Follow the procedure defined in the PCI "main command and status
2041 	 * register" table.
2043 t = readl(reg);
2044 writel(t | STOP_PCI_MASTER, reg);
2046 for (i = 0; i < 1000; i++) {
2047 udelay(1);
2048 t = readl(reg);
2049 if (PCI_MASTER_EMPTY & t)
2050 break;
2052 if (!(PCI_MASTER_EMPTY & t)) {
2053 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2054 rc = 1;
2055 goto done;
2058 /* set reset */
2059 i = 5;
2060 do {
2061 writel(t | GLOB_SFT_RST, reg);
2062 t = readl(reg);
2063 udelay(1);
2064 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2066 if (!(GLOB_SFT_RST & t)) {
2067 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2068 rc = 1;
2069 goto done;
2072 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2073 i = 5;
2074 do {
2075 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2076 t = readl(reg);
2077 udelay(1);
2078 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2080 if (GLOB_SFT_RST & t) {
2081 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2082 rc = 1;
2084 done:
2085 return rc;
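 * mv6_read_preamp - save PHY signal settings for one 60xx port
 * @hpriv: host private data
 * @idx: port index
 * @mmio: base address of the HBA
 *
 * If bit 0 of MV_RESET_CFG is clear, record fixed default amplitude
 * and pre-emphasis values; otherwise read them from the port's
 * PHY_MODE2 register (bits 10:8 and 7:5 respectively).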
2088 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2089 void __iomem *mmio)
2091 void __iomem *port_mmio;
2092 u32 tmp;
2094 tmp = readl(mmio + MV_RESET_CFG);
2095 if ((tmp & (1 << 0)) == 0) {
2096 hpriv->signal[idx].amps = 0x7 << 8;
2097 hpriv->signal[idx].pre = 0x1 << 5;
2098 return;
2101 port_mmio = mv_port_base(mmio, idx);
2102 tmp = readl(port_mmio + PHY_MODE2);
2104 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2105 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2108 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2110 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
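 * mv6_phy_errata - apply 60xx/IIE PHY errata workarounds to one port
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @port: port number
 *
 * For the 60X1B2/60X1C0 errata, massage PHY_MODE2 and PHY_MODE4.
 * PHY_MODE3 is always rewritten, and the amplitude and pre-emphasis
 * values saved by mv6_read_preamp() are restored into PHY_MODE2,
 * with additional fixed bits applied on Gen-IIE parts.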
2113 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2114 unsigned int port)
2116 void __iomem *port_mmio = mv_port_base(mmio, port);
2118 u32 hp_flags = hpriv->hp_flags;
2119 int fix_phy_mode2 =
2120 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2121 int fix_phy_mode4 =
2122 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2123 u32 m2, tmp;
2125 if (fix_phy_mode2) {
2126 m2 = readl(port_mmio + PHY_MODE2);
2127 m2 &= ~(1 << 16);
2128 m2 |= (1 << 31);
2129 writel(m2, port_mmio + PHY_MODE2);
2131 udelay(200);
2133 m2 = readl(port_mmio + PHY_MODE2);
2134 m2 &= ~((1 << 16) | (1 << 31));
2135 writel(m2, port_mmio + PHY_MODE2);
2137 udelay(200);
2140 /* who knows what this magic does */
2141 tmp = readl(port_mmio + PHY_MODE3);
2142 tmp &= ~0x7F800000;
2143 tmp |= 0x2A800000;
2144 writel(tmp, port_mmio + PHY_MODE3);
2146 if (fix_phy_mode4) {
2147 u32 m4;
2149 m4 = readl(port_mmio + PHY_MODE4);
2151 if (hp_flags & MV_HP_ERRATA_60X1B2)
2152 tmp = readl(port_mmio + 0x310);
2154 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2156 writel(m4, port_mmio + PHY_MODE4);
2158 if (hp_flags & MV_HP_ERRATA_60X1B2)
2159 writel(tmp, port_mmio + 0x310);
2162 /* Revert values of pre-emphasis and signal amps to the saved ones */
2163 m2 = readl(port_mmio + PHY_MODE2);
2165 m2 &= ~MV_M2_PREAMP_MASK;
2166 m2 |= hpriv->signal[port].amps;
2167 m2 |= hpriv->signal[port].pre;
2168 m2 &= ~(1 << 16);
2170 /* according to mvSata 3.6.1, some IIE values are fixed */
2171 if (IS_GEN_IIE(hpriv)) {
2172 m2 &= ~0xC30FF01F;
2173 m2 |= 0x0000900F;
2176 writel(m2, port_mmio + PHY_MODE2);
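 * mv_channel_reset - hard reset one port's EDMA engine
 * @hpriv: host private data
 * @mmio: base address of the HBA
 * @port_no: port number
 *
 * Assert ATA_RST in the EDMA command register, enable gen2i speed in
 * the SATA interface control register on Gen-II parts, wait for the
 * reset to propagate, clear ATA_RST, and reapply the per-port PHY
 * errata workarounds.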
2179 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2180 unsigned int port_no)
2182 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2184 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2186 if (IS_GEN_II(hpriv)) {
2187 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2188 ifctl |= (1 << 7); /* enable gen2i speed */
2189 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2190 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2193 udelay(25); /* allow reset propagation */
2195 /* Spec never mentions clearing the bit. Marvell's driver does
2196 * clear the bit, however.
2198 writelfl(0, port_mmio + EDMA_CMD_OFS);
2200 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2202 if (IS_GEN_I(hpriv))
2203 mdelay(1);
2207 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2208 * @ap: ATA channel to manipulate
2210 * Part of this is taken from __sata_phy_reset and modified to
2211 * not sleep since this routine gets called from interrupt level.
2213 * LOCKING:
2214  *      Inherited from caller.  This is coded to be safe to call at
2215  *      interrupt level, i.e. it does not sleep.
2217 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2218 unsigned long deadline)
2220 struct mv_port_priv *pp = ap->private_data;
2221 struct mv_host_priv *hpriv = ap->host->private_data;
2222 void __iomem *port_mmio = mv_ap_base(ap);
2223 int retry = 5;
2224 u32 sstatus;
2226 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2228 #ifdef DEBUG
2230 u32 sstatus, serror, scontrol;
2232 mv_scr_read(ap, SCR_STATUS, &sstatus);
2233 mv_scr_read(ap, SCR_ERROR, &serror);
2234 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2235 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2236 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2238 #endif
2240 /* Issue COMRESET via SControl */
2241 comreset_retry:
2242 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2243 msleep(1);
2245 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2246 msleep(20);
2248 do {
2249 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2250 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2251 break;
2253 msleep(1);
2254 } while (time_before(jiffies, deadline));
2256 /* work around errata */
2257 if (IS_GEN_II(hpriv) &&
2258 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2259 (retry-- > 0))
2260 goto comreset_retry;
2262 #ifdef DEBUG
2264 u32 sstatus, serror, scontrol;
2266 mv_scr_read(ap, SCR_STATUS, &sstatus);
2267 mv_scr_read(ap, SCR_ERROR, &serror);
2268 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2269 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2270 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2272 #endif
2274 if (ata_link_offline(&ap->link)) {
2275 *class = ATA_DEV_NONE;
2276 return;
2279 	/* Even after SStatus reflects that the device is ready,
2280 	 * it seems to take a while for the link to be fully
2281 	 * established (and thus Status is no longer 0x80/0x7F),
2282 	 * so we poll a bit for that here.
2284 retry = 20;
2285 while (1) {
2286 u8 drv_stat = ata_check_status(ap);
2287 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2288 break;
2289 msleep(500);
2290 if (retry-- <= 0)
2291 break;
2292 if (time_after(jiffies, deadline))
2293 break;
2296 /* FIXME: if we passed the deadline, the following
2297 * code probably produces an invalid result
2300 /* finally, read device signature from TF registers */
2301 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2303 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2305 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2307 VPRINTK("EXIT\n");
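 * mv_prereset - prepare a link for reset
 * @link: link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * Stop EDMA, and force a hardreset the first time a port is reset
 * (or whenever EDMA could not be stopped).  If no hardreset is
 * scheduled, wait for the device to become ready for softreset.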
2310 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2312 struct ata_port *ap = link->ap;
2313 struct mv_port_priv *pp = ap->private_data;
2314 struct ata_eh_context *ehc = &link->eh_context;
2315 int rc;
2317 rc = mv_stop_dma(ap);
2318 if (rc)
2319 ehc->i.action |= ATA_EH_HARDRESET;
2321 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2322 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2323 ehc->i.action |= ATA_EH_HARDRESET;
2326 /* if we're about to do hardreset, nothing more to do */
2327 if (ehc->i.action & ATA_EH_HARDRESET)
2328 return 0;
2330 if (ata_link_online(link))
2331 rc = ata_wait_ready(ap, deadline);
2332 else
2333 rc = -ENODEV;
2335 return rc;
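 * mv_hardreset - channel reset followed by PHY reset
 * @link: link to be reset
 * @class: resulting device class
 * @deadline: deadline jiffies for the operation
 *
 * Stop EDMA, reset the channel, then issue COMRESET via
 * mv_phy_reset() to bring the link back up and classify the device.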
2338 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2339 unsigned long deadline)
2341 struct ata_port *ap = link->ap;
2342 struct mv_host_priv *hpriv = ap->host->private_data;
2343 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2345 mv_stop_dma(ap);
2347 mv_channel_reset(hpriv, mmio, ap->port_no);
2349 mv_phy_reset(ap, class, deadline);
2351 return 0;
2354 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2356 struct ata_port *ap = link->ap;
2357 u32 serr;
2359 /* print link status */
2360 sata_print_link_status(link);
2362 /* clear SError */
2363 sata_scr_read(link, SCR_ERROR, &serr);
2364 sata_scr_write_flush(link, SCR_ERROR, serr);
2366 /* bail out if no device is present */
2367 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2368 DPRINTK("EXIT, no device\n");
2369 return;
2372 /* set up device control */
2373 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2376 static void mv_error_handler(struct ata_port *ap)
2378 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2379 mv_hardreset, mv_postreset);
2382 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2384 mv_stop_dma(qc->ap);
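 * mv_eh_freeze - quiesce interrupts from one port
 * @ap: port to freeze
 *
 * Clear this port's error/done bits in the host's main interrupt
 * mask register so the port asserts no further interrupts while
 * error handling is in progress.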
2387 static void mv_eh_freeze(struct ata_port *ap)
2389 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2390 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2391 u32 tmp, mask;
2392 unsigned int shift;
2394 /* FIXME: handle coalescing completion events properly */
2396 shift = ap->port_no * 2;
2397 if (hc > 0)
2398 shift++;
2400 mask = 0x3 << shift;
2402 /* disable assertion of portN err, done events */
2403 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2404 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
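 * mv_eh_thaw - re-enable interrupts from one port
 * @ap: port to thaw
 *
 * Clear any pending EDMA error and host controller interrupt causes
 * for this port, then set its error/done bits in the host's main
 * interrupt mask register again.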
2407 static void mv_eh_thaw(struct ata_port *ap)
2409 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2410 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2411 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2412 void __iomem *port_mmio = mv_ap_base(ap);
2413 u32 tmp, mask, hc_irq_cause;
2414 unsigned int shift, hc_port_no = ap->port_no;
2416 /* FIXME: handle coalescing completion events properly */
2418 shift = ap->port_no * 2;
2419 if (hc > 0) {
2420 shift++;
2421 hc_port_no -= 4;
2424 mask = 0x3 << shift;
2426 /* clear EDMA errors on this port */
2427 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2429 /* clear pending irq events */
2430 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2431 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2432 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2433 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2435 /* enable assertion of portN err, done events */
2436 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2437 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2441 * mv_port_init - Perform some early initialization on a single port.
2442 * @port: libata data structure storing shadow register addresses
2443 * @port_mmio: base address of the port
2445 * Initialize shadow register mmio addresses, clear outstanding
2446 * interrupts on the port, and unmask interrupts for the future
2447 * start of the port.
2449 * LOCKING:
2450 * Inherited from caller.
2452 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2454 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2455 unsigned serr_ofs;
2457 /* PIO related setup
2459 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2460 port->error_addr =
2461 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2462 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2463 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2464 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2465 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2466 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2467 port->status_addr =
2468 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2469 /* special case: control/altstatus doesn't have ATA_REG_ address */
2470 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2472 /* unused: */
2473 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2475 /* Clear any currently outstanding port interrupt conditions */
2476 serr_ofs = mv_scr_offset(SCR_ERROR);
2477 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2478 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2480 /* unmask all non-transient EDMA error interrupts */
2481 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2483 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2484 readl(port_mmio + EDMA_CFG_OFS),
2485 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2486 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
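 * mv_chip_id - identify the chip generation and select errata flags
 * @host: ATA host to configure
 * @board_idx: controller index from the PCI device table
 *
 * Choose hpriv->ops and the MV_HP_GEN_x / errata flags from the board
 * index and PCI revision, warn about the destructive Highpoint
 * RocketRAID 23xx BIOS behaviour, and select PCI vs PCIe interrupt
 * cause/mask register offsets.  Returns 1 on an invalid board index.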
2489 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2491 struct pci_dev *pdev = to_pci_dev(host->dev);
2492 struct mv_host_priv *hpriv = host->private_data;
2493 u32 hp_flags = hpriv->hp_flags;
2495 switch (board_idx) {
2496 case chip_5080:
2497 hpriv->ops = &mv5xxx_ops;
2498 hp_flags |= MV_HP_GEN_I;
2500 switch (pdev->revision) {
2501 case 0x1:
2502 hp_flags |= MV_HP_ERRATA_50XXB0;
2503 break;
2504 case 0x3:
2505 hp_flags |= MV_HP_ERRATA_50XXB2;
2506 break;
2507 default:
2508 dev_printk(KERN_WARNING, &pdev->dev,
2509 "Applying 50XXB2 workarounds to unknown rev\n");
2510 hp_flags |= MV_HP_ERRATA_50XXB2;
2511 break;
2513 break;
2515 case chip_504x:
2516 case chip_508x:
2517 hpriv->ops = &mv5xxx_ops;
2518 hp_flags |= MV_HP_GEN_I;
2520 switch (pdev->revision) {
2521 case 0x0:
2522 hp_flags |= MV_HP_ERRATA_50XXB0;
2523 break;
2524 case 0x3:
2525 hp_flags |= MV_HP_ERRATA_50XXB2;
2526 break;
2527 default:
2528 dev_printk(KERN_WARNING, &pdev->dev,
2529 "Applying B2 workarounds to unknown rev\n");
2530 hp_flags |= MV_HP_ERRATA_50XXB2;
2531 break;
2533 break;
2535 case chip_604x:
2536 case chip_608x:
2537 hpriv->ops = &mv6xxx_ops;
2538 hp_flags |= MV_HP_GEN_II;
2540 switch (pdev->revision) {
2541 case 0x7:
2542 hp_flags |= MV_HP_ERRATA_60X1B2;
2543 break;
2544 case 0x9:
2545 hp_flags |= MV_HP_ERRATA_60X1C0;
2546 break;
2547 default:
2548 dev_printk(KERN_WARNING, &pdev->dev,
2549 "Applying B2 workarounds to unknown rev\n");
2550 hp_flags |= MV_HP_ERRATA_60X1B2;
2551 break;
2553 break;
2555 case chip_7042:
2556 hp_flags |= MV_HP_PCIE;
2557 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2558 (pdev->device == 0x2300 || pdev->device == 0x2310))
2561 * Highpoint RocketRAID PCIe 23xx series cards:
2563 * Unconfigured drives are treated as "Legacy"
2564 * by the BIOS, and it overwrites sector 8 with
2565 * a "Lgcy" metadata block prior to Linux boot.
2567 * Configured drives (RAID or JBOD) leave sector 8
2568 * alone, but instead overwrite a high numbered
2569 * sector for the RAID metadata. This sector can
2570 * be determined exactly, by truncating the physical
2571 * drive capacity to a nice even GB value.
2573 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2575 * Warn the user, lest they think we're just buggy.
2577 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2578 " BIOS CORRUPTS DATA on all attached drives,"
2579 " regardless of if/how they are configured."
2580 " BEWARE!\n");
2581 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2582 " use sectors 8-9 on \"Legacy\" drives,"
2583 " and avoid the final two gigabytes on"
2584 " all RocketRAID BIOS initialized drives.\n");
2586 case chip_6042:
2587 hpriv->ops = &mv6xxx_ops;
2588 hp_flags |= MV_HP_GEN_IIE;
2590 switch (pdev->revision) {
2591 case 0x0:
2592 hp_flags |= MV_HP_ERRATA_XX42A0;
2593 break;
2594 case 0x1:
2595 hp_flags |= MV_HP_ERRATA_60X1C0;
2596 break;
2597 default:
2598 dev_printk(KERN_WARNING, &pdev->dev,
2599 "Applying 60X1C0 workarounds to unknown rev\n");
2600 hp_flags |= MV_HP_ERRATA_60X1C0;
2601 break;
2603 break;
2605 default:
2606 dev_printk(KERN_ERR, &pdev->dev,
2607 "BUG: invalid board index %u\n", board_idx);
2608 return 1;
2611 hpriv->hp_flags = hp_flags;
2612 if (hp_flags & MV_HP_PCIE) {
2613 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2614 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2615 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2616 } else {
2617 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2618 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2619 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2622 return 0;
2626 * mv_init_host - Perform some early initialization of the host.
2627 * @host: ATA host to initialize
2628 * @board_idx: controller index
2630 * If possible, do an early global reset of the host. Then do
2631 * our port init and clear/unmask all/relevant host interrupts.
2633 * LOCKING:
2634 * Inherited from caller.
2636 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2638 int rc = 0, n_hc, port, hc;
2639 struct pci_dev *pdev = to_pci_dev(host->dev);
2640 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2641 struct mv_host_priv *hpriv = host->private_data;
2643 /* global interrupt mask */
2644 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2646 rc = mv_chip_id(host, board_idx);
2647 if (rc)
2648 goto done;
2650 n_hc = mv_get_hc_count(host->ports[0]->flags);
2652 for (port = 0; port < host->n_ports; port++)
2653 hpriv->ops->read_preamp(hpriv, port, mmio);
2655 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2656 if (rc)
2657 goto done;
2659 hpriv->ops->reset_flash(hpriv, mmio);
2660 hpriv->ops->reset_bus(pdev, mmio);
2661 hpriv->ops->enable_leds(hpriv, mmio);
2663 for (port = 0; port < host->n_ports; port++) {
2664 if (IS_GEN_II(hpriv)) {
2665 void __iomem *port_mmio = mv_port_base(mmio, port);
2667 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2668 ifctl |= (1 << 7); /* enable gen2i speed */
2669 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2670 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2673 hpriv->ops->phy_errata(hpriv, mmio, port);
2676 for (port = 0; port < host->n_ports; port++) {
2677 struct ata_port *ap = host->ports[port];
2678 void __iomem *port_mmio = mv_port_base(mmio, port);
2679 unsigned int offset = port_mmio - mmio;
2681 mv_port_init(&ap->ioaddr, port_mmio);
2683 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2684 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2687 for (hc = 0; hc < n_hc; hc++) {
2688 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2690 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2691 "(before clear)=0x%08x\n", hc,
2692 readl(hc_mmio + HC_CFG_OFS),
2693 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2695 /* Clear any currently outstanding hc interrupt conditions */
2696 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2699 /* Clear any currently outstanding host interrupt conditions */
2700 writelfl(0, mmio + hpriv->irq_cause_ofs);
2702 /* and unmask interrupt generation for host regs */
2703 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2705 if (IS_GEN_I(hpriv))
2706 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2707 else
2708 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2710 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2711 "PCI int cause/mask=0x%08x/0x%08x\n",
2712 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2713 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2714 readl(mmio + hpriv->irq_cause_ofs),
2715 readl(mmio + hpriv->irq_mask_ofs));
2717 done:
2718 return rc;
2722 * mv_print_info - Dump key info to kernel log for perusal.
2723 * @host: ATA host to print info about
2725 * FIXME: complete this.
2727 * LOCKING:
2728 * Inherited from caller.
2730 static void mv_print_info(struct ata_host *host)
2732 struct pci_dev *pdev = to_pci_dev(host->dev);
2733 struct mv_host_priv *hpriv = host->private_data;
2734 u8 scc;
2735 const char *scc_s, *gen;
2737 	/* Read the PCI device class so we can report whether the
2738 	 * controller identifies itself as SCSI or RAID
2740 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2741 if (scc == 0)
2742 scc_s = "SCSI";
2743 else if (scc == 0x01)
2744 scc_s = "RAID";
2745 else
2746 scc_s = "?";
2748 if (IS_GEN_I(hpriv))
2749 gen = "I";
2750 else if (IS_GEN_II(hpriv))
2751 gen = "II";
2752 else if (IS_GEN_IIE(hpriv))
2753 gen = "IIE";
2754 else
2755 gen = "?";
2757 dev_printk(KERN_INFO, &pdev->dev,
2758 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2759 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2760 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2764 * mv_init_one - handle a positive probe of a Marvell host
2765 * @pdev: PCI device found
2766 * @ent: PCI device ID entry for the matched host
2768 * LOCKING:
2769 * Inherited from caller.
2771 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2773 static int printed_version;
2774 unsigned int board_idx = (unsigned int)ent->driver_data;
2775 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2776 struct ata_host *host;
2777 struct mv_host_priv *hpriv;
2778 int n_ports, rc;
2780 if (!printed_version++)
2781 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2783 /* allocate host */
2784 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2786 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2787 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2788 if (!host || !hpriv)
2789 return -ENOMEM;
2790 host->private_data = hpriv;
2792 /* acquire resources */
2793 rc = pcim_enable_device(pdev);
2794 if (rc)
2795 return rc;
2797 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2798 if (rc == -EBUSY)
2799 pcim_pin_device(pdev);
2800 if (rc)
2801 return rc;
2802 host->iomap = pcim_iomap_table(pdev);
2804 rc = pci_go_64(pdev);
2805 if (rc)
2806 return rc;
2808 /* initialize adapter */
2809 rc = mv_init_host(host, board_idx);
2810 if (rc)
2811 return rc;
2813 /* Enable interrupts */
2814 if (msi && pci_enable_msi(pdev))
2815 pci_intx(pdev, 1);
2817 mv_dump_pci_cfg(pdev, 0x68);
2818 mv_print_info(host);
2820 pci_set_master(pdev);
2821 pci_try_set_mwi(pdev);
2822 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2823 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2826 static int __init mv_init(void)
2828 return pci_register_driver(&mv_pci_driver);
2831 static void __exit mv_exit(void)
2833 pci_unregister_driver(&mv_pci_driver);
2836 MODULE_AUTHOR("Brett Russ");
2837 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2838 MODULE_LICENSE("GPL");
2839 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2840 MODULE_VERSION(DRV_VERSION);
2842 module_param(msi, int, 0444);
2843 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2845 module_init(mv_init);
2846 module_exit(mv_exit);