drivers/ata/sata_mv.c
1 /*
2 * sata_mv.c - Marvell SATA support
4 * Copyright 2008: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 sata_mv TODO list:
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember that a couple of workarounds (one related to
31 PCI-X) are still needed.
33 2) Improve/fix IRQ and error handling sequences.
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
37 4) Think about TCQ support here, and for libata in general
38 with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
43 6) Add port multiplier support (intermediate)
45 8) Develop a low-power-consumption strategy, and implement it.
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
53 the overhead reduced by interrupt mitigation is not
54 worth the latency cost.
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
85 enum {
86 /* BARs are enumerated in pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
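/* For reference: with MV_MAX_Q_DEPTH == 32 the above works out to a 1KB
 * CRQB queue (32 * 32B), a 256B CRPB queue (32 * 8B), and a 4KB SG
 * table (256 * 16B) per command tag.
 */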
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
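/* Example: port 5 is hosted by HC 1 (5 >> MV_PORT_HC_SHIFT) and is
 * hard port 1 within that HC (5 & MV_PORT_MASK).
 */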
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
145 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
146 CRQB_CMD_ADDR_SHIFT = 8,
147 CRQB_CMD_CS = (0x2 << 11),
148 CRQB_CMD_LAST = (1 << 15),
150 CRPB_FLAG_STATUS_SHIFT = 8,
151 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
152 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
154 EPRD_FLAG_END_OF_TBL = (1 << 31),
156 /* PCI interface registers */
158 PCI_COMMAND_OFS = 0xc00,
160 PCI_MAIN_CMD_STS_OFS = 0xd30,
161 STOP_PCI_MASTER = (1 << 2),
162 PCI_MASTER_EMPTY = (1 << 3),
163 GLOB_SFT_RST = (1 << 4),
165 MV_PCI_MODE = 0xd00,
166 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
167 MV_PCI_DISC_TIMER = 0xd04,
168 MV_PCI_MSI_TRIGGER = 0xc38,
169 MV_PCI_SERR_MASK = 0xc28,
170 MV_PCI_XBAR_TMOUT = 0x1d04,
171 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
172 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
173 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
174 MV_PCI_ERR_COMMAND = 0x1d50,
176 PCI_IRQ_CAUSE_OFS = 0x1d58,
177 PCI_IRQ_MASK_OFS = 0x1d5c,
178 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
180 PCIE_IRQ_CAUSE_OFS = 0x1900,
181 PCIE_IRQ_MASK_OFS = 0x1910,
182 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
184 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
185 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
186 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
187 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
188 PORT0_ERR = (1 << 0), /* shift by port # */
189 PORT0_DONE = (1 << 1), /* shift by port # */
190 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
191 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
192 PCI_ERR = (1 << 18),
193 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
194 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
195 PORTS_0_3_COAL_DONE = (1 << 8),
196 PORTS_4_7_COAL_DONE = (1 << 17),
197 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
198 GPIO_INT = (1 << 22),
199 SELF_INT = (1 << 23),
200 TWSI_INT = (1 << 24),
201 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
202 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
203 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
204 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
205 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
206 HC_MAIN_RSVD),
207 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
208 HC_MAIN_RSVD_5),
209 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
211 /* SATAHC registers */
212 HC_CFG_OFS = 0,
214 HC_IRQ_CAUSE_OFS = 0x14,
215 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
216 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
217 DEV_IRQ = (1 << 8), /* shift by port # */
219 /* Shadow block registers */
220 SHD_BLK_OFS = 0x100,
221 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
223 /* SATA registers */
224 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
225 SATA_ACTIVE_OFS = 0x350,
226 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
228 LTMODE_OFS = 0x30c,
229 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
231 PHY_MODE3 = 0x310,
232 PHY_MODE4 = 0x314,
233 PHY_MODE2 = 0x330,
234 SATA_IFCTL_OFS = 0x344,
235 SATA_IFSTAT_OFS = 0x34c,
236 VENDOR_UNIQUE_FIS_OFS = 0x35c,
238 FIS_CFG_OFS = 0x360,
239 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
241 MV5_PHY_MODE = 0x74,
242 MV5_LT_MODE = 0x30,
243 MV5_PHY_CTL = 0x0C,
244 SATA_INTERFACE_CFG = 0x050,
246 MV_M2_PREAMP_MASK = 0x7e0,
248 /* Port registers */
249 EDMA_CFG_OFS = 0,
250 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
251 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
252 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
253 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
254 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
255 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
256 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
258 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
259 EDMA_ERR_IRQ_MASK_OFS = 0xc,
260 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
261 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
262 EDMA_ERR_DEV = (1 << 2), /* device error */
263 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
264 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
265 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
266 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
267 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
268 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
269 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
270 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
271 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
272 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
273 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
275 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
276 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
277 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
278 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
279 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
281 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
283 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
284 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
285 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
286 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
287 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
288 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
290 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
292 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
293 EDMA_ERR_OVERRUN_5 = (1 << 5),
294 EDMA_ERR_UNDERRUN_5 = (1 << 6),
296 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
297 EDMA_ERR_LNK_CTRL_RX_1 |
298 EDMA_ERR_LNK_CTRL_RX_3 |
299 EDMA_ERR_LNK_CTRL_TX,
301 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_SERR |
306 EDMA_ERR_SELF_DIS |
307 EDMA_ERR_CRQB_PAR |
308 EDMA_ERR_CRPB_PAR |
309 EDMA_ERR_INTRL_PAR |
310 EDMA_ERR_IORDY |
311 EDMA_ERR_LNK_CTRL_RX_2 |
312 EDMA_ERR_LNK_DATA_RX |
313 EDMA_ERR_LNK_DATA_TX |
314 EDMA_ERR_TRANS_PROTO,
316 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
317 EDMA_ERR_PRD_PAR |
318 EDMA_ERR_DEV_DCON |
319 EDMA_ERR_DEV_CON |
320 EDMA_ERR_OVERRUN_5 |
321 EDMA_ERR_UNDERRUN_5 |
322 EDMA_ERR_SELF_DIS_5 |
323 EDMA_ERR_CRQB_PAR |
324 EDMA_ERR_CRPB_PAR |
325 EDMA_ERR_INTRL_PAR |
326 EDMA_ERR_IORDY,
328 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
329 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
331 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
332 EDMA_REQ_Q_PTR_SHIFT = 5,
334 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
335 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
336 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
337 EDMA_RSP_Q_PTR_SHIFT = 3,
339 EDMA_CMD_OFS = 0x28, /* EDMA command register */
340 EDMA_EN = (1 << 0), /* enable EDMA */
341 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
342 ATA_RST = (1 << 2), /* reset trans/link/phy */
344 EDMA_IORDY_TMOUT = 0x34,
345 EDMA_ARB_CFG = 0x38,
347 /* Host private flags (hp_flags) */
348 MV_HP_FLAG_MSI = (1 << 0),
349 MV_HP_ERRATA_50XXB0 = (1 << 1),
350 MV_HP_ERRATA_50XXB2 = (1 << 2),
351 MV_HP_ERRATA_60X1B2 = (1 << 3),
352 MV_HP_ERRATA_60X1C0 = (1 << 4),
353 MV_HP_ERRATA_XX42A0 = (1 << 5),
354 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
355 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
356 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
357 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
359 /* Port private flags (pp_flags) */
360 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
361 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
364 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
365 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
366 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
367 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
369 enum {
370 /* DMA boundary 0xffff is required by the s/g splitting
371 * we need on /length/ in mv_fill_sg().
373 MV_DMA_BOUNDARY = 0xffffU,
375 /* mask of register bits containing lower 32 bits
376 * of EDMA request queue DMA address
378 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
380 /* ditto, for response queue */
381 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
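/* These masks mirror the queue alignment rules above: the request
 * queue base is 1KB aligned (low 10 bits free for the index) and the
 * response queue base is 256B aligned (low 8 bits free).
 */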
384 enum chip_type {
385 chip_504x,
386 chip_508x,
387 chip_5080,
388 chip_604x,
389 chip_608x,
390 chip_6042,
391 chip_7042,
392 chip_soc,
395 /* Command ReQuest Block: 32B */
396 struct mv_crqb {
397 __le32 sg_addr;
398 __le32 sg_addr_hi;
399 __le16 ctrl_flags;
400 __le16 ata_cmd[11];
403 struct mv_crqb_iie {
404 __le32 addr;
405 __le32 addr_hi;
406 __le32 flags;
407 __le32 len;
408 __le32 ata_cmd[4];
411 /* Command ResPonse Block: 8B */
412 struct mv_crpb {
413 __le16 id;
414 __le16 flags;
415 __le32 tmstmp;
418 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
419 struct mv_sg {
420 __le32 addr;
421 __le32 flags_size;
422 __le32 addr_hi;
423 __le32 reserved;
426 struct mv_port_priv {
427 struct mv_crqb *crqb;
428 dma_addr_t crqb_dma;
429 struct mv_crpb *crpb;
430 dma_addr_t crpb_dma;
431 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
432 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
434 unsigned int req_idx;
435 unsigned int resp_idx;
437 u32 pp_flags;
440 struct mv_port_signal {
441 u32 amps;
442 u32 pre;
445 struct mv_host_priv {
446 u32 hp_flags;
447 struct mv_port_signal signal[8];
448 const struct mv_hw_ops *ops;
449 int n_ports;
450 void __iomem *base;
451 void __iomem *main_cause_reg_addr;
452 void __iomem *main_mask_reg_addr;
453 u32 irq_cause_ofs;
454 u32 irq_mask_ofs;
455 u32 unmask_all_irqs;
457 * These consistent DMA memory pools give us guaranteed
458 * alignment for hardware-accessed data structures,
459 * and less memory waste in accomplishing the alignment.
461 struct dma_pool *crqb_pool;
462 struct dma_pool *crpb_pool;
463 struct dma_pool *sg_tbl_pool;
466 struct mv_hw_ops {
467 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
468 unsigned int port);
469 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
470 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
471 void __iomem *mmio);
472 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
473 unsigned int n_hc);
474 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
475 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
478 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
479 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
480 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
481 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
482 static int mv_port_start(struct ata_port *ap);
483 static void mv_port_stop(struct ata_port *ap);
484 static void mv_qc_prep(struct ata_queued_cmd *qc);
485 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
486 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
487 static int mv_hardreset(struct ata_link *link, unsigned int *class,
488 unsigned long deadline);
489 static void mv_eh_freeze(struct ata_port *ap);
490 static void mv_eh_thaw(struct ata_port *ap);
491 static void mv6_dev_config(struct ata_device *dev);
493 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int port);
495 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
496 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
497 void __iomem *mmio);
498 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
499 unsigned int n_hc);
500 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
501 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
503 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
504 unsigned int port);
505 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
506 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
507 void __iomem *mmio);
508 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
509 unsigned int n_hc);
510 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
511 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
512 void __iomem *mmio);
513 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
514 void __iomem *mmio);
515 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
516 void __iomem *mmio, unsigned int n_hc);
517 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
518 void __iomem *mmio);
519 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
520 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
521 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
522 unsigned int port_no);
523 static int mv_stop_edma(struct ata_port *ap);
524 static int mv_stop_edma_engine(void __iomem *port_mmio);
525 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
527 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
528 * because we have to allow room for worst case splitting of
529 * PRDs for 64K boundaries in mv_fill_sg().
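/* That is, each of the 128 (MV_MAX_SG_CT / 2) entries handed to us by
 * the SCSI layer may be split in two by mv_fill_sg(), and the result
 * still fits within the 256-entry (MV_MAX_SG_CT) ePRD table.
 */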
531 static struct scsi_host_template mv5_sht = {
532 ATA_BASE_SHT(DRV_NAME),
533 .sg_tablesize = MV_MAX_SG_CT / 2,
534 .dma_boundary = MV_DMA_BOUNDARY,
537 static struct scsi_host_template mv6_sht = {
538 ATA_NCQ_SHT(DRV_NAME),
539 .can_queue = MV_MAX_Q_DEPTH - 1,
540 .sg_tablesize = MV_MAX_SG_CT / 2,
541 .dma_boundary = MV_DMA_BOUNDARY,
544 static struct ata_port_operations mv5_ops = {
545 .inherits = &ata_sff_port_ops,
547 .qc_prep = mv_qc_prep,
548 .qc_issue = mv_qc_issue,
550 .freeze = mv_eh_freeze,
551 .thaw = mv_eh_thaw,
552 .hardreset = mv_hardreset,
553 .error_handler = ata_std_error_handler, /* avoid SFF EH */
554 .post_internal_cmd = ATA_OP_NULL,
556 .scr_read = mv5_scr_read,
557 .scr_write = mv5_scr_write,
559 .port_start = mv_port_start,
560 .port_stop = mv_port_stop,
563 static struct ata_port_operations mv6_ops = {
564 .inherits = &mv5_ops,
565 .qc_defer = ata_std_qc_defer,
566 .dev_config = mv6_dev_config,
567 .scr_read = mv_scr_read,
568 .scr_write = mv_scr_write,
571 static struct ata_port_operations mv_iie_ops = {
572 .inherits = &mv6_ops,
573 .dev_config = ATA_OP_NULL,
574 .qc_prep = mv_qc_prep_iie,
577 static const struct ata_port_info mv_port_info[] = {
578 { /* chip_504x */
579 .flags = MV_COMMON_FLAGS,
580 .pio_mask = 0x1f, /* pio0-4 */
581 .udma_mask = ATA_UDMA6,
582 .port_ops = &mv5_ops,
584 { /* chip_508x */
585 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
586 .pio_mask = 0x1f, /* pio0-4 */
587 .udma_mask = ATA_UDMA6,
588 .port_ops = &mv5_ops,
590 { /* chip_5080 */
591 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
592 .pio_mask = 0x1f, /* pio0-4 */
593 .udma_mask = ATA_UDMA6,
594 .port_ops = &mv5_ops,
596 { /* chip_604x */
597 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
598 ATA_FLAG_NCQ,
599 .pio_mask = 0x1f, /* pio0-4 */
600 .udma_mask = ATA_UDMA6,
601 .port_ops = &mv6_ops,
603 { /* chip_608x */
604 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
605 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
606 .pio_mask = 0x1f, /* pio0-4 */
607 .udma_mask = ATA_UDMA6,
608 .port_ops = &mv6_ops,
610 { /* chip_6042 */
611 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
612 ATA_FLAG_NCQ,
613 .pio_mask = 0x1f, /* pio0-4 */
614 .udma_mask = ATA_UDMA6,
615 .port_ops = &mv_iie_ops,
617 { /* chip_7042 */
618 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
619 ATA_FLAG_NCQ,
620 .pio_mask = 0x1f, /* pio0-4 */
621 .udma_mask = ATA_UDMA6,
622 .port_ops = &mv_iie_ops,
624 { /* chip_soc */
625 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
626 .pio_mask = 0x1f, /* pio0-4 */
627 .udma_mask = ATA_UDMA6,
628 .port_ops = &mv_iie_ops,
632 static const struct pci_device_id mv_pci_tbl[] = {
633 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
634 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
635 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
636 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
637 /* RocketRAID 1740/174x have different identifiers */
638 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
639 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
641 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
642 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
643 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
644 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
645 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
647 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
649 /* Adaptec 1430SA */
650 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
652 /* Marvell 7042 support */
653 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
655 /* Highpoint RocketRAID PCIe series */
656 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
657 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
659 { } /* terminate list */
662 static const struct mv_hw_ops mv5xxx_ops = {
663 .phy_errata = mv5_phy_errata,
664 .enable_leds = mv5_enable_leds,
665 .read_preamp = mv5_read_preamp,
666 .reset_hc = mv5_reset_hc,
667 .reset_flash = mv5_reset_flash,
668 .reset_bus = mv5_reset_bus,
671 static const struct mv_hw_ops mv6xxx_ops = {
672 .phy_errata = mv6_phy_errata,
673 .enable_leds = mv6_enable_leds,
674 .read_preamp = mv6_read_preamp,
675 .reset_hc = mv6_reset_hc,
676 .reset_flash = mv6_reset_flash,
677 .reset_bus = mv_reset_pci_bus,
680 static const struct mv_hw_ops mv_soc_ops = {
681 .phy_errata = mv6_phy_errata,
682 .enable_leds = mv_soc_enable_leds,
683 .read_preamp = mv_soc_read_preamp,
684 .reset_hc = mv_soc_reset_hc,
685 .reset_flash = mv_soc_reset_flash,
686 .reset_bus = mv_soc_reset_bus,
690 * Functions
693 static inline void writelfl(unsigned long data, void __iomem *addr)
695 writel(data, addr);
696 (void) readl(addr); /* flush to avoid PCI posted write */
699 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
701 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
704 static inline unsigned int mv_hc_from_port(unsigned int port)
706 return port >> MV_PORT_HC_SHIFT;
709 static inline unsigned int mv_hardport_from_port(unsigned int port)
711 return port & MV_PORT_MASK;
714 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
715 unsigned int port)
717 return mv_hc_base(base, mv_hc_from_port(port));
720 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
722 return mv_hc_base_from_port(base, port) +
723 MV_SATAHC_ARBTR_REG_SZ +
724 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
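/* Example: for port 5 this resolves to
 * base + 0x20000 + (1 * 0x10000) + 0x2000 + (1 * 0x2000),
 * i.e. base + 0x34000.
 */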
727 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
729 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
730 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
732 return hc_mmio + ofs;
735 static inline void __iomem *mv_host_base(struct ata_host *host)
737 struct mv_host_priv *hpriv = host->private_data;
738 return hpriv->base;
741 static inline void __iomem *mv_ap_base(struct ata_port *ap)
743 return mv_port_base(mv_host_base(ap->host), ap->port_no);
746 static inline int mv_get_hc_count(unsigned long port_flags)
748 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
751 static void mv_set_edma_ptrs(void __iomem *port_mmio,
752 struct mv_host_priv *hpriv,
753 struct mv_port_priv *pp)
755 u32 index;
758 * initialize request queue
760 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
762 WARN_ON(pp->crqb_dma & 0x3ff);
763 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
764 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
765 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
767 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
768 writelfl((pp->crqb_dma & 0xffffffff) | index,
769 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
770 else
771 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
774 * initialize response queue
776 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
778 WARN_ON(pp->crpb_dma & 0xff);
779 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
781 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
782 writelfl((pp->crpb_dma & 0xffffffff) | index,
783 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
784 else
785 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
787 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
788 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
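/* Note on the writes above: per the register definitions, the request
 * queue IN pointer and the response queue OUT pointer also hold the
 * low bits of the queue base address, so those writes combine the
 * 1KB/256B-aligned base with the index shifted into the free low bits.
 */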
792 * mv_start_dma - Enable eDMA engine
793 * @port_mmio: port base address
794 * @pp: port private data
796 * Verify the local cache of the eDMA state is accurate with a
797 * WARN_ON.
799 * LOCKING:
800 * Inherited from caller.
802 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
803 struct mv_port_priv *pp, u8 protocol)
805 int want_ncq = (protocol == ATA_PROT_NCQ);
807 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
808 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
809 if (want_ncq != using_ncq)
810 mv_stop_edma(ap);
812 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
813 struct mv_host_priv *hpriv = ap->host->private_data;
814 int hard_port = mv_hardport_from_port(ap->port_no);
815 void __iomem *hc_mmio = mv_hc_base_from_port(
816 mv_host_base(ap->host), hard_port);
817 u32 hc_irq_cause, ipending;
819 /* clear EDMA event indicators, if any */
820 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
822 /* clear EDMA interrupt indicator, if any */
823 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
824 ipending = (DEV_IRQ << hard_port) |
825 (CRPB_DMA_DONE << hard_port);
826 if (hc_irq_cause & ipending) {
827 writelfl(hc_irq_cause & ~ipending,
828 hc_mmio + HC_IRQ_CAUSE_OFS);
831 mv_edma_cfg(ap, want_ncq);
833 /* clear FIS IRQ Cause */
834 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
836 mv_set_edma_ptrs(port_mmio, hpriv, pp);
838 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
839 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
841 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
845 * mv_stop_edma_engine - Disable eDMA engine
846 * @port_mmio: io base address
848 * LOCKING:
849 * Inherited from caller.
851 static int mv_stop_edma_engine(void __iomem *port_mmio)
853 int i;
855 /* Disable eDMA. The disable bit auto clears. */
856 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
858 /* Wait for the chip to confirm eDMA is off. */
859 for (i = 10000; i > 0; i--) {
860 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
861 if (!(reg & EDMA_EN))
862 return 0;
863 udelay(10);
865 return -EIO;
868 static int mv_stop_edma(struct ata_port *ap)
870 void __iomem *port_mmio = mv_ap_base(ap);
871 struct mv_port_priv *pp = ap->private_data;
873 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
874 return 0;
875 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
876 if (mv_stop_edma_engine(port_mmio)) {
877 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
878 return -EIO;
880 return 0;
883 #ifdef ATA_DEBUG
884 static void mv_dump_mem(void __iomem *start, unsigned bytes)
886 int b, w;
887 for (b = 0; b < bytes; ) {
888 DPRINTK("%p: ", start + b);
889 for (w = 0; b < bytes && w < 4; w++) {
890 printk("%08x ", readl(start + b));
891 b += sizeof(u32);
893 printk("\n");
896 #endif
898 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
900 #ifdef ATA_DEBUG
901 int b, w;
902 u32 dw;
903 for (b = 0; b < bytes; ) {
904 DPRINTK("%02x: ", b);
905 for (w = 0; b < bytes && w < 4; w++) {
906 (void) pci_read_config_dword(pdev, b, &dw);
907 printk("%08x ", dw);
908 b += sizeof(u32);
910 printk("\n");
912 #endif
914 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
915 struct pci_dev *pdev)
917 #ifdef ATA_DEBUG
918 void __iomem *hc_base = mv_hc_base(mmio_base,
919 port >> MV_PORT_HC_SHIFT);
920 void __iomem *port_base;
921 int start_port, num_ports, p, start_hc, num_hcs, hc;
923 if (0 > port) {
924 start_hc = start_port = 0;
925 num_ports = 8; /* should be benign for 4 port devs */
926 num_hcs = 2;
927 } else {
928 start_hc = port >> MV_PORT_HC_SHIFT;
929 start_port = port;
930 num_ports = num_hcs = 1;
932 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
933 num_ports > 1 ? num_ports - 1 : start_port);
935 if (NULL != pdev) {
936 DPRINTK("PCI config space regs:\n");
937 mv_dump_pci_cfg(pdev, 0x68);
939 DPRINTK("PCI regs:\n");
940 mv_dump_mem(mmio_base+0xc00, 0x3c);
941 mv_dump_mem(mmio_base+0xd00, 0x34);
942 mv_dump_mem(mmio_base+0xf00, 0x4);
943 mv_dump_mem(mmio_base+0x1d00, 0x6c);
944 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
945 hc_base = mv_hc_base(mmio_base, hc);
946 DPRINTK("HC regs (HC %i):\n", hc);
947 mv_dump_mem(hc_base, 0x1c);
949 for (p = start_port; p < start_port + num_ports; p++) {
950 port_base = mv_port_base(mmio_base, p);
951 DPRINTK("EDMA regs (port %i):\n", p);
952 mv_dump_mem(port_base, 0x54);
953 DPRINTK("SATA regs (port %i):\n", p);
954 mv_dump_mem(port_base+0x300, 0x60);
956 #endif
959 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
961 unsigned int ofs;
963 switch (sc_reg_in) {
964 case SCR_STATUS:
965 case SCR_CONTROL:
966 case SCR_ERROR:
967 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
968 break;
969 case SCR_ACTIVE:
970 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
971 break;
972 default:
973 ofs = 0xffffffffU;
974 break;
976 return ofs;
979 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
981 unsigned int ofs = mv_scr_offset(sc_reg_in);
983 if (ofs != 0xffffffffU) {
984 *val = readl(mv_ap_base(ap) + ofs);
985 return 0;
986 } else
987 return -EINVAL;
990 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
992 unsigned int ofs = mv_scr_offset(sc_reg_in);
994 if (ofs != 0xffffffffU) {
995 writelfl(val, mv_ap_base(ap) + ofs);
996 return 0;
997 } else
998 return -EINVAL;
1001 static void mv6_dev_config(struct ata_device *adev)
1004 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1005 * See mv_qc_prep() for more info.
1007 if (adev->flags & ATA_DFLAG_NCQ)
1008 if (adev->max_sectors > ATA_MAX_SECTORS)
1009 adev->max_sectors = ATA_MAX_SECTORS;
1012 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1014 u32 cfg;
1015 struct mv_port_priv *pp = ap->private_data;
1016 struct mv_host_priv *hpriv = ap->host->private_data;
1017 void __iomem *port_mmio = mv_ap_base(ap);
1019 /* set up non-NCQ EDMA configuration */
1020 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1022 if (IS_GEN_I(hpriv))
1023 cfg |= (1 << 8); /* enab config burst size mask */
1025 else if (IS_GEN_II(hpriv))
1026 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1028 else if (IS_GEN_IIE(hpriv)) {
1029 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1030 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1031 cfg |= (1 << 18); /* enab early completion */
1032 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1035 if (want_ncq) {
1036 cfg |= EDMA_CFG_NCQ;
1037 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1038 } else
1039 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1041 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1044 static void mv_port_free_dma_mem(struct ata_port *ap)
1046 struct mv_host_priv *hpriv = ap->host->private_data;
1047 struct mv_port_priv *pp = ap->private_data;
1048 int tag;
1050 if (pp->crqb) {
1051 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1052 pp->crqb = NULL;
1054 if (pp->crpb) {
1055 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1056 pp->crpb = NULL;
1059 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1060 * For later hardware, we have one unique sg_tbl per NCQ tag.
1062 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1063 if (pp->sg_tbl[tag]) {
1064 if (tag == 0 || !IS_GEN_I(hpriv))
1065 dma_pool_free(hpriv->sg_tbl_pool,
1066 pp->sg_tbl[tag],
1067 pp->sg_tbl_dma[tag]);
1068 pp->sg_tbl[tag] = NULL;
1074 * mv_port_start - Port specific init/start routine.
1075 * @ap: ATA channel to manipulate
1077 * Allocate and point to DMA memory, init port private memory,
1078 * zero indices.
1080 * LOCKING:
1081 * Inherited from caller.
1083 static int mv_port_start(struct ata_port *ap)
1085 struct device *dev = ap->host->dev;
1086 struct mv_host_priv *hpriv = ap->host->private_data;
1087 struct mv_port_priv *pp;
1088 void __iomem *port_mmio = mv_ap_base(ap);
1089 unsigned long flags;
1090 int tag;
1092 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1093 if (!pp)
1094 return -ENOMEM;
1095 ap->private_data = pp;
1097 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1098 if (!pp->crqb)
1099 return -ENOMEM;
1100 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1102 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1103 if (!pp->crpb)
1104 goto out_port_free_dma_mem;
1105 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1108 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1109 * For later hardware, we need one unique sg_tbl per NCQ tag.
1111 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1112 if (tag == 0 || !IS_GEN_I(hpriv)) {
1113 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1114 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1115 if (!pp->sg_tbl[tag])
1116 goto out_port_free_dma_mem;
1117 } else {
1118 pp->sg_tbl[tag] = pp->sg_tbl[0];
1119 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1123 spin_lock_irqsave(&ap->host->lock, flags);
1125 mv_edma_cfg(ap, 0);
1126 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1128 spin_unlock_irqrestore(&ap->host->lock, flags);
1130 /* Don't turn on EDMA here...do it before DMA commands only. Else
1131 * we'll be unable to send non-data, PIO, etc due to restricted access
1132 * to shadow regs.
1134 return 0;
1136 out_port_free_dma_mem:
1137 mv_port_free_dma_mem(ap);
1138 return -ENOMEM;
1142 * mv_port_stop - Port specific cleanup/stop routine.
1143 * @ap: ATA channel to manipulate
1145 * Stop DMA, cleanup port memory.
1147 * LOCKING:
1148 * This routine uses the host lock to protect the DMA stop.
1150 static void mv_port_stop(struct ata_port *ap)
1152 mv_stop_edma(ap);
1153 mv_port_free_dma_mem(ap);
1157 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1158 * @qc: queued command whose SG list to source from
1160 * Populate the SG list and mark the last entry.
1162 * LOCKING:
1163 * Inherited from caller.
1165 static void mv_fill_sg(struct ata_queued_cmd *qc)
1167 struct mv_port_priv *pp = qc->ap->private_data;
1168 struct scatterlist *sg;
1169 struct mv_sg *mv_sg, *last_sg = NULL;
1170 unsigned int si;
1172 mv_sg = pp->sg_tbl[qc->tag];
1173 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1174 dma_addr_t addr = sg_dma_address(sg);
1175 u32 sg_len = sg_dma_len(sg);
1177 while (sg_len) {
1178 u32 offset = addr & 0xffff;
1179 u32 len = sg_len;
1181 if (offset + sg_len > 0x10000)
1182 len = 0x10000 - offset;
1184 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1185 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1186 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1188 sg_len -= len;
1189 addr += len;
1191 last_sg = mv_sg;
1192 mv_sg++;
1196 if (likely(last_sg))
1197 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
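/* Example of the splitting above: a 12KB segment whose bus address
 * ends in 0xf000 has offset 0xf000, so it is emitted as a 4KB ePRD
 * followed by an 8KB ePRD.  A full 64KB piece ends up with a length
 * field of 0, since only the low 16 bits of the length are kept.
 */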
1200 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1202 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1203 (last ? CRQB_CMD_LAST : 0);
1204 *cmdw = cpu_to_le16(tmp);
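/* Layout of the packed CRQB command word: register data in bits 7:0,
 * the ATA register address in bits 10:8 (CRQB_CMD_ADDR_SHIFT),
 * CRQB_CMD_CS in bits 12:11, and CRQB_CMD_LAST in bit 15 marking the
 * final word of the command sequence.
 */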
1208 * mv_qc_prep - Host specific command preparation.
1209 * @qc: queued command to prepare
1211 * This routine simply redirects to the general purpose routine
1212 * if command is not DMA. Else, it handles prep of the CRQB
1213 * (command request block), does some sanity checking, and calls
1214 * the SG load routine.
1216 * LOCKING:
1217 * Inherited from caller.
1219 static void mv_qc_prep(struct ata_queued_cmd *qc)
1221 struct ata_port *ap = qc->ap;
1222 struct mv_port_priv *pp = ap->private_data;
1223 __le16 *cw;
1224 struct ata_taskfile *tf;
1225 u16 flags = 0;
1226 unsigned in_index;
1228 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1229 (qc->tf.protocol != ATA_PROT_NCQ))
1230 return;
1232 /* Fill in command request block
1234 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1235 flags |= CRQB_FLAG_READ;
1236 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1237 flags |= qc->tag << CRQB_TAG_SHIFT;
1239 /* get current queue index from software */
1240 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1242 pp->crqb[in_index].sg_addr =
1243 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1244 pp->crqb[in_index].sg_addr_hi =
1245 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1246 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1248 cw = &pp->crqb[in_index].ata_cmd[0];
1249 tf = &qc->tf;
1251 /* Sadly, the CRQB cannot accommodate all registers--there are
1252 * only 11 bytes...so we must pick and choose required
1253 * registers based on the command. So, we drop feature and
1254 * hob_feature for [RW] DMA commands, but they are needed for
1255 * NCQ. NCQ will drop hob_nsect.
1257 switch (tf->command) {
1258 case ATA_CMD_READ:
1259 case ATA_CMD_READ_EXT:
1260 case ATA_CMD_WRITE:
1261 case ATA_CMD_WRITE_EXT:
1262 case ATA_CMD_WRITE_FUA_EXT:
1263 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1264 break;
1265 case ATA_CMD_FPDMA_READ:
1266 case ATA_CMD_FPDMA_WRITE:
1267 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1268 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1269 break;
1270 default:
1271 /* The only other commands EDMA supports in non-queued and
1272 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1273 * of which are defined/used by Linux. If we get here, this
1274 * driver needs work.
1276 * FIXME: modify libata to give qc_prep a return value and
1277 * return error here.
1279 BUG_ON(tf->command);
1280 break;
1282 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1283 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1284 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1285 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1286 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1287 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1288 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1289 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1290 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1292 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1293 return;
1294 mv_fill_sg(qc);
1298 * mv_qc_prep_iie - Host specific command preparation.
1299 * @qc: queued command to prepare
1301 * This routine simply redirects to the general purpose routine
1302 * if command is not DMA. Else, it handles prep of the CRQB
1303 * (command request block), does some sanity checking, and calls
1304 * the SG load routine.
1306 * LOCKING:
1307 * Inherited from caller.
1309 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1311 struct ata_port *ap = qc->ap;
1312 struct mv_port_priv *pp = ap->private_data;
1313 struct mv_crqb_iie *crqb;
1314 struct ata_taskfile *tf;
1315 unsigned in_index;
1316 u32 flags = 0;
1318 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1319 (qc->tf.protocol != ATA_PROT_NCQ))
1320 return;
1322 /* Fill in Gen IIE command request block */
1323 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1324 flags |= CRQB_FLAG_READ;
1326 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1327 flags |= qc->tag << CRQB_TAG_SHIFT;
1328 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1330 /* get current queue index from software */
1331 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1333 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1334 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1335 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1336 crqb->flags = cpu_to_le32(flags);
1338 tf = &qc->tf;
1339 crqb->ata_cmd[0] = cpu_to_le32(
1340 (tf->command << 16) |
1341 (tf->feature << 24)
1343 crqb->ata_cmd[1] = cpu_to_le32(
1344 (tf->lbal << 0) |
1345 (tf->lbam << 8) |
1346 (tf->lbah << 16) |
1347 (tf->device << 24)
1349 crqb->ata_cmd[2] = cpu_to_le32(
1350 (tf->hob_lbal << 0) |
1351 (tf->hob_lbam << 8) |
1352 (tf->hob_lbah << 16) |
1353 (tf->hob_feature << 24)
1355 crqb->ata_cmd[3] = cpu_to_le32(
1356 (tf->nsect << 0) |
1357 (tf->hob_nsect << 8)
1360 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1361 return;
1362 mv_fill_sg(qc);
1366 * mv_qc_issue - Initiate a command to the host
1367 * @qc: queued command to start
1369 * This routine simply redirects to the general purpose routine
1370 * if command is not DMA. Else, it sanity checks our local
1371 * caches of the request producer/consumer indices then enables
1372 * DMA and bumps the request producer index.
1374 * LOCKING:
1375 * Inherited from caller.
1377 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1379 struct ata_port *ap = qc->ap;
1380 void __iomem *port_mmio = mv_ap_base(ap);
1381 struct mv_port_priv *pp = ap->private_data;
1382 u32 in_index;
1384 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1385 (qc->tf.protocol != ATA_PROT_NCQ)) {
1387 * We're about to send a non-EDMA capable command to the
1388 * port. Turn off EDMA so there won't be problems accessing
1389 * shadow block, etc registers.
1391 mv_stop_edma(ap);
1392 return ata_sff_qc_issue(qc);
1395 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1397 pp->req_idx++;
1399 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1401 /* and write the request in pointer to kick the EDMA to life */
1402 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1403 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1405 return 0;
1409 * mv_err_intr - Handle error interrupts on the port
1410 * @ap: ATA channel to manipulate
1411 * @qc: affected queued command, or NULL if none
1413 * In most cases, just clear the interrupt and move on. However,
1414 * some cases require an eDMA reset, which also performs a COMRESET.
1415 * The SERR case requires a clear of pending errors in the SATA
1416 * SERROR register. Finally, if the port disabled DMA,
1417 * update our cached copy to match.
1419 * LOCKING:
1420 * Inherited from caller.
1422 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1424 void __iomem *port_mmio = mv_ap_base(ap);
1425 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1426 struct mv_port_priv *pp = ap->private_data;
1427 struct mv_host_priv *hpriv = ap->host->private_data;
1428 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1429 unsigned int action = 0, err_mask = 0;
1430 struct ata_eh_info *ehi = &ap->link.eh_info;
1432 ata_ehi_clear_desc(ehi);
1434 if (!edma_enabled) {
1435 /* just a guess: do we need to do this? should we
1436 * expand this, and do it in all cases?
1438 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1439 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1442 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1444 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1447 * all generations share these EDMA error cause bits
1450 if (edma_err_cause & EDMA_ERR_DEV)
1451 err_mask |= AC_ERR_DEV;
1452 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1453 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1454 EDMA_ERR_INTRL_PAR)) {
1455 err_mask |= AC_ERR_ATA_BUS;
1456 action |= ATA_EH_RESET;
1457 ata_ehi_push_desc(ehi, "parity error");
1459 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1460 ata_ehi_hotplugged(ehi);
1461 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1462 "dev disconnect" : "dev connect");
1463 action |= ATA_EH_RESET;
1466 if (IS_GEN_I(hpriv)) {
1467 eh_freeze_mask = EDMA_EH_FREEZE_5;
1469 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1470 pp = ap->private_data;
1471 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1472 ata_ehi_push_desc(ehi, "EDMA self-disable");
1474 } else {
1475 eh_freeze_mask = EDMA_EH_FREEZE;
1477 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1478 pp = ap->private_data;
1479 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1480 ata_ehi_push_desc(ehi, "EDMA self-disable");
1483 if (edma_err_cause & EDMA_ERR_SERR) {
1484 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1485 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1486 err_mask = AC_ERR_ATA_BUS;
1487 action |= ATA_EH_RESET;
1491 /* Clear EDMA now that SERR cleanup done */
1492 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1494 if (!err_mask) {
1495 err_mask = AC_ERR_OTHER;
1496 action |= ATA_EH_RESET;
1499 ehi->serror |= serr;
1500 ehi->action |= action;
1502 if (qc)
1503 qc->err_mask |= err_mask;
1504 else
1505 ehi->err_mask |= err_mask;
1507 if (edma_err_cause & eh_freeze_mask)
1508 ata_port_freeze(ap);
1509 else
1510 ata_port_abort(ap);
1513 static void mv_intr_pio(struct ata_port *ap)
1515 struct ata_queued_cmd *qc;
1516 u8 ata_status;
1518 /* ignore spurious intr if drive still BUSY */
1519 ata_status = readb(ap->ioaddr.status_addr);
1520 if (unlikely(ata_status & ATA_BUSY))
1521 return;
1523 /* get active ATA command */
1524 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1525 if (unlikely(!qc)) /* no active tag */
1526 return;
1527 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1528 return;
1530 /* and finally, complete the ATA command */
1531 qc->err_mask |= ac_err_mask(ata_status);
1532 ata_qc_complete(qc);
1535 static void mv_intr_edma(struct ata_port *ap)
1537 void __iomem *port_mmio = mv_ap_base(ap);
1538 struct mv_host_priv *hpriv = ap->host->private_data;
1539 struct mv_port_priv *pp = ap->private_data;
1540 struct ata_queued_cmd *qc;
1541 u32 out_index, in_index;
1542 bool work_done = false;
1544 /* get h/w response queue pointer */
1545 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1546 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1548 while (1) {
1549 u16 status;
1550 unsigned int tag;
1552 /* get s/w response queue last-read pointer, and compare */
1553 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1554 if (in_index == out_index)
1555 break;
1557 /* 50xx: get active ATA command */
1558 if (IS_GEN_I(hpriv))
1559 tag = ap->link.active_tag;
1561 /* Gen II/IIE: get active ATA command via tag, to enable
1562 * support for queueing. This works transparently for
1563 * queued and non-queued modes.
1565 else
1566 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1568 qc = ata_qc_from_tag(ap, tag);
1570 /* For non-NCQ mode, the lower 8 bits of status
1571 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1572 * which should be zero if all went well.
1574 status = le16_to_cpu(pp->crpb[out_index].flags);
1575 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1576 mv_err_intr(ap, qc);
1577 return;
1580 /* and finally, complete the ATA command */
1581 if (qc) {
1582 qc->err_mask |=
1583 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1584 ata_qc_complete(qc);
1587 /* advance software response queue pointer, to
1588 * indicate (after the loop completes) to hardware
1589 * that we have consumed a response queue entry.
1591 work_done = true;
1592 pp->resp_idx++;
1595 if (work_done)
1596 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1597 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1598 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1602 * mv_host_intr - Handle all interrupts on the given host controller
1603 * @host: host specific structure
1604 * @relevant: port error bits relevant to this host controller
1605 * @hc: which host controller we're to look at
1607 * Read then write clear the HC interrupt status then walk each
1608 * port connected to the HC and see if it needs servicing. Port
1609 * success ints are reported in the HC interrupt status reg, the
1610 * port error ints are reported in the higher level main
1611 * interrupt status register and thus are passed in via the
1612 * 'relevant' argument.
1614 * LOCKING:
1615 * Inherited from caller.
1617 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1619 struct mv_host_priv *hpriv = host->private_data;
1620 void __iomem *mmio = hpriv->base;
1621 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1622 u32 hc_irq_cause;
1623 int port, port0, last_port;
1625 if (hc == 0)
1626 port0 = 0;
1627 else
1628 port0 = MV_PORTS_PER_HC;
1630 if (HAS_PCI(host))
1631 last_port = port0 + MV_PORTS_PER_HC;
1632 else
1633 last_port = port0 + hpriv->n_ports;
1634 /* we'll need the HC success int register in most cases */
1635 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1636 if (!hc_irq_cause)
1637 return;
1639 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1641 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1642 hc, relevant, hc_irq_cause);
1644 for (port = port0; port < last_port; port++) {
1645 struct ata_port *ap = host->ports[port];
1646 struct mv_port_priv *pp;
1647 int have_err_bits, hard_port, shift;
1649 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1650 continue;
1652 pp = ap->private_data;
1654 shift = port << 1; /* (port * 2) */
1655 if (port >= MV_PORTS_PER_HC)
1656 shift++; /* skip bit 8 in the HC Main IRQ reg */
1658 have_err_bits = ((PORT0_ERR << shift) & relevant);
1660 if (unlikely(have_err_bits)) {
1661 struct ata_queued_cmd *qc;
1663 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1664 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1665 continue;
1667 mv_err_intr(ap, qc);
1668 continue;
1671 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1673 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1674 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1675 mv_intr_edma(ap);
1676 } else {
1677 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1678 mv_intr_pio(ap);
1681 VPRINTK("EXIT\n");
1684 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1686 struct mv_host_priv *hpriv = host->private_data;
1687 struct ata_port *ap;
1688 struct ata_queued_cmd *qc;
1689 struct ata_eh_info *ehi;
1690 unsigned int i, err_mask, printed = 0;
1691 u32 err_cause;
1693 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1695 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1696 err_cause);
1698 DPRINTK("All regs @ PCI error\n");
1699 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1701 writelfl(0, mmio + hpriv->irq_cause_ofs);
1703 for (i = 0; i < host->n_ports; i++) {
1704 ap = host->ports[i];
1705 if (!ata_link_offline(&ap->link)) {
1706 ehi = &ap->link.eh_info;
1707 ata_ehi_clear_desc(ehi);
1708 if (!printed++)
1709 ata_ehi_push_desc(ehi,
1710 "PCI err cause 0x%08x", err_cause);
1711 err_mask = AC_ERR_HOST_BUS;
1712 ehi->action = ATA_EH_RESET;
1713 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1714 if (qc)
1715 qc->err_mask |= err_mask;
1716 else
1717 ehi->err_mask |= err_mask;
1719 ata_port_freeze(ap);
1725 * mv_interrupt - Main interrupt event handler
1726 * @irq: unused
1727 * @dev_instance: private data; in this case the host structure
1729 * Read the read only register to determine if any host
1730 * controllers have pending interrupts. If so, call lower level
1731 * routine to handle. Also check for PCI errors which are only
1732 * reported here.
1734 * LOCKING:
1735 * This routine holds the host lock while processing pending
1736 * interrupts.
1738 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1740 struct ata_host *host = dev_instance;
1741 struct mv_host_priv *hpriv = host->private_data;
1742 unsigned int hc, handled = 0, n_hcs;
1743 void __iomem *mmio = hpriv->base;
1744 u32 irq_stat, irq_mask;
1746 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
1747 spin_lock(&host->lock);
1749 irq_stat = readl(hpriv->main_cause_reg_addr);
1750 irq_mask = readl(hpriv->main_mask_reg_addr);
1752 /* check the cases where we either have nothing pending or have read
1753 * a bogus register value which can indicate HW removal or PCI fault
1755 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1756 goto out_unlock;
1758 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1760 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1761 mv_pci_error(host, mmio);
1762 handled = 1;
1763 goto out_unlock; /* skip all other HC irq handling */
1766 for (hc = 0; hc < n_hcs; hc++) {
1767 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1768 if (relevant) {
1769 mv_host_intr(host, relevant, hc);
1770 handled = 1;
1774 out_unlock:
1775 spin_unlock(&host->lock);
1777 return IRQ_RETVAL(handled);
1780 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1782 unsigned int ofs;
1784 switch (sc_reg_in) {
1785 case SCR_STATUS:
1786 case SCR_ERROR:
1787 case SCR_CONTROL:
1788 ofs = sc_reg_in * sizeof(u32);
1789 break;
1790 default:
1791 ofs = 0xffffffffU;
1792 break;
1794 return ofs;
1797 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1799 struct mv_host_priv *hpriv = ap->host->private_data;
1800 void __iomem *mmio = hpriv->base;
1801 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1802 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1804 if (ofs != 0xffffffffU) {
1805 *val = readl(addr + ofs);
1806 return 0;
1807 } else
1808 return -EINVAL;
1811 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1813 struct mv_host_priv *hpriv = ap->host->private_data;
1814 void __iomem *mmio = hpriv->base;
1815 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1816 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1818 if (ofs != 0xffffffffU) {
1819 writelfl(val, addr + ofs);
1820 return 0;
1821 } else
1822 return -EINVAL;
1825 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1827 struct pci_dev *pdev = to_pci_dev(host->dev);
1828 int early_5080;
1830 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1832 if (!early_5080) {
1833 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1834 tmp |= (1 << 0);
1835 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1838 mv_reset_pci_bus(host, mmio);
1841 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1843 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1846 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1847 void __iomem *mmio)
1849 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1850 u32 tmp;
1852 tmp = readl(phy_mmio + MV5_PHY_MODE);
1854 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1855 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1858 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1860 u32 tmp;
1862 writel(0, mmio + MV_GPIO_PORT_CTL);
1864 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1866 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1867 tmp |= ~(1 << 0);
1868 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1871 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1872 unsigned int port)
1874 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1875 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1876 u32 tmp;
1877 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1879 if (fix_apm_sq) {
1880 tmp = readl(phy_mmio + MV5_LT_MODE);
1881 tmp |= (1 << 19);
1882 writel(tmp, phy_mmio + MV5_LT_MODE);
1884 tmp = readl(phy_mmio + MV5_PHY_CTL);
1885 tmp &= ~0x3;
1886 tmp |= 0x1;
1887 writel(tmp, phy_mmio + MV5_PHY_CTL);
1890 tmp = readl(phy_mmio + MV5_PHY_MODE);
1891 tmp &= ~mask;
1892 tmp |= hpriv->signal[port].pre;
1893 tmp |= hpriv->signal[port].amps;
1894 writel(tmp, phy_mmio + MV5_PHY_MODE);
1898 #undef ZERO
1899 #define ZERO(reg) writel(0, port_mmio + (reg))
1900 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1901 unsigned int port)
1903 void __iomem *port_mmio = mv_port_base(mmio, port);
1906 * The datasheet warns against setting ATA_RST when EDMA is active
1907 * (but doesn't say what the problem might be). So we first try
1908 * to disable the EDMA engine before doing the ATA_RST operation.
1910 mv_reset_channel(hpriv, mmio, port);
1912 ZERO(0x028); /* command */
1913 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1914 ZERO(0x004); /* timer */
1915 ZERO(0x008); /* irq err cause */
1916 ZERO(0x00c); /* irq err mask */
1917 ZERO(0x010); /* rq bah */
1918 ZERO(0x014); /* rq inp */
1919 ZERO(0x018); /* rq outp */
1920 ZERO(0x01c); /* respq bah */
1921 ZERO(0x024); /* respq outp */
1922 ZERO(0x020); /* respq inp */
1923 ZERO(0x02c); /* test control */
1924 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1926 #undef ZERO
1928 #define ZERO(reg) writel(0, hc_mmio + (reg))
1929 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1930 unsigned int hc)
1932 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1933 u32 tmp;
1935 ZERO(0x00c);
1936 ZERO(0x010);
1937 ZERO(0x014);
1938 ZERO(0x018);
1940 tmp = readl(hc_mmio + 0x20);
1941 tmp &= 0x1c1c1c1c;
1942 tmp |= 0x03030303;
1943 writel(tmp, hc_mmio + 0x20);
1945 #undef ZERO
1947 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 unsigned int n_hc)
1950 unsigned int hc, port;
1952 for (hc = 0; hc < n_hc; hc++) {
1953 for (port = 0; port < MV_PORTS_PER_HC; port++)
1954 mv5_reset_hc_port(hpriv, mmio,
1955 (hc * MV_PORTS_PER_HC) + port);
1957 mv5_reset_one_hc(hpriv, mmio, hc);
1958 }
1960 return 0;
1963 #undef ZERO
1964 #define ZERO(reg) writel(0, mmio + (reg))
1965 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
1967 struct mv_host_priv *hpriv = host->private_data;
1968 u32 tmp;
1970 tmp = readl(mmio + MV_PCI_MODE);
1971 tmp &= 0xff00ffff;
1972 writel(tmp, mmio + MV_PCI_MODE);
1974 ZERO(MV_PCI_DISC_TIMER);
1975 ZERO(MV_PCI_MSI_TRIGGER);
1976 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1977 ZERO(HC_MAIN_IRQ_MASK_OFS);
1978 ZERO(MV_PCI_SERR_MASK);
1979 ZERO(hpriv->irq_cause_ofs);
1980 ZERO(hpriv->irq_mask_ofs);
1981 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1982 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1983 ZERO(MV_PCI_ERR_ATTRIBUTE);
1984 ZERO(MV_PCI_ERR_COMMAND);
1986 #undef ZERO
1988 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1990 u32 tmp;
1992 mv5_reset_flash(hpriv, mmio);
1994 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1995 tmp &= 0x3;
1996 tmp |= (1 << 5) | (1 << 6);
1997 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2001 * mv6_reset_hc - Perform the 6xxx global soft reset
2002 * @mmio: base address of the HBA
2004 * This routine only applies to 6xxx parts.
2006 * LOCKING:
2007 * Inherited from caller.
2009 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2010 unsigned int n_hc)
2012 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2013 int i, rc = 0;
2014 u32 t;
2016 /* Following procedure defined in PCI "main command and status
2017 * register" table.
2019 t = readl(reg);
2020 writel(t | STOP_PCI_MASTER, reg);
2022 for (i = 0; i < 1000; i++) {
2023 udelay(1);
2024 t = readl(reg);
2025 if (PCI_MASTER_EMPTY & t)
2026 break;
2027 }
2028 if (!(PCI_MASTER_EMPTY & t)) {
2029 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2030 rc = 1;
2031 goto done;
2032 }
2034 /* set reset */
2035 i = 5;
2036 do {
2037 writel(t | GLOB_SFT_RST, reg);
2038 t = readl(reg);
2039 udelay(1);
2040 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2042 if (!(GLOB_SFT_RST & t)) {
2043 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2044 rc = 1;
2045 goto done;
2046 }
2048 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2049 i = 5;
2050 do {
2051 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2052 t = readl(reg);
2053 udelay(1);
2054 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2056 if (GLOB_SFT_RST & t) {
2057 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2058 rc = 1;
2059 }
2060 done:
2061 return rc;
2062 }
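/*
 * Illustrative sketch (not part of the driver): the bounded-poll idiom used
 * by mv6_reset_hc() above, factored into a generic helper so the intent of
 * the readl()/udelay() loops is explicit. The helper name and its
 * parameters are hypothetical.
 */
static int mv_wait_for_bit(void __iomem *reg, u32 bit, bool want_set,
			   unsigned int max_usec)
{
	unsigned int i;

	for (i = 0; i < max_usec; i++) {
		u32 val = readl(reg);

		if (!!(val & bit) == want_set)
			return 0;		/* condition met */
		udelay(1);
	}
	return -ETIMEDOUT;			/* caller decides how to recover */
}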
2064 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2065 void __iomem *mmio)
2067 void __iomem *port_mmio;
2068 u32 tmp;
2070 tmp = readl(mmio + MV_RESET_CFG);
2071 if ((tmp & (1 << 0)) == 0) {
2072 hpriv->signal[idx].amps = 0x7 << 8;
2073 hpriv->signal[idx].pre = 0x1 << 5;
2074 return;
2075 }
2077 port_mmio = mv_port_base(mmio, idx);
2078 tmp = readl(port_mmio + PHY_MODE2);
2080 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2081 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2084 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2086 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2089 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2090 unsigned int port)
2092 void __iomem *port_mmio = mv_port_base(mmio, port);
2094 u32 hp_flags = hpriv->hp_flags;
2095 int fix_phy_mode2 =
2096 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2097 int fix_phy_mode4 =
2098 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2099 u32 m2, tmp;
2101 if (fix_phy_mode2) {
2102 m2 = readl(port_mmio + PHY_MODE2);
2103 m2 &= ~(1 << 16);
2104 m2 |= (1 << 31);
2105 writel(m2, port_mmio + PHY_MODE2);
2107 udelay(200);
2109 m2 = readl(port_mmio + PHY_MODE2);
2110 m2 &= ~((1 << 16) | (1 << 31));
2111 writel(m2, port_mmio + PHY_MODE2);
2113 udelay(200);
2114 }
2116 /* who knows what this magic does */
2117 tmp = readl(port_mmio + PHY_MODE3);
2118 tmp &= ~0x7F800000;
2119 tmp |= 0x2A800000;
2120 writel(tmp, port_mmio + PHY_MODE3);
2122 if (fix_phy_mode4) {
2123 u32 m4;
2125 m4 = readl(port_mmio + PHY_MODE4);
2127 if (hp_flags & MV_HP_ERRATA_60X1B2)
2128 tmp = readl(port_mmio + PHY_MODE3);
2130 /* workaround for errata FEr SATA#10 (part 1) */
2131 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2133 writel(m4, port_mmio + PHY_MODE4);
2135 if (hp_flags & MV_HP_ERRATA_60X1B2)
2136 writel(tmp, port_mmio + PHY_MODE3);
2137 }
2139 /* Revert values of pre-emphasis and signal amps to the saved ones */
2140 m2 = readl(port_mmio + PHY_MODE2);
2142 m2 &= ~MV_M2_PREAMP_MASK;
2143 m2 |= hpriv->signal[port].amps;
2144 m2 |= hpriv->signal[port].pre;
2145 m2 &= ~(1 << 16);
2147 /* according to mvSata 3.6.1, some IIE values are fixed */
2148 if (IS_GEN_IIE(hpriv)) {
2149 m2 &= ~0xC30FF01F;
2150 m2 |= 0x0000900F;
2151 }
2153 writel(m2, port_mmio + PHY_MODE2);
2156 /* TODO: use the generic LED interface to configure the SATA Presence */
2157 /* & Activity LEDs on the board */
2158 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2159 void __iomem *mmio)
2161 return;
2164 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2165 void __iomem *mmio)
2167 void __iomem *port_mmio;
2168 u32 tmp;
2170 port_mmio = mv_port_base(mmio, idx);
2171 tmp = readl(port_mmio + PHY_MODE2);
2173 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2174 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2177 #undef ZERO
2178 #define ZERO(reg) writel(0, port_mmio + (reg))
2179 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2180 void __iomem *mmio, unsigned int port)
2182 void __iomem *port_mmio = mv_port_base(mmio, port);
2185 * The datasheet warns against setting ATA_RST when EDMA is active
2186 * (but doesn't say what the problem might be). So we first try
2187 * to disable the EDMA engine before doing the ATA_RST operation.
2189 mv_reset_channel(hpriv, mmio, port);
2191 ZERO(0x028); /* command */
2192 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2193 ZERO(0x004); /* timer */
2194 ZERO(0x008); /* irq err cause */
2195 ZERO(0x00c); /* irq err mask */
2196 ZERO(0x010); /* rq bah */
2197 ZERO(0x014); /* rq inp */
2198 ZERO(0x018); /* rq outp */
2199 ZERO(0x01c); /* respq bah */
2200 ZERO(0x024); /* respq outp */
2201 ZERO(0x020); /* respq inp */
2202 ZERO(0x02c); /* test control */
2203 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2206 #undef ZERO
2208 #define ZERO(reg) writel(0, hc_mmio + (reg))
2209 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2210 void __iomem *mmio)
2212 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2214 ZERO(0x00c);
2215 ZERO(0x010);
2216 ZERO(0x014);
2220 #undef ZERO
2222 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2223 void __iomem *mmio, unsigned int n_hc)
2225 unsigned int port;
2227 for (port = 0; port < hpriv->n_ports; port++)
2228 mv_soc_reset_hc_port(hpriv, mmio, port);
2230 mv_soc_reset_one_hc(hpriv, mmio);
2232 return 0;
2235 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2236 void __iomem *mmio)
2238 return;
2241 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2243 return;
2246 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2248 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2250 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2251 if (want_gen2i)
2252 ifctl |= (1 << 7); /* enable gen2i speed */
2253 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2257 * Caller must ensure that EDMA is not active,
2258 * by first doing mv_stop_edma() where needed.
2260 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2261 unsigned int port_no)
2263 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2265 mv_stop_edma_engine(port_mmio);
2266 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2268 if (!IS_GEN_I(hpriv)) {
2269 /* Enable 3.0gb/s link speed */
2270 mv_setup_ifctl(port_mmio, 1);
2271 }
2272 /*
2273 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2274 * link, and physical layers. It resets all SATA interface registers
2275 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2276 */
2277 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2278 udelay(25); /* allow reset propagation */
2279 writelfl(0, port_mmio + EDMA_CMD_OFS);
2281 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2283 if (IS_GEN_I(hpriv))
2284 mdelay(1);
2287 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2288 unsigned long deadline)
2290 struct ata_port *ap = link->ap;
2291 struct mv_host_priv *hpriv = ap->host->private_data;
2292 struct mv_port_priv *pp = ap->private_data;
2293 void __iomem *mmio = hpriv->base;
2294 int rc, attempts = 0, extra = 0;
2295 u32 sstatus;
2296 bool online;
2298 mv_reset_channel(hpriv, mmio, ap->port_no);
2299 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2301 /* Workaround for errata FEr SATA#10 (part 2) */
2302 do {
2303 const unsigned long *timing =
2304 sata_ehc_deb_timing(&link->eh_context);
2306 rc = sata_link_hardreset(link, timing, deadline + extra,
2307 &online, NULL);
2308 if (rc)
2309 return rc;
2310 sata_scr_read(link, SCR_STATUS, &sstatus);
2311 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2312 /* Force 1.5gb/s link speed and try again */
2313 mv_setup_ifctl(mv_ap_base(ap), 0);
2314 if (time_after(jiffies + HZ, deadline))
2315 extra = HZ; /* only extend it once, max */
2316 }
2317 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
2319 return rc;
2320 }
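/*
 * Illustrative sketch (not part of the driver): how the raw SStatus values
 * tested in mv_hardreset() above decompose into the standard DET/SPD/IPM
 * fields. 0x113 is "device present, phy comm established" (DET=3) at Gen1
 * speed; 0x123 is the same at Gen2; 0x121 (DET=1) means a device was sensed
 * but phy communication never came up, which is why the loop above drops to
 * 1.5Gb/s and retries. The helper name is hypothetical.
 */
static inline void mv_decode_sstatus(u32 sstatus, u32 *det, u32 *spd, u32 *ipm)
{
	*det = sstatus & 0xf;		/* device detection */
	*spd = (sstatus >> 4) & 0xf;	/* negotiated interface speed */
	*ipm = (sstatus >> 8) & 0xf;	/* interface power management state */
}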
2322 static void mv_eh_freeze(struct ata_port *ap)
2324 struct mv_host_priv *hpriv = ap->host->private_data;
2325 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2326 u32 tmp, mask;
2327 unsigned int shift;
2329 /* FIXME: handle coalescing completion events properly */
2331 shift = ap->port_no * 2;
2332 if (hc > 0)
2333 shift++;
2335 mask = 0x3 << shift;
2337 /* disable assertion of portN err, done events */
2338 tmp = readl(hpriv->main_mask_reg_addr);
2339 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2340 }
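/*
 * Illustrative sketch (not part of the driver): the bit pair in the main
 * IRQ cause/mask register that mv_eh_freeze() above (and mv_eh_thaw()
 * below) manipulate for one port. Ports 0-3 use bits 2*port and 2*port + 1;
 * ports on the second host controller are shifted up by one extra bit.
 * The helper name is hypothetical.
 */
static inline u32 mv_port_main_irq_bits(unsigned int port_no)
{
	unsigned int shift = port_no * 2;

	if (port_no > 3)		/* port lives on host controller 1 */
		shift++;
	return 0x3 << shift;		/* "err" and "done" bits for this port */
}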
2342 static void mv_eh_thaw(struct ata_port *ap)
2344 struct mv_host_priv *hpriv = ap->host->private_data;
2345 void __iomem *mmio = hpriv->base;
2346 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2347 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2348 void __iomem *port_mmio = mv_ap_base(ap);
2349 u32 tmp, mask, hc_irq_cause;
2350 unsigned int shift, hc_port_no = ap->port_no;
2352 /* FIXME: handle coalescing completion events properly */
2354 shift = ap->port_no * 2;
2355 if (hc > 0) {
2356 shift++;
2357 hc_port_no -= 4;
2358 }
2360 mask = 0x3 << shift;
2362 /* clear EDMA errors on this port */
2363 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2365 /* clear pending irq events */
2366 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2367 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2368 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2369 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2371 /* enable assertion of portN err, done events */
2372 tmp = readl(hpriv->main_mask_reg_addr);
2373 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2377 * mv_port_init - Perform some early initialization on a single port.
2378 * @port: libata data structure storing shadow register addresses
2379 * @port_mmio: base address of the port
2381 * Initialize shadow register mmio addresses, clear outstanding
2382 * interrupts on the port, and unmask interrupts for the future
2383 * start of the port.
2385 * LOCKING:
2386 * Inherited from caller.
2388 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2390 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2391 unsigned serr_ofs;
2393 /* PIO related setup
2395 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2396 port->error_addr =
2397 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2398 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2399 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2400 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2401 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2402 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2403 port->status_addr =
2404 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2405 /* special case: control/altstatus doesn't have ATA_REG_ address */
2406 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2408 /* unused: */
2409 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2411 /* Clear any currently outstanding port interrupt conditions */
2412 serr_ofs = mv_scr_offset(SCR_ERROR);
2413 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2414 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2416 /* unmask all non-transient EDMA error interrupts */
2417 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2419 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2420 readl(port_mmio + EDMA_CFG_OFS),
2421 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2422 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2425 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2427 struct pci_dev *pdev = to_pci_dev(host->dev);
2428 struct mv_host_priv *hpriv = host->private_data;
2429 u32 hp_flags = hpriv->hp_flags;
2431 switch (board_idx) {
2432 case chip_5080:
2433 hpriv->ops = &mv5xxx_ops;
2434 hp_flags |= MV_HP_GEN_I;
2436 switch (pdev->revision) {
2437 case 0x1:
2438 hp_flags |= MV_HP_ERRATA_50XXB0;
2439 break;
2440 case 0x3:
2441 hp_flags |= MV_HP_ERRATA_50XXB2;
2442 break;
2443 default:
2444 dev_printk(KERN_WARNING, &pdev->dev,
2445 "Applying 50XXB2 workarounds to unknown rev\n");
2446 hp_flags |= MV_HP_ERRATA_50XXB2;
2447 break;
2448 }
2449 break;
2451 case chip_504x:
2452 case chip_508x:
2453 hpriv->ops = &mv5xxx_ops;
2454 hp_flags |= MV_HP_GEN_I;
2456 switch (pdev->revision) {
2457 case 0x0:
2458 hp_flags |= MV_HP_ERRATA_50XXB0;
2459 break;
2460 case 0x3:
2461 hp_flags |= MV_HP_ERRATA_50XXB2;
2462 break;
2463 default:
2464 dev_printk(KERN_WARNING, &pdev->dev,
2465 "Applying B2 workarounds to unknown rev\n");
2466 hp_flags |= MV_HP_ERRATA_50XXB2;
2467 break;
2468 }
2469 break;
2471 case chip_604x:
2472 case chip_608x:
2473 hpriv->ops = &mv6xxx_ops;
2474 hp_flags |= MV_HP_GEN_II;
2476 switch (pdev->revision) {
2477 case 0x7:
2478 hp_flags |= MV_HP_ERRATA_60X1B2;
2479 break;
2480 case 0x9:
2481 hp_flags |= MV_HP_ERRATA_60X1C0;
2482 break;
2483 default:
2484 dev_printk(KERN_WARNING, &pdev->dev,
2485 "Applying B2 workarounds to unknown rev\n");
2486 hp_flags |= MV_HP_ERRATA_60X1B2;
2487 break;
2488 }
2489 break;
2491 case chip_7042:
2492 hp_flags |= MV_HP_PCIE;
2493 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2494 (pdev->device == 0x2300 || pdev->device == 0x2310))
2495 {
2496 /*
2497 * Highpoint RocketRAID PCIe 23xx series cards:
2499 * Unconfigured drives are treated as "Legacy"
2500 * by the BIOS, and it overwrites sector 8 with
2501 * a "Lgcy" metadata block prior to Linux boot.
2503 * Configured drives (RAID or JBOD) leave sector 8
2504 * alone, but instead overwrite a high numbered
2505 * sector for the RAID metadata. This sector can
2506 * be determined exactly, by truncating the physical
2507 * drive capacity to a nice even GB value.
2509 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2511 * Warn the user, lest they think we're just buggy.
2512 */
2513 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2514 " BIOS CORRUPTS DATA on all attached drives,"
2515 " regardless of if/how they are configured."
2516 " BEWARE!\n");
2517 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2518 " use sectors 8-9 on \"Legacy\" drives,"
2519 " and avoid the final two gigabytes on"
2520 " all RocketRAID BIOS initialized drives.\n");
2521 }
2522 case chip_6042:
2523 hpriv->ops = &mv6xxx_ops;
2524 hp_flags |= MV_HP_GEN_IIE;
2526 switch (pdev->revision) {
2527 case 0x0:
2528 hp_flags |= MV_HP_ERRATA_XX42A0;
2529 break;
2530 case 0x1:
2531 hp_flags |= MV_HP_ERRATA_60X1C0;
2532 break;
2533 default:
2534 dev_printk(KERN_WARNING, &pdev->dev,
2535 "Applying 60X1C0 workarounds to unknown rev\n");
2536 hp_flags |= MV_HP_ERRATA_60X1C0;
2537 break;
2538 }
2539 break;
2540 case chip_soc:
2541 hpriv->ops = &mv_soc_ops;
2542 hp_flags |= MV_HP_ERRATA_60X1C0;
2543 break;
2545 default:
2546 dev_printk(KERN_ERR, host->dev,
2547 "BUG: invalid board index %u\n", board_idx);
2548 return 1;
2551 hpriv->hp_flags = hp_flags;
2552 if (hp_flags & MV_HP_PCIE) {
2553 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2554 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2555 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2556 } else {
2557 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2558 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2559 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2560 }
2562 return 0;
2563 }
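/*
 * Illustrative sketch (not part of the driver): the formula quoted in the
 * Highpoint RocketRAID comment inside mv_chip_id() above. Masking with
 * ~0xfffff rounds the capacity down to a multiple of 0x100000 sectors
 * (512 MiB with 512-byte sectors), which is where that BIOS drops its RAID
 * metadata block. The helper name is hypothetical.
 */
static inline u64 hpt_raid_metadata_sector(u64 n_sectors)
{
	return n_sectors & ~0xfffffULL;
}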
2566 * mv_init_host - Perform some early initialization of the host.
2567 * @host: ATA host to initialize
2568 * @board_idx: controller index
2570 * If possible, do an early global reset of the host. Then do
2571 * our port init and clear/unmask all/relevant host interrupts.
2573 * LOCKING:
2574 * Inherited from caller.
2576 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2578 int rc = 0, n_hc, port, hc;
2579 struct mv_host_priv *hpriv = host->private_data;
2580 void __iomem *mmio = hpriv->base;
2582 rc = mv_chip_id(host, board_idx);
2583 if (rc)
2584 goto done;
2586 if (HAS_PCI(host)) {
2587 hpriv->main_cause_reg_addr = hpriv->base +
2588 HC_MAIN_IRQ_CAUSE_OFS;
2589 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2590 } else {
2591 hpriv->main_cause_reg_addr = hpriv->base +
2592 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2593 hpriv->main_mask_reg_addr = hpriv->base +
2594 HC_SOC_MAIN_IRQ_MASK_OFS;
2595 }
2596 /* global interrupt mask */
2597 writel(0, hpriv->main_mask_reg_addr);
2599 n_hc = mv_get_hc_count(host->ports[0]->flags);
2601 for (port = 0; port < host->n_ports; port++)
2602 hpriv->ops->read_preamp(hpriv, port, mmio);
2604 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2605 if (rc)
2606 goto done;
2608 hpriv->ops->reset_flash(hpriv, mmio);
2609 hpriv->ops->reset_bus(host, mmio);
2610 hpriv->ops->enable_leds(hpriv, mmio);
2612 for (port = 0; port < host->n_ports; port++) {
2613 struct ata_port *ap = host->ports[port];
2614 void __iomem *port_mmio = mv_port_base(mmio, port);
2616 mv_port_init(&ap->ioaddr, port_mmio);
2618 #ifdef CONFIG_PCI
2619 if (HAS_PCI(host)) {
2620 unsigned int offset = port_mmio - mmio;
2621 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2622 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2623 }
2624 #endif
2625 }
2627 for (hc = 0; hc < n_hc; hc++) {
2628 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2630 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2631 "(before clear)=0x%08x\n", hc,
2632 readl(hc_mmio + HC_CFG_OFS),
2633 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2635 /* Clear any currently outstanding hc interrupt conditions */
2636 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2637 }
2639 if (HAS_PCI(host)) {
2640 /* Clear any currently outstanding host interrupt conditions */
2641 writelfl(0, mmio + hpriv->irq_cause_ofs);
2643 /* and unmask interrupt generation for host regs */
2644 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2645 if (IS_GEN_I(hpriv))
2646 writelfl(~HC_MAIN_MASKED_IRQS_5,
2647 hpriv->main_mask_reg_addr);
2648 else
2649 writelfl(~HC_MAIN_MASKED_IRQS,
2650 hpriv->main_mask_reg_addr);
2652 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2653 "PCI int cause/mask=0x%08x/0x%08x\n",
2654 readl(hpriv->main_cause_reg_addr),
2655 readl(hpriv->main_mask_reg_addr),
2656 readl(mmio + hpriv->irq_cause_ofs),
2657 readl(mmio + hpriv->irq_mask_ofs));
2658 } else {
2659 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2660 hpriv->main_mask_reg_addr);
2661 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2662 readl(hpriv->main_cause_reg_addr),
2663 readl(hpriv->main_mask_reg_addr));
2664 }
2665 done:
2666 return rc;
2669 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2671 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2672 MV_CRQB_Q_SZ, 0);
2673 if (!hpriv->crqb_pool)
2674 return -ENOMEM;
2676 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2677 MV_CRPB_Q_SZ, 0);
2678 if (!hpriv->crpb_pool)
2679 return -ENOMEM;
2681 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2682 MV_SG_TBL_SZ, 0);
2683 if (!hpriv->sg_tbl_pool)
2684 return -ENOMEM;
2686 return 0;
2687 }
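/*
 * Illustrative sketch (not part of the driver): carving one request-queue
 * buffer out of the crqb pool created above. dmam_pool_create() is
 * device-managed, which is why the -ENOMEM paths above can simply return
 * without tearing anything down. The helper name is hypothetical.
 */
static void *mv_crqb_alloc_example(struct mv_host_priv *hpriv,
				   dma_addr_t *dma_handle)
{
	/* Port setup runs in process context, so GFP_KERNEL is fine. */
	return dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, dma_handle);
}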
2689 /**
2690 * mv_platform_probe - handle a positive probe of an SoC Marvell
2691 * host
2692 * @pdev: platform device found
2694 * LOCKING:
2695 * Inherited from caller.
2697 static int mv_platform_probe(struct platform_device *pdev)
2699 static int printed_version;
2700 const struct mv_sata_platform_data *mv_platform_data;
2701 const struct ata_port_info *ppi[] =
2702 { &mv_port_info[chip_soc], NULL };
2703 struct ata_host *host;
2704 struct mv_host_priv *hpriv;
2705 struct resource *res;
2706 int n_ports, rc;
2708 if (!printed_version++)
2709 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2712 * Simple resource validation ..
2714 if (unlikely(pdev->num_resources != 2)) {
2715 dev_err(&pdev->dev, "invalid number of resources\n");
2716 return -EINVAL;
2717 }
2719 /*
2720 * Get the register base first
2721 */
2722 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2723 if (res == NULL)
2724 return -EINVAL;
2726 /* allocate host */
2727 mv_platform_data = pdev->dev.platform_data;
2728 n_ports = mv_platform_data->n_ports;
2730 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2731 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2733 if (!host || !hpriv)
2734 return -ENOMEM;
2735 host->private_data = hpriv;
2736 hpriv->n_ports = n_ports;
2738 host->iomap = NULL;
2739 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2740 res->end - res->start + 1);
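/*
 * The platform resource presumably maps only the SATAHC0 register block,
 * whereas mv_port_base() and the other offset helpers expect a base that
 * covers the whole register file (as the PCI BAR does); subtracting
 * MV_SATAHC0_REG_BASE below reconstructs that chip-wide base address.
 */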
2741 hpriv->base -= MV_SATAHC0_REG_BASE;
2743 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2744 if (rc)
2745 return rc;
2747 /* initialize adapter */
2748 rc = mv_init_host(host, chip_soc);
2749 if (rc)
2750 return rc;
2752 dev_printk(KERN_INFO, &pdev->dev,
2753 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2754 host->n_ports);
2756 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2757 IRQF_SHARED, &mv6_sht);
2762 * mv_platform_remove - unplug a platform interface
2763 * @pdev: platform device
2765 * A platform bus SATA device has been unplugged. Perform the needed
2766 * cleanup. Also called on module unload for any active devices.
2768 static int __devexit mv_platform_remove(struct platform_device *pdev)
2770 struct device *dev = &pdev->dev;
2771 struct ata_host *host = dev_get_drvdata(dev);
2773 ata_host_detach(host);
2774 return 0;
2777 static struct platform_driver mv_platform_driver = {
2778 .probe = mv_platform_probe,
2779 .remove = __devexit_p(mv_platform_remove),
2780 .driver = {
2781 .name = DRV_NAME,
2782 .owner = THIS_MODULE,
2787 #ifdef CONFIG_PCI
2788 static int mv_pci_init_one(struct pci_dev *pdev,
2789 const struct pci_device_id *ent);
2792 static struct pci_driver mv_pci_driver = {
2793 .name = DRV_NAME,
2794 .id_table = mv_pci_tbl,
2795 .probe = mv_pci_init_one,
2796 .remove = ata_pci_remove_one,
2800 * module options
2802 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2805 /* move to PCI layer or libata core? */
2806 static int pci_go_64(struct pci_dev *pdev)
2808 int rc;
2810 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2811 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2812 if (rc) {
2813 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2814 if (rc) {
2815 dev_printk(KERN_ERR, &pdev->dev,
2816 "64-bit DMA enable failed\n");
2817 return rc;
2818 }
2819 }
2820 } else {
2821 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2822 if (rc) {
2823 dev_printk(KERN_ERR, &pdev->dev,
2824 "32-bit DMA enable failed\n");
2825 return rc;
2826 }
2827 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2828 if (rc) {
2829 dev_printk(KERN_ERR, &pdev->dev,
2830 "32-bit consistent DMA enable failed\n");
2831 return rc;
2832 }
2833 }
2835 return rc;
2839 * mv_print_info - Dump key info to kernel log for perusal.
2840 * @host: ATA host to print info about
2842 * FIXME: complete this.
2844 * LOCKING:
2845 * Inherited from caller.
2847 static void mv_print_info(struct ata_host *host)
2849 struct pci_dev *pdev = to_pci_dev(host->dev);
2850 struct mv_host_priv *hpriv = host->private_data;
2851 u8 scc;
2852 const char *scc_s, *gen;
2854 /* Use this to determine the HW stepping of the chip so we know
2855 * which errata to work around
2857 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2858 if (scc == 0)
2859 scc_s = "SCSI";
2860 else if (scc == 0x01)
2861 scc_s = "RAID";
2862 else
2863 scc_s = "?";
2865 if (IS_GEN_I(hpriv))
2866 gen = "I";
2867 else if (IS_GEN_II(hpriv))
2868 gen = "II";
2869 else if (IS_GEN_IIE(hpriv))
2870 gen = "IIE";
2871 else
2872 gen = "?";
2874 dev_printk(KERN_INFO, &pdev->dev,
2875 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2876 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2877 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2881 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2882 * @pdev: PCI device found
2883 * @ent: PCI device ID entry for the matched host
2885 * LOCKING:
2886 * Inherited from caller.
2888 static int mv_pci_init_one(struct pci_dev *pdev,
2889 const struct pci_device_id *ent)
2891 static int printed_version;
2892 unsigned int board_idx = (unsigned int)ent->driver_data;
2893 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2894 struct ata_host *host;
2895 struct mv_host_priv *hpriv;
2896 int n_ports, rc;
2898 if (!printed_version++)
2899 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2901 /* allocate host */
2902 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2904 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2905 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2906 if (!host || !hpriv)
2907 return -ENOMEM;
2908 host->private_data = hpriv;
2909 hpriv->n_ports = n_ports;
2911 /* acquire resources */
2912 rc = pcim_enable_device(pdev);
2913 if (rc)
2914 return rc;
2916 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2917 if (rc == -EBUSY)
2918 pcim_pin_device(pdev);
2919 if (rc)
2920 return rc;
2921 host->iomap = pcim_iomap_table(pdev);
2922 hpriv->base = host->iomap[MV_PRIMARY_BAR];
2924 rc = pci_go_64(pdev);
2925 if (rc)
2926 return rc;
2928 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2929 if (rc)
2930 return rc;
2932 /* initialize adapter */
2933 rc = mv_init_host(host, board_idx);
2934 if (rc)
2935 return rc;
2937 /* Enable interrupts */
2938 if (msi && pci_enable_msi(pdev))
2939 pci_intx(pdev, 1);
2941 mv_dump_pci_cfg(pdev, 0x68);
2942 mv_print_info(host);
2944 pci_set_master(pdev);
2945 pci_try_set_mwi(pdev);
2946 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2947 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2949 #endif
2951 static int mv_platform_probe(struct platform_device *pdev);
2952 static int __devexit mv_platform_remove(struct platform_device *pdev);
2954 static int __init mv_init(void)
2956 int rc = -ENODEV;
2957 #ifdef CONFIG_PCI
2958 rc = pci_register_driver(&mv_pci_driver);
2959 if (rc < 0)
2960 return rc;
2961 #endif
2962 rc = platform_driver_register(&mv_platform_driver);
2964 #ifdef CONFIG_PCI
2965 if (rc < 0)
2966 pci_unregister_driver(&mv_pci_driver);
2967 #endif
2968 return rc;
2971 static void __exit mv_exit(void)
2973 #ifdef CONFIG_PCI
2974 pci_unregister_driver(&mv_pci_driver);
2975 #endif
2976 platform_driver_unregister(&mv_platform_driver);
2979 MODULE_AUTHOR("Brett Russ");
2980 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2981 MODULE_LICENSE("GPL");
2982 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2983 MODULE_VERSION(DRV_VERSION);
2984 MODULE_ALIAS("platform:" DRV_NAME);
2986 #ifdef CONFIG_PCI
2987 module_param(msi, int, 0444);
2988 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2989 #endif
2991 module_init(mv_init);
2992 module_exit(mv_exit);