sata_mv disable hotplug for now
[linux-2.6/mini2440.git] / drivers/ata/sata_mv.c
blob 162260d6fe197357161cc99378891dbd460d3bf9
1 /*
2 * sata_mv.c - Marvell SATA support
4 * Copyright 2008: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 sata_mv TODO list:
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
33 2) Improve/fix IRQ and error handling sequences.
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
37 4) Think about TCQ support here, and for libata in general
38 with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
43 6) Add port multiplier support (intermediate)
45 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
47 8) Develop a low-power-consumption strategy, and implement it.
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 like that.
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is not
56 worth the added latency cost.
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
67 #include <linux/kernel.h>
68 #include <linux/module.h>
69 #include <linux/pci.h>
70 #include <linux/init.h>
71 #include <linux/blkdev.h>
72 #include <linux/delay.h>
73 #include <linux/interrupt.h>
74 #include <linux/dmapool.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <linux/platform_device.h>
78 #include <linux/ata_platform.h>
79 #include <scsi/scsi_host.h>
80 #include <scsi/scsi_cmnd.h>
81 #include <scsi/scsi_device.h>
82 #include <linux/libata.h>
84 #define DRV_NAME "sata_mv"
85 #define DRV_VERSION "1.20"
87 enum {
88 /* BAR's are enumerated in terms of pci_resource_start() terms */
89 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
90 MV_IO_BAR = 2, /* offset 0x18: IO space */
91 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
93 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
94 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
96 MV_PCI_REG_BASE = 0,
97 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
98 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
99 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
100 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
101 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
102 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
104 MV_SATAHC0_REG_BASE = 0x20000,
105 MV_FLASH_CTL = 0x1046c,
106 MV_GPIO_PORT_CTL = 0x104f0,
107 MV_RESET_CFG = 0x180d8,
109 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
110 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
112 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
114 MV_MAX_Q_DEPTH = 32,
115 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
117 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
118 * CRPB needs alignment on a 256B boundary. Size == 256B
119 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
122 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
123 MV_MAX_SG_CT = 256,
124 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
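/*
 * For reference, with MV_MAX_Q_DEPTH == 32 the totals above work out to:
 * CRQB queue: 32 entries * 32 B == 1 KB (matching its 1 KB alignment),
 * CRPB queue: 32 entries *  8 B == 256 B, and each per-tag SG table:
 * 256 ePRDs * 16 B == 4 KB.
 */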
126 MV_PORTS_PER_HC = 4,
127 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
128 MV_PORT_HC_SHIFT = 2,
129 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
130 MV_PORT_MASK = 3,
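/*
 * Worked example: port 5 maps to HC (5 >> MV_PORT_HC_SHIFT) == 1 and
 * hard port (5 & MV_PORT_MASK) == 1, i.e. the second port of the
 * second host controller.
 */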
132 /* Host Flags */
133 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
134 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
135 /* SoC integrated controllers, no PCI interface */
136 MV_FLAG_SOC = (1 << 28),
138 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
139 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
140 ATA_FLAG_PIO_POLLING,
141 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
143 CRQB_FLAG_READ = (1 << 0),
144 CRQB_TAG_SHIFT = 1,
145 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
146 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
147 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
148 CRQB_CMD_ADDR_SHIFT = 8,
149 CRQB_CMD_CS = (0x2 << 11),
150 CRQB_CMD_LAST = (1 << 15),
152 CRPB_FLAG_STATUS_SHIFT = 8,
153 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
154 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
156 EPRD_FLAG_END_OF_TBL = (1 << 31),
158 /* PCI interface registers */
160 PCI_COMMAND_OFS = 0xc00,
162 PCI_MAIN_CMD_STS_OFS = 0xd30,
163 STOP_PCI_MASTER = (1 << 2),
164 PCI_MASTER_EMPTY = (1 << 3),
165 GLOB_SFT_RST = (1 << 4),
167 MV_PCI_MODE = 0xd00,
168 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
169 MV_PCI_DISC_TIMER = 0xd04,
170 MV_PCI_MSI_TRIGGER = 0xc38,
171 MV_PCI_SERR_MASK = 0xc28,
172 MV_PCI_XBAR_TMOUT = 0x1d04,
173 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
174 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
175 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
176 MV_PCI_ERR_COMMAND = 0x1d50,
178 PCI_IRQ_CAUSE_OFS = 0x1d58,
179 PCI_IRQ_MASK_OFS = 0x1d5c,
180 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
182 PCIE_IRQ_CAUSE_OFS = 0x1900,
183 PCIE_IRQ_MASK_OFS = 0x1910,
184 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
186 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
187 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
188 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
189 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
190 PORT0_ERR = (1 << 0), /* shift by port # */
191 PORT0_DONE = (1 << 1), /* shift by port # */
192 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
193 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
194 PCI_ERR = (1 << 18),
195 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
196 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
197 PORTS_0_3_COAL_DONE = (1 << 8),
198 PORTS_4_7_COAL_DONE = (1 << 17),
199 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
200 GPIO_INT = (1 << 22),
201 SELF_INT = (1 << 23),
202 TWSI_INT = (1 << 24),
203 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
204 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
205 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
206 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
207 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
208 HC_MAIN_RSVD),
209 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
210 HC_MAIN_RSVD_5),
211 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
213 /* SATAHC registers */
214 HC_CFG_OFS = 0,
216 HC_IRQ_CAUSE_OFS = 0x14,
217 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
218 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
219 DEV_IRQ = (1 << 8), /* shift by port # */
221 /* Shadow block registers */
222 SHD_BLK_OFS = 0x100,
223 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
225 /* SATA registers */
226 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
227 SATA_ACTIVE_OFS = 0x350,
228 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
230 LTMODE_OFS = 0x30c,
231 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
233 PHY_MODE3 = 0x310,
234 PHY_MODE4 = 0x314,
235 PHY_MODE2 = 0x330,
236 SATA_IFCTL_OFS = 0x344,
237 SATA_IFSTAT_OFS = 0x34c,
238 VENDOR_UNIQUE_FIS_OFS = 0x35c,
240 FIS_CFG_OFS = 0x360,
241 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
243 MV5_PHY_MODE = 0x74,
244 MV5_LT_MODE = 0x30,
245 MV5_PHY_CTL = 0x0C,
246 SATA_INTERFACE_CFG = 0x050,
248 MV_M2_PREAMP_MASK = 0x7e0,
250 /* Port registers */
251 EDMA_CFG_OFS = 0,
252 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
253 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
254 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
255 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
256 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
257 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
258 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
260 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
261 EDMA_ERR_IRQ_MASK_OFS = 0xc,
262 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
263 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
264 EDMA_ERR_DEV = (1 << 2), /* device error */
265 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
266 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
267 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
268 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
269 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
270 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
271 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
272 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
273 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
274 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
275 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
277 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
278 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
279 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
280 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
281 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
283 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
285 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
286 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
287 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
288 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
289 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
290 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
292 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
294 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
295 EDMA_ERR_OVERRUN_5 = (1 << 5),
296 EDMA_ERR_UNDERRUN_5 = (1 << 6),
298 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
299 EDMA_ERR_LNK_CTRL_RX_1 |
300 EDMA_ERR_LNK_CTRL_RX_3 |
301 EDMA_ERR_LNK_CTRL_TX |
302 /* temporary, until we fix hotplug: */
303 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
305 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
306 EDMA_ERR_PRD_PAR |
307 EDMA_ERR_DEV_DCON |
308 EDMA_ERR_DEV_CON |
309 EDMA_ERR_SERR |
310 EDMA_ERR_SELF_DIS |
311 EDMA_ERR_CRQB_PAR |
312 EDMA_ERR_CRPB_PAR |
313 EDMA_ERR_INTRL_PAR |
314 EDMA_ERR_IORDY |
315 EDMA_ERR_LNK_CTRL_RX_2 |
316 EDMA_ERR_LNK_DATA_RX |
317 EDMA_ERR_LNK_DATA_TX |
318 EDMA_ERR_TRANS_PROTO,
320 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
321 EDMA_ERR_PRD_PAR |
322 EDMA_ERR_DEV_DCON |
323 EDMA_ERR_DEV_CON |
324 EDMA_ERR_OVERRUN_5 |
325 EDMA_ERR_UNDERRUN_5 |
326 EDMA_ERR_SELF_DIS_5 |
327 EDMA_ERR_CRQB_PAR |
328 EDMA_ERR_CRPB_PAR |
329 EDMA_ERR_INTRL_PAR |
330 EDMA_ERR_IORDY,
332 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
333 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
335 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
336 EDMA_REQ_Q_PTR_SHIFT = 5,
338 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
339 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
340 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
341 EDMA_RSP_Q_PTR_SHIFT = 3,
343 EDMA_CMD_OFS = 0x28, /* EDMA command register */
344 EDMA_EN = (1 << 0), /* enable EDMA */
345 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
346 ATA_RST = (1 << 2), /* reset trans/link/phy */
348 EDMA_IORDY_TMOUT = 0x34,
349 EDMA_ARB_CFG = 0x38,
351 /* Host private flags (hp_flags) */
352 MV_HP_FLAG_MSI = (1 << 0),
353 MV_HP_ERRATA_50XXB0 = (1 << 1),
354 MV_HP_ERRATA_50XXB2 = (1 << 2),
355 MV_HP_ERRATA_60X1B2 = (1 << 3),
356 MV_HP_ERRATA_60X1C0 = (1 << 4),
357 MV_HP_ERRATA_XX42A0 = (1 << 5),
358 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
359 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
360 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
361 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
363 /* Port private flags (pp_flags) */
364 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
365 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
368 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
369 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
370 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
371 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
373 enum {
374 /* DMA boundary 0xffff is required by the s/g splitting
375 * we need on /length/ in mv_fill_sg().
377 MV_DMA_BOUNDARY = 0xffffU,
379 /* mask of register bits containing lower 32 bits
380 * of EDMA request queue DMA address
382 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
384 /* ditto, for response queue */
385 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
388 enum chip_type {
389 chip_504x,
390 chip_508x,
391 chip_5080,
392 chip_604x,
393 chip_608x,
394 chip_6042,
395 chip_7042,
396 chip_soc,
399 /* Command ReQuest Block: 32B */
400 struct mv_crqb {
401 __le32 sg_addr;
402 __le32 sg_addr_hi;
403 __le16 ctrl_flags;
404 __le16 ata_cmd[11];
407 struct mv_crqb_iie {
408 __le32 addr;
409 __le32 addr_hi;
410 __le32 flags;
411 __le32 len;
412 __le32 ata_cmd[4];
415 /* Command ResPonse Block: 8B */
416 struct mv_crpb {
417 __le16 id;
418 __le16 flags;
419 __le32 tmstmp;
422 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
423 struct mv_sg {
424 __le32 addr;
425 __le32 flags_size;
426 __le32 addr_hi;
427 __le32 reserved;
430 struct mv_port_priv {
431 struct mv_crqb *crqb;
432 dma_addr_t crqb_dma;
433 struct mv_crpb *crpb;
434 dma_addr_t crpb_dma;
435 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
436 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
438 unsigned int req_idx;
439 unsigned int resp_idx;
441 u32 pp_flags;
444 struct mv_port_signal {
445 u32 amps;
446 u32 pre;
449 struct mv_host_priv {
450 u32 hp_flags;
451 struct mv_port_signal signal[8];
452 const struct mv_hw_ops *ops;
453 int n_ports;
454 void __iomem *base;
455 void __iomem *main_cause_reg_addr;
456 void __iomem *main_mask_reg_addr;
457 u32 irq_cause_ofs;
458 u32 irq_mask_ofs;
459 u32 unmask_all_irqs;
461 * These consistent DMA memory pools give us guaranteed
462 * alignment for hardware-accessed data structures,
463 * and less memory waste in accomplishing the alignment.
465 struct dma_pool *crqb_pool;
466 struct dma_pool *crpb_pool;
467 struct dma_pool *sg_tbl_pool;
470 struct mv_hw_ops {
471 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port);
473 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
474 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
475 void __iomem *mmio);
476 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
477 unsigned int n_hc);
478 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
479 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
482 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
483 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
484 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
485 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
486 static int mv_port_start(struct ata_port *ap);
487 static void mv_port_stop(struct ata_port *ap);
488 static void mv_qc_prep(struct ata_queued_cmd *qc);
489 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
490 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
491 static int mv_hardreset(struct ata_link *link, unsigned int *class,
492 unsigned long deadline);
493 static void mv_eh_freeze(struct ata_port *ap);
494 static void mv_eh_thaw(struct ata_port *ap);
495 static void mv6_dev_config(struct ata_device *dev);
497 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
498 unsigned int port);
499 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
500 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
501 void __iomem *mmio);
502 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
503 unsigned int n_hc);
504 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
505 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
507 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port);
509 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
510 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
511 void __iomem *mmio);
512 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
513 unsigned int n_hc);
514 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
515 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
516 void __iomem *mmio);
517 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
518 void __iomem *mmio);
519 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
520 void __iomem *mmio, unsigned int n_hc);
521 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
522 void __iomem *mmio);
523 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
524 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
525 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
526 unsigned int port_no);
527 static int mv_stop_edma(struct ata_port *ap);
528 static int mv_stop_edma_engine(void __iomem *port_mmio);
529 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
531 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
532 * because we have to allow room for worst case splitting of
533 * PRDs for 64K boundaries in mv_fill_sg().
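 * For example: each of the (MV_MAX_SG_CT / 2) == 128 segments accepted
 * from the block layer may be split into two ePRDs by mv_fill_sg(), so
 * the per-tag table of MV_MAX_SG_CT == 256 entries is never overrun.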
535 static struct scsi_host_template mv5_sht = {
536 ATA_BASE_SHT(DRV_NAME),
537 .sg_tablesize = MV_MAX_SG_CT / 2,
538 .dma_boundary = MV_DMA_BOUNDARY,
541 static struct scsi_host_template mv6_sht = {
542 ATA_NCQ_SHT(DRV_NAME),
543 .can_queue = MV_MAX_Q_DEPTH - 1,
544 .sg_tablesize = MV_MAX_SG_CT / 2,
545 .dma_boundary = MV_DMA_BOUNDARY,
548 static struct ata_port_operations mv5_ops = {
549 .inherits = &ata_sff_port_ops,
551 .qc_prep = mv_qc_prep,
552 .qc_issue = mv_qc_issue,
554 .freeze = mv_eh_freeze,
555 .thaw = mv_eh_thaw,
556 .hardreset = mv_hardreset,
557 .error_handler = ata_std_error_handler, /* avoid SFF EH */
558 .post_internal_cmd = ATA_OP_NULL,
560 .scr_read = mv5_scr_read,
561 .scr_write = mv5_scr_write,
563 .port_start = mv_port_start,
564 .port_stop = mv_port_stop,
567 static struct ata_port_operations mv6_ops = {
568 .inherits = &mv5_ops,
569 .qc_defer = ata_std_qc_defer,
570 .dev_config = mv6_dev_config,
571 .scr_read = mv_scr_read,
572 .scr_write = mv_scr_write,
575 static struct ata_port_operations mv_iie_ops = {
576 .inherits = &mv6_ops,
577 .dev_config = ATA_OP_NULL,
578 .qc_prep = mv_qc_prep_iie,
581 static const struct ata_port_info mv_port_info[] = {
582 { /* chip_504x */
583 .flags = MV_COMMON_FLAGS,
584 .pio_mask = 0x1f, /* pio0-4 */
585 .udma_mask = ATA_UDMA6,
586 .port_ops = &mv5_ops,
588 { /* chip_508x */
589 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
590 .pio_mask = 0x1f, /* pio0-4 */
591 .udma_mask = ATA_UDMA6,
592 .port_ops = &mv5_ops,
594 { /* chip_5080 */
595 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
596 .pio_mask = 0x1f, /* pio0-4 */
597 .udma_mask = ATA_UDMA6,
598 .port_ops = &mv5_ops,
600 { /* chip_604x */
601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
602 ATA_FLAG_NCQ,
603 .pio_mask = 0x1f, /* pio0-4 */
604 .udma_mask = ATA_UDMA6,
605 .port_ops = &mv6_ops,
607 { /* chip_608x */
608 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
609 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
610 .pio_mask = 0x1f, /* pio0-4 */
611 .udma_mask = ATA_UDMA6,
612 .port_ops = &mv6_ops,
614 { /* chip_6042 */
615 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
616 ATA_FLAG_NCQ,
617 .pio_mask = 0x1f, /* pio0-4 */
618 .udma_mask = ATA_UDMA6,
619 .port_ops = &mv_iie_ops,
621 { /* chip_7042 */
622 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
623 ATA_FLAG_NCQ,
624 .pio_mask = 0x1f, /* pio0-4 */
625 .udma_mask = ATA_UDMA6,
626 .port_ops = &mv_iie_ops,
628 { /* chip_soc */
629 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
630 .pio_mask = 0x1f, /* pio0-4 */
631 .udma_mask = ATA_UDMA6,
632 .port_ops = &mv_iie_ops,
636 static const struct pci_device_id mv_pci_tbl[] = {
637 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
638 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
639 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
640 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
641 /* RocketRAID 1740/174x have different identifiers */
642 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
643 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
645 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
646 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
647 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
648 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
649 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
651 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
653 /* Adaptec 1430SA */
654 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
656 /* Marvell 7042 support */
657 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
659 /* Highpoint RocketRAID PCIe series */
660 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
661 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
663 { } /* terminate list */
666 static const struct mv_hw_ops mv5xxx_ops = {
667 .phy_errata = mv5_phy_errata,
668 .enable_leds = mv5_enable_leds,
669 .read_preamp = mv5_read_preamp,
670 .reset_hc = mv5_reset_hc,
671 .reset_flash = mv5_reset_flash,
672 .reset_bus = mv5_reset_bus,
675 static const struct mv_hw_ops mv6xxx_ops = {
676 .phy_errata = mv6_phy_errata,
677 .enable_leds = mv6_enable_leds,
678 .read_preamp = mv6_read_preamp,
679 .reset_hc = mv6_reset_hc,
680 .reset_flash = mv6_reset_flash,
681 .reset_bus = mv_reset_pci_bus,
684 static const struct mv_hw_ops mv_soc_ops = {
685 .phy_errata = mv6_phy_errata,
686 .enable_leds = mv_soc_enable_leds,
687 .read_preamp = mv_soc_read_preamp,
688 .reset_hc = mv_soc_reset_hc,
689 .reset_flash = mv_soc_reset_flash,
690 .reset_bus = mv_soc_reset_bus,
694 * Functions
697 static inline void writelfl(unsigned long data, void __iomem *addr)
699 writel(data, addr);
700 (void) readl(addr); /* flush to avoid PCI posted write */
703 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
705 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
708 static inline unsigned int mv_hc_from_port(unsigned int port)
710 return port >> MV_PORT_HC_SHIFT;
713 static inline unsigned int mv_hardport_from_port(unsigned int port)
715 return port & MV_PORT_MASK;
718 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
719 unsigned int port)
721 return mv_hc_base(base, mv_hc_from_port(port));
724 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
726 return mv_hc_base_from_port(base, port) +
727 MV_SATAHC_ARBTR_REG_SZ +
728 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
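/*
 * Worked example (dual-HC chip): port 5 -> HC 1, hard port 1, so its
 * port registers live at base + 0x20000 + 0x10000 + 0x2000 + 0x2000
 * == base + 0x34000.
 */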
731 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
733 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
734 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
736 return hc_mmio + ofs;
739 static inline void __iomem *mv_host_base(struct ata_host *host)
741 struct mv_host_priv *hpriv = host->private_data;
742 return hpriv->base;
745 static inline void __iomem *mv_ap_base(struct ata_port *ap)
747 return mv_port_base(mv_host_base(ap->host), ap->port_no);
750 static inline int mv_get_hc_count(unsigned long port_flags)
752 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
755 static void mv_set_edma_ptrs(void __iomem *port_mmio,
756 struct mv_host_priv *hpriv,
757 struct mv_port_priv *pp)
759 u32 index;
762 * initialize request queue
764 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
766 WARN_ON(pp->crqb_dma & 0x3ff);
767 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
768 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
769 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
771 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
772 writelfl((pp->crqb_dma & 0xffffffff) | index,
773 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
774 else
775 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
778 * initialize response queue
780 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
782 WARN_ON(pp->crpb_dma & 0xff);
783 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
785 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
786 writelfl((pp->crpb_dma & 0xffffffff) | index,
787 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
788 else
789 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
791 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
792 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
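/*
 * Note on the pointer registers written above: with a 32-deep queue the
 * request in-pointer index occupies bits 9:5 (EDMA_REQ_Q_PTR_SHIFT) and
 * the response out-pointer index bits 7:3 (EDMA_RSP_Q_PTR_SHIFT), so both
 * fit below the 1 KB- and 256 B-aligned BASE_LO fields that share the
 * same registers (masks 0xfffffc00 and 0xffffff00).
 */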
796 * mv_start_dma - Enable eDMA engine
797 * @port_mmio: port base address
798 * @pp: port private data
800 * Verify the local cache of the eDMA state is accurate with a
801 * WARN_ON.
803 * LOCKING:
804 * Inherited from caller.
806 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
807 struct mv_port_priv *pp, u8 protocol)
809 int want_ncq = (protocol == ATA_PROT_NCQ);
811 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
812 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
813 if (want_ncq != using_ncq)
814 mv_stop_edma(ap);
816 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
817 struct mv_host_priv *hpriv = ap->host->private_data;
818 int hard_port = mv_hardport_from_port(ap->port_no);
819 void __iomem *hc_mmio = mv_hc_base_from_port(
820 mv_host_base(ap->host), hard_port);
821 u32 hc_irq_cause, ipending;
823 /* clear EDMA event indicators, if any */
824 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
826 /* clear EDMA interrupt indicator, if any */
827 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
828 ipending = (DEV_IRQ << hard_port) |
829 (CRPB_DMA_DONE << hard_port);
830 if (hc_irq_cause & ipending) {
831 writelfl(hc_irq_cause & ~ipending,
832 hc_mmio + HC_IRQ_CAUSE_OFS);
835 mv_edma_cfg(ap, want_ncq);
837 /* clear FIS IRQ Cause */
838 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
840 mv_set_edma_ptrs(port_mmio, hpriv, pp);
842 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
843 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
845 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
849 * mv_stop_edma_engine - Disable eDMA engine
850 * @port_mmio: io base address
852 * LOCKING:
853 * Inherited from caller.
855 static int mv_stop_edma_engine(void __iomem *port_mmio)
857 int i;
859 /* Disable eDMA. The disable bit auto clears. */
860 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
862 /* Wait for the chip to confirm eDMA is off. */
863 for (i = 10000; i > 0; i--) {
864 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
865 if (!(reg & EDMA_EN))
866 return 0;
867 udelay(10);
869 return -EIO;
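/* Note: the poll above allows the chip up to 10000 * 10 us == ~100 ms
 * to report EDMA disabled before we give up.
 */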
872 static int mv_stop_edma(struct ata_port *ap)
874 void __iomem *port_mmio = mv_ap_base(ap);
875 struct mv_port_priv *pp = ap->private_data;
877 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
878 return 0;
879 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
880 if (mv_stop_edma_engine(port_mmio)) {
881 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
882 return -EIO;
884 return 0;
887 #ifdef ATA_DEBUG
888 static void mv_dump_mem(void __iomem *start, unsigned bytes)
890 int b, w;
891 for (b = 0; b < bytes; ) {
892 DPRINTK("%p: ", start + b);
893 for (w = 0; b < bytes && w < 4; w++) {
894 printk("%08x ", readl(start + b));
895 b += sizeof(u32);
897 printk("\n");
900 #endif
902 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
904 #ifdef ATA_DEBUG
905 int b, w;
906 u32 dw;
907 for (b = 0; b < bytes; ) {
908 DPRINTK("%02x: ", b);
909 for (w = 0; b < bytes && w < 4; w++) {
910 (void) pci_read_config_dword(pdev, b, &dw);
911 printk("%08x ", dw);
912 b += sizeof(u32);
914 printk("\n");
916 #endif
918 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
919 struct pci_dev *pdev)
921 #ifdef ATA_DEBUG
922 void __iomem *hc_base = mv_hc_base(mmio_base,
923 port >> MV_PORT_HC_SHIFT);
924 void __iomem *port_base;
925 int start_port, num_ports, p, start_hc, num_hcs, hc;
927 if (0 > port) {
928 start_hc = start_port = 0;
929 num_ports = 8; /* should be benign for 4 port devs */
930 num_hcs = 2;
931 } else {
932 start_hc = port >> MV_PORT_HC_SHIFT;
933 start_port = port;
934 num_ports = num_hcs = 1;
936 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
937 num_ports > 1 ? num_ports - 1 : start_port);
939 if (NULL != pdev) {
940 DPRINTK("PCI config space regs:\n");
941 mv_dump_pci_cfg(pdev, 0x68);
943 DPRINTK("PCI regs:\n");
944 mv_dump_mem(mmio_base+0xc00, 0x3c);
945 mv_dump_mem(mmio_base+0xd00, 0x34);
946 mv_dump_mem(mmio_base+0xf00, 0x4);
947 mv_dump_mem(mmio_base+0x1d00, 0x6c);
948 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
949 hc_base = mv_hc_base(mmio_base, hc);
950 DPRINTK("HC regs (HC %i):\n", hc);
951 mv_dump_mem(hc_base, 0x1c);
953 for (p = start_port; p < start_port + num_ports; p++) {
954 port_base = mv_port_base(mmio_base, p);
955 DPRINTK("EDMA regs (port %i):\n", p);
956 mv_dump_mem(port_base, 0x54);
957 DPRINTK("SATA regs (port %i):\n", p);
958 mv_dump_mem(port_base+0x300, 0x60);
960 #endif
963 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
965 unsigned int ofs;
967 switch (sc_reg_in) {
968 case SCR_STATUS:
969 case SCR_CONTROL:
970 case SCR_ERROR:
971 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
972 break;
973 case SCR_ACTIVE:
974 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
975 break;
976 default:
977 ofs = 0xffffffffU;
978 break;
980 return ofs;
983 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
985 unsigned int ofs = mv_scr_offset(sc_reg_in);
987 if (ofs != 0xffffffffU) {
988 *val = readl(mv_ap_base(ap) + ofs);
989 return 0;
990 } else
991 return -EINVAL;
994 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
996 unsigned int ofs = mv_scr_offset(sc_reg_in);
998 if (ofs != 0xffffffffU) {
999 writelfl(val, mv_ap_base(ap) + ofs);
1000 return 0;
1001 } else
1002 return -EINVAL;
1005 static void mv6_dev_config(struct ata_device *adev)
1008 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1009 * See mv_qc_prep() for more info.
1011 if (adev->flags & ATA_DFLAG_NCQ)
1012 if (adev->max_sectors > ATA_MAX_SECTORS)
1013 adev->max_sectors = ATA_MAX_SECTORS;
1016 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1018 u32 cfg;
1019 struct mv_port_priv *pp = ap->private_data;
1020 struct mv_host_priv *hpriv = ap->host->private_data;
1021 void __iomem *port_mmio = mv_ap_base(ap);
1023 /* set up non-NCQ EDMA configuration */
1024 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1026 if (IS_GEN_I(hpriv))
1027 cfg |= (1 << 8); /* enab config burst size mask */
1029 else if (IS_GEN_II(hpriv))
1030 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1032 else if (IS_GEN_IIE(hpriv)) {
1033 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1034 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1035 cfg |= (1 << 18); /* enab early completion */
1036 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1039 if (want_ncq) {
1040 cfg |= EDMA_CFG_NCQ;
1041 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1042 } else
1043 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1045 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1048 static void mv_port_free_dma_mem(struct ata_port *ap)
1050 struct mv_host_priv *hpriv = ap->host->private_data;
1051 struct mv_port_priv *pp = ap->private_data;
1052 int tag;
1054 if (pp->crqb) {
1055 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1056 pp->crqb = NULL;
1058 if (pp->crpb) {
1059 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1060 pp->crpb = NULL;
1063 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1064 * For later hardware, we have one unique sg_tbl per NCQ tag.
1066 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1067 if (pp->sg_tbl[tag]) {
1068 if (tag == 0 || !IS_GEN_I(hpriv))
1069 dma_pool_free(hpriv->sg_tbl_pool,
1070 pp->sg_tbl[tag],
1071 pp->sg_tbl_dma[tag]);
1072 pp->sg_tbl[tag] = NULL;
1078 * mv_port_start - Port specific init/start routine.
1079 * @ap: ATA channel to manipulate
1081 * Allocate and point to DMA memory, init port private memory,
1082 * zero indices.
1084 * LOCKING:
1085 * Inherited from caller.
1087 static int mv_port_start(struct ata_port *ap)
1089 struct device *dev = ap->host->dev;
1090 struct mv_host_priv *hpriv = ap->host->private_data;
1091 struct mv_port_priv *pp;
1092 void __iomem *port_mmio = mv_ap_base(ap);
1093 unsigned long flags;
1094 int tag;
1096 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1097 if (!pp)
1098 return -ENOMEM;
1099 ap->private_data = pp;
1101 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1102 if (!pp->crqb)
1103 return -ENOMEM;
1104 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1106 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1107 if (!pp->crpb)
1108 goto out_port_free_dma_mem;
1109 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1112 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1113 * For later hardware, we need one unique sg_tbl per NCQ tag.
1115 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1116 if (tag == 0 || !IS_GEN_I(hpriv)) {
1117 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1118 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1119 if (!pp->sg_tbl[tag])
1120 goto out_port_free_dma_mem;
1121 } else {
1122 pp->sg_tbl[tag] = pp->sg_tbl[0];
1123 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1127 spin_lock_irqsave(&ap->host->lock, flags);
1129 mv_edma_cfg(ap, 0);
1130 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1132 spin_unlock_irqrestore(&ap->host->lock, flags);
1134 /* Don't turn on EDMA here...do it before DMA commands only. Else
1135 * we'll be unable to send non-data, PIO, etc due to restricted access
1136 * to shadow regs.
1138 return 0;
1140 out_port_free_dma_mem:
1141 mv_port_free_dma_mem(ap);
1142 return -ENOMEM;
1146 * mv_port_stop - Port specific cleanup/stop routine.
1147 * @ap: ATA channel to manipulate
1149 * Stop DMA, cleanup port memory.
1151 * LOCKING:
1152 * This routine uses the host lock to protect the DMA stop.
1154 static void mv_port_stop(struct ata_port *ap)
1156 mv_stop_edma(ap);
1157 mv_port_free_dma_mem(ap);
1161 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1162 * @qc: queued command whose SG list to source from
1164 * Populate the SG list and mark the last entry.
1166 * LOCKING:
1167 * Inherited from caller.
1169 static void mv_fill_sg(struct ata_queued_cmd *qc)
1171 struct mv_port_priv *pp = qc->ap->private_data;
1172 struct scatterlist *sg;
1173 struct mv_sg *mv_sg, *last_sg = NULL;
1174 unsigned int si;
1176 mv_sg = pp->sg_tbl[qc->tag];
1177 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1178 dma_addr_t addr = sg_dma_address(sg);
1179 u32 sg_len = sg_dma_len(sg);
1181 while (sg_len) {
1182 u32 offset = addr & 0xffff;
1183 u32 len = sg_len;
1185 if ((offset + sg_len > 0x10000))
1186 len = 0x10000 - offset;
1188 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1189 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1190 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1192 sg_len -= len;
1193 addr += len;
1195 last_sg = mv_sg;
1196 mv_sg++;
1200 if (likely(last_sg))
1201 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
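/*
 * Worked example of the splitting above: a segment at addr 0x1fff0 with
 * length 0x20 crosses a 64K boundary, so it becomes two ePRDs:
 * {addr 0x1fff0, len 0x10} and {addr 0x20000, len 0x10}.
 */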
1204 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1206 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1207 (last ? CRQB_CMD_LAST : 0);
1208 *cmdw = cpu_to_le16(tmp);
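/*
 * Layout of the packed halfword: bits 7:0 hold the register data byte,
 * the shadow register address starts at bit 8 (CRQB_CMD_ADDR_SHIFT),
 * CRQB_CMD_CS puts 0x2 in bits 12:11, and bit 15 (CRQB_CMD_LAST) marks
 * the final command word of the CRQB.
 */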
1212 * mv_qc_prep - Host specific command preparation.
1213 * @qc: queued command to prepare
1215 * This routine simply redirects to the general purpose routine
1216 * if command is not DMA. Else, it handles prep of the CRQB
1217 * (command request block), does some sanity checking, and calls
1218 * the SG load routine.
1220 * LOCKING:
1221 * Inherited from caller.
1223 static void mv_qc_prep(struct ata_queued_cmd *qc)
1225 struct ata_port *ap = qc->ap;
1226 struct mv_port_priv *pp = ap->private_data;
1227 __le16 *cw;
1228 struct ata_taskfile *tf;
1229 u16 flags = 0;
1230 unsigned in_index;
1232 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1233 (qc->tf.protocol != ATA_PROT_NCQ))
1234 return;
1236 /* Fill in command request block
1238 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1239 flags |= CRQB_FLAG_READ;
1240 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1241 flags |= qc->tag << CRQB_TAG_SHIFT;
1243 /* get current queue index from software */
1244 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1246 pp->crqb[in_index].sg_addr =
1247 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1248 pp->crqb[in_index].sg_addr_hi =
1249 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1250 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1252 cw = &pp->crqb[in_index].ata_cmd[0];
1253 tf = &qc->tf;
1255 /* Sadly, the CRQB cannot accommodate all registers--there are
1256 * only 11 bytes...so we must pick and choose required
1257 * registers based on the command. So, we drop feature and
1258 * hob_feature for [RW] DMA commands, but they are needed for
1259 * NCQ. NCQ will drop hob_nsect.
1261 switch (tf->command) {
1262 case ATA_CMD_READ:
1263 case ATA_CMD_READ_EXT:
1264 case ATA_CMD_WRITE:
1265 case ATA_CMD_WRITE_EXT:
1266 case ATA_CMD_WRITE_FUA_EXT:
1267 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1268 break;
1269 case ATA_CMD_FPDMA_READ:
1270 case ATA_CMD_FPDMA_WRITE:
1271 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1272 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1273 break;
1274 default:
1275 /* The only other commands EDMA supports in non-queued and
1276 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1277 * of which are defined/used by Linux. If we get here, this
1278 * driver needs work.
1280 * FIXME: modify libata to give qc_prep a return value and
1281 * return error here.
1283 BUG_ON(tf->command);
1284 break;
1286 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1287 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1288 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1289 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1290 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1291 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1292 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1293 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1294 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1296 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1297 return;
1298 mv_fill_sg(qc);
1302 * mv_qc_prep_iie - Host specific command preparation.
1303 * @qc: queued command to prepare
1305 * This routine simply redirects to the general purpose routine
1306 * if command is not DMA. Else, it handles prep of the CRQB
1307 * (command request block), does some sanity checking, and calls
1308 * the SG load routine.
1310 * LOCKING:
1311 * Inherited from caller.
1313 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1315 struct ata_port *ap = qc->ap;
1316 struct mv_port_priv *pp = ap->private_data;
1317 struct mv_crqb_iie *crqb;
1318 struct ata_taskfile *tf;
1319 unsigned in_index;
1320 u32 flags = 0;
1322 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1323 (qc->tf.protocol != ATA_PROT_NCQ))
1324 return;
1326 /* Fill in Gen IIE command request block */
1327 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1328 flags |= CRQB_FLAG_READ;
1330 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1331 flags |= qc->tag << CRQB_TAG_SHIFT;
1332 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1334 /* get current queue index from software */
1335 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1337 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1338 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1339 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1340 crqb->flags = cpu_to_le32(flags);
1342 tf = &qc->tf;
1343 crqb->ata_cmd[0] = cpu_to_le32(
1344 (tf->command << 16) |
1345 (tf->feature << 24)
1347 crqb->ata_cmd[1] = cpu_to_le32(
1348 (tf->lbal << 0) |
1349 (tf->lbam << 8) |
1350 (tf->lbah << 16) |
1351 (tf->device << 24)
1353 crqb->ata_cmd[2] = cpu_to_le32(
1354 (tf->hob_lbal << 0) |
1355 (tf->hob_lbam << 8) |
1356 (tf->hob_lbah << 16) |
1357 (tf->hob_feature << 24)
1359 crqb->ata_cmd[3] = cpu_to_le32(
1360 (tf->nsect << 0) |
1361 (tf->hob_nsect << 8)
1364 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1365 return;
1366 mv_fill_sg(qc);
1370 * mv_qc_issue - Initiate a command to the host
1371 * @qc: queued command to start
1373 * This routine simply redirects to the general purpose routine
1374 * if command is not DMA. Else, it sanity checks our local
1375 * caches of the request producer/consumer indices then enables
1376 * DMA and bumps the request producer index.
1378 * LOCKING:
1379 * Inherited from caller.
1381 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1383 struct ata_port *ap = qc->ap;
1384 void __iomem *port_mmio = mv_ap_base(ap);
1385 struct mv_port_priv *pp = ap->private_data;
1386 u32 in_index;
1388 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1389 (qc->tf.protocol != ATA_PROT_NCQ)) {
1391 * We're about to send a non-EDMA capable command to the
1392 * port. Turn off EDMA so there won't be problems accessing
1393 * shadow block, etc registers.
1395 mv_stop_edma(ap);
1396 return ata_sff_qc_issue(qc);
1399 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1401 pp->req_idx++;
1403 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1405 /* and write the request in pointer to kick the EDMA to life */
1406 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1407 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1409 return 0;
1413 * mv_err_intr - Handle error interrupts on the port
1414 * @ap: ATA channel to manipulate
1415 * @qc: active queued command, if any
1417 * In most cases, just clear the interrupt and move on. However,
1418 * some cases require an eDMA reset, which also performs a COMRESET.
1419 * The SERR case requires a clear of pending errors in the SATA
1420 * SERROR register. Finally, if the port disabled DMA,
1421 * update our cached copy to match.
1423 * LOCKING:
1424 * Inherited from caller.
1426 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1428 void __iomem *port_mmio = mv_ap_base(ap);
1429 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1430 struct mv_port_priv *pp = ap->private_data;
1431 struct mv_host_priv *hpriv = ap->host->private_data;
1432 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1433 unsigned int action = 0, err_mask = 0;
1434 struct ata_eh_info *ehi = &ap->link.eh_info;
1436 ata_ehi_clear_desc(ehi);
1438 if (!edma_enabled) {
1439 /* just a guess: do we need to do this? should we
1440 * expand this, and do it in all cases?
1442 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1443 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1446 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1448 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1451 * all generations share these EDMA error cause bits
1454 if (edma_err_cause & EDMA_ERR_DEV)
1455 err_mask |= AC_ERR_DEV;
1456 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1457 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1458 EDMA_ERR_INTRL_PAR)) {
1459 err_mask |= AC_ERR_ATA_BUS;
1460 action |= ATA_EH_RESET;
1461 ata_ehi_push_desc(ehi, "parity error");
1463 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1464 ata_ehi_hotplugged(ehi);
1465 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1466 "dev disconnect" : "dev connect");
1467 action |= ATA_EH_RESET;
1470 if (IS_GEN_I(hpriv)) {
1471 eh_freeze_mask = EDMA_EH_FREEZE_5;
1473 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1474 pp = ap->private_data;
1475 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1476 ata_ehi_push_desc(ehi, "EDMA self-disable");
1478 } else {
1479 eh_freeze_mask = EDMA_EH_FREEZE;
1481 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1482 pp = ap->private_data;
1483 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1484 ata_ehi_push_desc(ehi, "EDMA self-disable");
1487 if (edma_err_cause & EDMA_ERR_SERR) {
1488 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1489 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1490 err_mask = AC_ERR_ATA_BUS;
1491 action |= ATA_EH_RESET;
1495 /* Clear EDMA now that SERR cleanup done */
1496 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1498 if (!err_mask) {
1499 err_mask = AC_ERR_OTHER;
1500 action |= ATA_EH_RESET;
1503 ehi->serror |= serr;
1504 ehi->action |= action;
1506 if (qc)
1507 qc->err_mask |= err_mask;
1508 else
1509 ehi->err_mask |= err_mask;
1511 if (edma_err_cause & eh_freeze_mask)
1512 ata_port_freeze(ap);
1513 else
1514 ata_port_abort(ap);
1517 static void mv_intr_pio(struct ata_port *ap)
1519 struct ata_queued_cmd *qc;
1520 u8 ata_status;
1522 /* ignore spurious intr if drive still BUSY */
1523 ata_status = readb(ap->ioaddr.status_addr);
1524 if (unlikely(ata_status & ATA_BUSY))
1525 return;
1527 /* get active ATA command */
1528 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1529 if (unlikely(!qc)) /* no active tag */
1530 return;
1531 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1532 return;
1534 /* and finally, complete the ATA command */
1535 qc->err_mask |= ac_err_mask(ata_status);
1536 ata_qc_complete(qc);
1539 static void mv_intr_edma(struct ata_port *ap)
1541 void __iomem *port_mmio = mv_ap_base(ap);
1542 struct mv_host_priv *hpriv = ap->host->private_data;
1543 struct mv_port_priv *pp = ap->private_data;
1544 struct ata_queued_cmd *qc;
1545 u32 out_index, in_index;
1546 bool work_done = false;
1548 /* get h/w response queue pointer */
1549 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1550 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1552 while (1) {
1553 u16 status;
1554 unsigned int tag;
1556 /* get s/w response queue last-read pointer, and compare */
1557 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1558 if (in_index == out_index)
1559 break;
1561 /* 50xx: get active ATA command */
1562 if (IS_GEN_I(hpriv))
1563 tag = ap->link.active_tag;
1565 /* Gen II/IIE: get active ATA command via tag, to enable
1566 * support for queueing. This works transparently for
1567 * queued and non-queued modes.
1569 else
1570 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1572 qc = ata_qc_from_tag(ap, tag);
1574 /* For non-NCQ mode, the lower 8 bits of status
1575 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1576 * which should be zero if all went well.
1578 status = le16_to_cpu(pp->crpb[out_index].flags);
1579 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1580 mv_err_intr(ap, qc);
1581 return;
1584 /* and finally, complete the ATA command */
1585 if (qc) {
1586 qc->err_mask |=
1587 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1588 ata_qc_complete(qc);
1591 /* advance software response queue pointer, to
1592 * indicate (after the loop completes) to hardware
1593 * that we have consumed a response queue entry.
1595 work_done = true;
1596 pp->resp_idx++;
1599 if (work_done)
1600 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1601 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1602 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1606 * mv_host_intr - Handle all interrupts on the given host controller
1607 * @host: host specific structure
1608 * @relevant: port error bits relevant to this host controller
1609 * @hc: which host controller we're to look at
1611 * Read, then write-clear, the HC interrupt status, then walk each
1612 * port connected to the HC and see if it needs servicing. Port
1613 * success ints are reported in the HC interrupt status reg, the
1614 * port error ints are reported in the higher level main
1615 * interrupt status register and thus are passed in via the
1616 * 'relevant' argument.
1618 * LOCKING:
1619 * Inherited from caller.
1621 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1623 struct mv_host_priv *hpriv = host->private_data;
1624 void __iomem *mmio = hpriv->base;
1625 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1626 u32 hc_irq_cause;
1627 int port, port0, last_port;
1629 if (hc == 0)
1630 port0 = 0;
1631 else
1632 port0 = MV_PORTS_PER_HC;
1634 if (HAS_PCI(host))
1635 last_port = port0 + MV_PORTS_PER_HC;
1636 else
1637 last_port = port0 + hpriv->n_ports;
1638 /* we'll need the HC success int register in most cases */
1639 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1640 if (!hc_irq_cause)
1641 return;
1643 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1645 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1646 hc, relevant, hc_irq_cause);
1648 for (port = port0; port < last_port; port++) {
1649 struct ata_port *ap = host->ports[port];
1650 struct mv_port_priv *pp;
1651 int have_err_bits, hard_port, shift;
1653 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1654 continue;
1656 pp = ap->private_data;
1658 shift = port << 1; /* (port * 2) */
1659 if (port >= MV_PORTS_PER_HC)
1660 shift++; /* skip bit 8 in the HC Main IRQ reg */
1662 have_err_bits = ((PORT0_ERR << shift) & relevant);
1664 if (unlikely(have_err_bits)) {
1665 struct ata_queued_cmd *qc;
1667 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1668 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1669 continue;
1671 mv_err_intr(ap, qc);
1672 continue;
1675 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1677 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1678 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1679 mv_intr_edma(ap);
1680 } else {
1681 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1682 mv_intr_pio(ap);
1685 VPRINTK("EXIT\n");
1688 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1690 struct mv_host_priv *hpriv = host->private_data;
1691 struct ata_port *ap;
1692 struct ata_queued_cmd *qc;
1693 struct ata_eh_info *ehi;
1694 unsigned int i, err_mask, printed = 0;
1695 u32 err_cause;
1697 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1699 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1700 err_cause);
1702 DPRINTK("All regs @ PCI error\n");
1703 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1705 writelfl(0, mmio + hpriv->irq_cause_ofs);
1707 for (i = 0; i < host->n_ports; i++) {
1708 ap = host->ports[i];
1709 if (!ata_link_offline(&ap->link)) {
1710 ehi = &ap->link.eh_info;
1711 ata_ehi_clear_desc(ehi);
1712 if (!printed++)
1713 ata_ehi_push_desc(ehi,
1714 "PCI err cause 0x%08x", err_cause);
1715 err_mask = AC_ERR_HOST_BUS;
1716 ehi->action = ATA_EH_RESET;
1717 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1718 if (qc)
1719 qc->err_mask |= err_mask;
1720 else
1721 ehi->err_mask |= err_mask;
1723 ata_port_freeze(ap);
1729 * mv_interrupt - Main interrupt event handler
1730 * @irq: unused
1731 * @dev_instance: private data; in this case the host structure
1733 * Read the read only register to determine if any host
1734 * controllers have pending interrupts. If so, call lower level
1735 * routine to handle. Also check for PCI errors which are only
1736 * reported here.
1738 * LOCKING:
1739 * This routine holds the host lock while processing pending
1740 * interrupts.
1742 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1744 struct ata_host *host = dev_instance;
1745 struct mv_host_priv *hpriv = host->private_data;
1746 unsigned int hc, handled = 0, n_hcs;
1747 void __iomem *mmio = hpriv->base;
1748 u32 irq_stat, irq_mask;
1750 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
1751 spin_lock(&host->lock);
1753 irq_stat = readl(hpriv->main_cause_reg_addr);
1754 irq_mask = readl(hpriv->main_mask_reg_addr);
1756 /* check the cases where we either have nothing pending or have read
1757 * a bogus register value which can indicate HW removal or PCI fault
1759 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1760 goto out_unlock;
1762 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1764 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1765 mv_pci_error(host, mmio);
1766 handled = 1;
1767 goto out_unlock; /* skip all other HC irq handling */
1770 for (hc = 0; hc < n_hcs; hc++) {
1771 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1772 if (relevant) {
1773 mv_host_intr(host, relevant, hc);
1774 handled = 1;
1778 out_unlock:
1779 spin_unlock(&host->lock);
1781 return IRQ_RETVAL(handled);
1784 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1786 unsigned int ofs;
1788 switch (sc_reg_in) {
1789 case SCR_STATUS:
1790 case SCR_ERROR:
1791 case SCR_CONTROL:
1792 ofs = sc_reg_in * sizeof(u32);
1793 break;
1794 default:
1795 ofs = 0xffffffffU;
1796 break;
1798 return ofs;
1801 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1803 struct mv_host_priv *hpriv = ap->host->private_data;
1804 void __iomem *mmio = hpriv->base;
1805 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1806 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1808 if (ofs != 0xffffffffU) {
1809 *val = readl(addr + ofs);
1810 return 0;
1811 } else
1812 return -EINVAL;
1815 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1817 struct mv_host_priv *hpriv = ap->host->private_data;
1818 void __iomem *mmio = hpriv->base;
1819 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1820 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1822 if (ofs != 0xffffffffU) {
1823 writelfl(val, addr + ofs);
1824 return 0;
1825 } else
1826 return -EINVAL;
1829 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1831 struct pci_dev *pdev = to_pci_dev(host->dev);
1832 int early_5080;
1834 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1836 if (!early_5080) {
1837 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1838 tmp |= (1 << 0);
1839 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1842 mv_reset_pci_bus(host, mmio);
1845 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1847 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1850 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1851 void __iomem *mmio)
1853 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1854 u32 tmp;
1856 tmp = readl(phy_mmio + MV5_PHY_MODE);
1858 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1859 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1862 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1864 u32 tmp;
1866 writel(0, mmio + MV_GPIO_PORT_CTL);
1868 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1870 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1871 tmp |= ~(1 << 0);
1872 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1875 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1876 unsigned int port)
1878 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1879 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1880 u32 tmp;
1881 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1883 if (fix_apm_sq) {
1884 tmp = readl(phy_mmio + MV5_LT_MODE);
1885 tmp |= (1 << 19);
1886 writel(tmp, phy_mmio + MV5_LT_MODE);
1888 tmp = readl(phy_mmio + MV5_PHY_CTL);
1889 tmp &= ~0x3;
1890 tmp |= 0x1;
1891 writel(tmp, phy_mmio + MV5_PHY_CTL);
1894 tmp = readl(phy_mmio + MV5_PHY_MODE);
1895 tmp &= ~mask;
1896 tmp |= hpriv->signal[port].pre;
1897 tmp |= hpriv->signal[port].amps;
1898 writel(tmp, phy_mmio + MV5_PHY_MODE);
1902 #undef ZERO
1903 #define ZERO(reg) writel(0, port_mmio + (reg))
1904 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1905 unsigned int port)
1907 void __iomem *port_mmio = mv_port_base(mmio, port);
1910 * The datasheet warns against setting ATA_RST when EDMA is active
1911 * (but doesn't say what the problem might be). So we first try
1912 * to disable the EDMA engine before doing the ATA_RST operation.
1914 mv_reset_channel(hpriv, mmio, port);
1916 ZERO(0x028); /* command */
1917 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1918 ZERO(0x004); /* timer */
1919 ZERO(0x008); /* irq err cause */
1920 ZERO(0x00c); /* irq err mask */
1921 ZERO(0x010); /* rq bah */
1922 ZERO(0x014); /* rq inp */
1923 ZERO(0x018); /* rq outp */
1924 ZERO(0x01c); /* respq bah */
1925 ZERO(0x024); /* respq outp */
1926 ZERO(0x020); /* respq inp */
1927 ZERO(0x02c); /* test control */
1928 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1930 #undef ZERO
1932 #define ZERO(reg) writel(0, hc_mmio + (reg))
1933 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1934 unsigned int hc)
1936 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1937 u32 tmp;
1939 ZERO(0x00c);
1940 ZERO(0x010);
1941 ZERO(0x014);
1942 ZERO(0x018);
1944 tmp = readl(hc_mmio + 0x20);
1945 tmp &= 0x1c1c1c1c;
1946 tmp |= 0x03030303;
1947 writel(tmp, hc_mmio + 0x20);
1949 #undef ZERO
1951 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1952 unsigned int n_hc)
1954 unsigned int hc, port;
1956 for (hc = 0; hc < n_hc; hc++) {
1957 for (port = 0; port < MV_PORTS_PER_HC; port++)
1958 mv5_reset_hc_port(hpriv, mmio,
1959 (hc * MV_PORTS_PER_HC) + port);
1961 mv5_reset_one_hc(hpriv, mmio, hc);
1964 return 0;
1967 #undef ZERO
1968 #define ZERO(reg) writel(0, mmio + (reg))
1969 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
1971 struct mv_host_priv *hpriv = host->private_data;
1972 u32 tmp;
1974 tmp = readl(mmio + MV_PCI_MODE);
1975 tmp &= 0xff00ffff;
1976 writel(tmp, mmio + MV_PCI_MODE);
1978 ZERO(MV_PCI_DISC_TIMER);
1979 ZERO(MV_PCI_MSI_TRIGGER);
1980 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1981 ZERO(HC_MAIN_IRQ_MASK_OFS);
1982 ZERO(MV_PCI_SERR_MASK);
1983 ZERO(hpriv->irq_cause_ofs);
1984 ZERO(hpriv->irq_mask_ofs);
1985 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1986 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1987 ZERO(MV_PCI_ERR_ATTRIBUTE);
1988 ZERO(MV_PCI_ERR_COMMAND);
1990 #undef ZERO
1992 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1994 u32 tmp;
1996 mv5_reset_flash(hpriv, mmio);
1998 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1999 tmp &= 0x3;
2000 tmp |= (1 << 5) | (1 << 6);
2001 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2005 * mv6_reset_hc - Perform the 6xxx global soft reset
2006 * @mmio: base address of the HBA
2008 * This routine only applies to 6xxx parts.
2010 * LOCKING:
2011 * Inherited from caller.
2013 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2014 unsigned int n_hc)
2016 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2017 int i, rc = 0;
2018 u32 t;
2020	/* Follow the procedure defined in the PCI "main command and status
2021	 * register" table.
2023 t = readl(reg);
2024 writel(t | STOP_PCI_MASTER, reg);
2026 for (i = 0; i < 1000; i++) {
2027 udelay(1);
2028 t = readl(reg);
2029 if (PCI_MASTER_EMPTY & t)
2030 break;
2032 if (!(PCI_MASTER_EMPTY & t)) {
2033 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2034 rc = 1;
2035 goto done;
2038 /* set reset */
2039 i = 5;
2040 do {
2041 writel(t | GLOB_SFT_RST, reg);
2042 t = readl(reg);
2043 udelay(1);
2044 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2046 if (!(GLOB_SFT_RST & t)) {
2047 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2048 rc = 1;
2049 goto done;
2052 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2053 i = 5;
2054 do {
2055 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2056 t = readl(reg);
2057 udelay(1);
2058 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2060 if (GLOB_SFT_RST & t) {
2061 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2062 rc = 1;
2064 done:
2065 return rc;
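	/*
	 * Summary of the sequence above, as implemented by this routine:
	 * stop the PCI master and poll up to ~1 ms for PCI_MASTER_EMPTY,
	 * assert GLOB_SFT_RST with a handful of polls, then clear the reset
	 * while re-enabling the PCI master.  Any step that times out logs
	 * an error and returns nonzero so the caller can abort init.
	 */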
2068 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2069 void __iomem *mmio)
2071 void __iomem *port_mmio;
2072 u32 tmp;
2074 tmp = readl(mmio + MV_RESET_CFG);
2075 if ((tmp & (1 << 0)) == 0) {
2076 hpriv->signal[idx].amps = 0x7 << 8;
2077 hpriv->signal[idx].pre = 0x1 << 5;
2078 return;
2081 port_mmio = mv_port_base(mmio, idx);
2082 tmp = readl(port_mmio + PHY_MODE2);
2084 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2085 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2088 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2090 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2093 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2094 unsigned int port)
2096 void __iomem *port_mmio = mv_port_base(mmio, port);
2098 u32 hp_flags = hpriv->hp_flags;
2099 int fix_phy_mode2 =
2100 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2101 int fix_phy_mode4 =
2102 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2103 u32 m2, tmp;
2105 if (fix_phy_mode2) {
2106 m2 = readl(port_mmio + PHY_MODE2);
2107 m2 &= ~(1 << 16);
2108 m2 |= (1 << 31);
2109 writel(m2, port_mmio + PHY_MODE2);
2111 udelay(200);
2113 m2 = readl(port_mmio + PHY_MODE2);
2114 m2 &= ~((1 << 16) | (1 << 31));
2115 writel(m2, port_mmio + PHY_MODE2);
2117 udelay(200);
2120 /* who knows what this magic does */
2121 tmp = readl(port_mmio + PHY_MODE3);
2122 tmp &= ~0x7F800000;
2123 tmp |= 0x2A800000;
2124 writel(tmp, port_mmio + PHY_MODE3);
2126 if (fix_phy_mode4) {
2127 u32 m4;
2129 m4 = readl(port_mmio + PHY_MODE4);
2131 if (hp_flags & MV_HP_ERRATA_60X1B2)
2132 tmp = readl(port_mmio + PHY_MODE3);
2134 /* workaround for errata FEr SATA#10 (part 1) */
2135 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2137 writel(m4, port_mmio + PHY_MODE4);
2139 if (hp_flags & MV_HP_ERRATA_60X1B2)
2140 writel(tmp, port_mmio + PHY_MODE3);
2143 /* Revert values of pre-emphasis and signal amps to the saved ones */
2144 m2 = readl(port_mmio + PHY_MODE2);
2146 m2 &= ~MV_M2_PREAMP_MASK;
2147 m2 |= hpriv->signal[port].amps;
2148 m2 |= hpriv->signal[port].pre;
2149 m2 &= ~(1 << 16);
2151 /* according to mvSata 3.6.1, some IIE values are fixed */
2152 if (IS_GEN_IIE(hpriv)) {
2153 m2 &= ~0xC30FF01F;
2154 m2 |= 0x0000900F;
2157 writel(m2, port_mmio + PHY_MODE2);
2160 /* TODO: use the generic LED interface to configure the SATA Presence */
2161 /* & Acitivy LEDs on the board */
2162 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2163 void __iomem *mmio)
2165 return;
2168 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2169 void __iomem *mmio)
2171 void __iomem *port_mmio;
2172 u32 tmp;
2174 port_mmio = mv_port_base(mmio, idx);
2175 tmp = readl(port_mmio + PHY_MODE2);
2177 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2178 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2181 #undef ZERO
2182 #define ZERO(reg) writel(0, port_mmio + (reg))
2183 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2184 void __iomem *mmio, unsigned int port)
2186 void __iomem *port_mmio = mv_port_base(mmio, port);
2189 * The datasheet warns against setting ATA_RST when EDMA is active
2190 * (but doesn't say what the problem might be). So we first try
2191 * to disable the EDMA engine before doing the ATA_RST operation.
2193 mv_reset_channel(hpriv, mmio, port);
2195 ZERO(0x028); /* command */
2196 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2197 ZERO(0x004); /* timer */
2198 ZERO(0x008); /* irq err cause */
2199 ZERO(0x00c); /* irq err mask */
2200 ZERO(0x010); /* rq bah */
2201 ZERO(0x014); /* rq inp */
2202 ZERO(0x018); /* rq outp */
2203 ZERO(0x01c); /* respq bah */
2204 ZERO(0x024); /* respq outp */
2205 ZERO(0x020); /* respq inp */
2206 ZERO(0x02c); /* test control */
2207 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2210 #undef ZERO
2212 #define ZERO(reg) writel(0, hc_mmio + (reg))
2213 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2214 void __iomem *mmio)
2216 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2218 ZERO(0x00c);
2219 ZERO(0x010);
2220 ZERO(0x014);
2224 #undef ZERO
2226 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2227 void __iomem *mmio, unsigned int n_hc)
2229 unsigned int port;
2231 for (port = 0; port < hpriv->n_ports; port++)
2232 mv_soc_reset_hc_port(hpriv, mmio, port);
2234 mv_soc_reset_one_hc(hpriv, mmio);
2236 return 0;
2239 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2240 void __iomem *mmio)
2242 return;
2245 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2247 return;
2250 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2252 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2254 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2255 if (want_gen2i)
2256 ifctl |= (1 << 7); /* enable gen2i speed */
2257 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
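	/*
	 * For reference: bit 7 of SATA_INTERFACE_CFG selects gen2i (3.0 Gb/s)
	 * operation.  mv_reset_channel() calls this with want_gen2i=1 on
	 * non-Gen-I chips, while mv_hardreset() calls it with want_gen2i=0
	 * to force 1.5 Gb/s as part of the FEr SATA#10 fallback.
	 */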
2261 * Caller must ensure that EDMA is not active,
2262 * by first doing mv_stop_edma() where needed.
2264 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2265 unsigned int port_no)
2267 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2269 mv_stop_edma_engine(port_mmio);
2270 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2272 if (!IS_GEN_I(hpriv)) {
2273 /* Enable 3.0gb/s link speed */
2274 mv_setup_ifctl(port_mmio, 1);
2277 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2278 * link, and physical layers. It resets all SATA interface registers
2279 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2281 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2282 udelay(25); /* allow reset propagation */
2283 writelfl(0, port_mmio + EDMA_CMD_OFS);
2285 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2287 if (IS_GEN_I(hpriv))
2288 mdelay(1);
2291 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2292 unsigned long deadline)
2294 struct ata_port *ap = link->ap;
2295 struct mv_host_priv *hpriv = ap->host->private_data;
2296 struct mv_port_priv *pp = ap->private_data;
2297 void __iomem *mmio = hpriv->base;
2298 int rc, attempts = 0, extra = 0;
2299 u32 sstatus;
2300 bool online;
2302 mv_reset_channel(hpriv, mmio, ap->port_no);
2303 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2305 /* Workaround for errata FEr SATA#10 (part 2) */
2306 do {
2307 const unsigned long *timing =
2308 sata_ehc_deb_timing(&link->eh_context);
2310 rc = sata_link_hardreset(link, timing, deadline + extra,
2311 &online, NULL);
2312 if (rc)
2313 return rc;
2314 sata_scr_read(link, SCR_STATUS, &sstatus);
2315 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2316 /* Force 1.5gb/s link speed and try again */
2317 mv_setup_ifctl(mv_ap_base(ap), 0);
2318 if (time_after(jiffies + HZ, deadline))
2319 extra = HZ; /* only extend it once, max */
2321 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
2323 return rc;
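	/*
	 * Illustrative decode of the SStatus values tested above (standard
	 * SATA SCR0 layout: DET in bits 3:0, SPD in 7:4, IPM in 11:8):
	 * 0x121 means a device was detected but PHY communication never
	 * came up, which after five attempts triggers the forced 1.5 Gb/s
	 * retry; 0x0 (nothing attached), 0x113 (link up at Gen1) and 0x123
	 * (link up at Gen2) all terminate the retry loop.
	 */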
2326 static void mv_eh_freeze(struct ata_port *ap)
2328 struct mv_host_priv *hpriv = ap->host->private_data;
2329 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2330 u32 tmp, mask;
2331 unsigned int shift;
2333 /* FIXME: handle coalescing completion events properly */
2335 shift = ap->port_no * 2;
2336 if (hc > 0)
2337 shift++;
2339 mask = 0x3 << shift;
2341 /* disable assertion of portN err, done events */
2342 tmp = readl(hpriv->main_mask_reg_addr);
2343 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
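	/*
	 * Worked example of the mask arithmetic above (and in mv_eh_thaw
	 * below): each port owns two bits ("err" and "done") in the main
	 * mask register.  Port 2 sits on the first host controller, so
	 * shift = 2*2 = 4 and mask = 0x3 << 4 = 0x30; port 5 sits on the
	 * second, so shift = 5*2 + 1 = 11 and mask = 0x3 << 11 = 0x1800.
	 */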
2346 static void mv_eh_thaw(struct ata_port *ap)
2348 struct mv_host_priv *hpriv = ap->host->private_data;
2349 void __iomem *mmio = hpriv->base;
2350 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2351 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2352 void __iomem *port_mmio = mv_ap_base(ap);
2353 u32 tmp, mask, hc_irq_cause;
2354 unsigned int shift, hc_port_no = ap->port_no;
2356 /* FIXME: handle coalescing completion events properly */
2358 shift = ap->port_no * 2;
2359 if (hc > 0) {
2360 shift++;
2361 hc_port_no -= 4;
2364 mask = 0x3 << shift;
2366 /* clear EDMA errors on this port */
2367 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2369 /* clear pending irq events */
2370 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2371 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2372 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2373 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2375 /* enable assertion of portN err, done events */
2376 tmp = readl(hpriv->main_mask_reg_addr);
2377 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2381 * mv_port_init - Perform some early initialization on a single port.
2382 * @port: libata data structure storing shadow register addresses
2383 * @port_mmio: base address of the port
2385 * Initialize shadow register mmio addresses, clear outstanding
2386 * interrupts on the port, and unmask interrupts for the future
2387 * start of the port.
2389 * LOCKING:
2390 * Inherited from caller.
2392 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2394 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2395 unsigned serr_ofs;
2397 /* PIO related setup
2399 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2400 port->error_addr =
2401 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2402 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2403 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2404 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2405 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2406 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2407 port->status_addr =
2408 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2409 /* special case: control/altstatus doesn't have ATA_REG_ address */
2410 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2412 /* unused: */
2413 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2415 /* Clear any currently outstanding port interrupt conditions */
2416 serr_ofs = mv_scr_offset(SCR_ERROR);
2417 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2418 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2420 /* unmask all non-transient EDMA error interrupts */
2421 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2423 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2424 readl(port_mmio + EDMA_CFG_OFS),
2425 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2426 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
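	/*
	 * Layout sketch, assuming the standard libata ATA_REG_* taskfile
	 * indices (DATA=0x00 ... STATUS=0x07): each shadow register lives
	 * at shd_base + 4 * ATA_REG_xxx, e.g. the data register at
	 * shd_base + 0x00 and the status/command register at
	 * shd_base + 0x1c, with control/altstatus at SHD_CTL_AST_OFS.
	 */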
2429 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2431 struct pci_dev *pdev = to_pci_dev(host->dev);
2432 struct mv_host_priv *hpriv = host->private_data;
2433 u32 hp_flags = hpriv->hp_flags;
2435 switch (board_idx) {
2436 case chip_5080:
2437 hpriv->ops = &mv5xxx_ops;
2438 hp_flags |= MV_HP_GEN_I;
2440 switch (pdev->revision) {
2441 case 0x1:
2442 hp_flags |= MV_HP_ERRATA_50XXB0;
2443 break;
2444 case 0x3:
2445 hp_flags |= MV_HP_ERRATA_50XXB2;
2446 break;
2447 default:
2448 dev_printk(KERN_WARNING, &pdev->dev,
2449 "Applying 50XXB2 workarounds to unknown rev\n");
2450 hp_flags |= MV_HP_ERRATA_50XXB2;
2451 break;
2453 break;
2455 case chip_504x:
2456 case chip_508x:
2457 hpriv->ops = &mv5xxx_ops;
2458 hp_flags |= MV_HP_GEN_I;
2460 switch (pdev->revision) {
2461 case 0x0:
2462 hp_flags |= MV_HP_ERRATA_50XXB0;
2463 break;
2464 case 0x3:
2465 hp_flags |= MV_HP_ERRATA_50XXB2;
2466 break;
2467 default:
2468 dev_printk(KERN_WARNING, &pdev->dev,
2469 "Applying B2 workarounds to unknown rev\n");
2470 hp_flags |= MV_HP_ERRATA_50XXB2;
2471 break;
2473 break;
2475 case chip_604x:
2476 case chip_608x:
2477 hpriv->ops = &mv6xxx_ops;
2478 hp_flags |= MV_HP_GEN_II;
2480 switch (pdev->revision) {
2481 case 0x7:
2482 hp_flags |= MV_HP_ERRATA_60X1B2;
2483 break;
2484 case 0x9:
2485 hp_flags |= MV_HP_ERRATA_60X1C0;
2486 break;
2487 default:
2488 dev_printk(KERN_WARNING, &pdev->dev,
2489 "Applying B2 workarounds to unknown rev\n");
2490 hp_flags |= MV_HP_ERRATA_60X1B2;
2491 break;
2493 break;
2495 case chip_7042:
2496 hp_flags |= MV_HP_PCIE;
2497 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2498 (pdev->device == 0x2300 || pdev->device == 0x2310))
2501 * Highpoint RocketRAID PCIe 23xx series cards:
2503 * Unconfigured drives are treated as "Legacy"
2504 * by the BIOS, and it overwrites sector 8 with
2505 * a "Lgcy" metadata block prior to Linux boot.
2507 * Configured drives (RAID or JBOD) leave sector 8
2508 * alone, but instead overwrite a high numbered
2509 * sector for the RAID metadata. This sector can
2510 * be determined exactly, by truncating the physical
2511 * drive capacity to a nice even GB value.
2513 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2515 * Warn the user, lest they think we're just buggy.
2517 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2518 " BIOS CORRUPTS DATA on all attached drives,"
2519 " regardless of if/how they are configured."
2520 " BEWARE!\n");
2521 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2522 " use sectors 8-9 on \"Legacy\" drives,"
2523 " and avoid the final two gigabytes on"
2524 " all RocketRAID BIOS initialized drives.\n");
2526 case chip_6042:
2527 hpriv->ops = &mv6xxx_ops;
2528 hp_flags |= MV_HP_GEN_IIE;
2530 switch (pdev->revision) {
2531 case 0x0:
2532 hp_flags |= MV_HP_ERRATA_XX42A0;
2533 break;
2534 case 0x1:
2535 hp_flags |= MV_HP_ERRATA_60X1C0;
2536 break;
2537 default:
2538 dev_printk(KERN_WARNING, &pdev->dev,
2539 "Applying 60X1C0 workarounds to unknown rev\n");
2540 hp_flags |= MV_HP_ERRATA_60X1C0;
2541 break;
2543 break;
2544 case chip_soc:
2545 hpriv->ops = &mv_soc_ops;
2546 hp_flags |= MV_HP_ERRATA_60X1C0;
2547 break;
2549 default:
2550 dev_printk(KERN_ERR, host->dev,
2551 "BUG: invalid board index %u\n", board_idx);
2552 return 1;
2555 hpriv->hp_flags = hp_flags;
2556 if (hp_flags & MV_HP_PCIE) {
2557 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2558 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2559 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2560 } else {
2561 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2562 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2563 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2566 return 0;
2570 * mv_init_host - Perform some early initialization of the host.
2571 * @host: ATA host to initialize
2572 * @board_idx: controller index
2574 * If possible, do an early global reset of the host. Then do
2575 * our port init and clear/unmask all/relevant host interrupts.
2577 * LOCKING:
2578 * Inherited from caller.
2580 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2582 int rc = 0, n_hc, port, hc;
2583 struct mv_host_priv *hpriv = host->private_data;
2584 void __iomem *mmio = hpriv->base;
2586 rc = mv_chip_id(host, board_idx);
2587 if (rc)
2588 goto done;
2590 if (HAS_PCI(host)) {
2591 hpriv->main_cause_reg_addr = hpriv->base +
2592 HC_MAIN_IRQ_CAUSE_OFS;
2593 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2594 } else {
2595 hpriv->main_cause_reg_addr = hpriv->base +
2596 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2597 hpriv->main_mask_reg_addr = hpriv->base +
2598 HC_SOC_MAIN_IRQ_MASK_OFS;
2600 /* global interrupt mask */
2601 writel(0, hpriv->main_mask_reg_addr);
2603 n_hc = mv_get_hc_count(host->ports[0]->flags);
2605 for (port = 0; port < host->n_ports; port++)
2606 hpriv->ops->read_preamp(hpriv, port, mmio);
2608 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2609 if (rc)
2610 goto done;
2612 hpriv->ops->reset_flash(hpriv, mmio);
2613 hpriv->ops->reset_bus(host, mmio);
2614 hpriv->ops->enable_leds(hpriv, mmio);
2616 for (port = 0; port < host->n_ports; port++) {
2617 struct ata_port *ap = host->ports[port];
2618 void __iomem *port_mmio = mv_port_base(mmio, port);
2620 mv_port_init(&ap->ioaddr, port_mmio);
2622 #ifdef CONFIG_PCI
2623 if (HAS_PCI(host)) {
2624 unsigned int offset = port_mmio - mmio;
2625 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2626 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2628 #endif
2631 for (hc = 0; hc < n_hc; hc++) {
2632 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2634 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2635 "(before clear)=0x%08x\n", hc,
2636 readl(hc_mmio + HC_CFG_OFS),
2637 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2639 /* Clear any currently outstanding hc interrupt conditions */
2640 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2643 if (HAS_PCI(host)) {
2644 /* Clear any currently outstanding host interrupt conditions */
2645 writelfl(0, mmio + hpriv->irq_cause_ofs);
2647 /* and unmask interrupt generation for host regs */
2648 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2649 if (IS_GEN_I(hpriv))
2650 writelfl(~HC_MAIN_MASKED_IRQS_5,
2651 hpriv->main_mask_reg_addr);
2652 else
2653 writelfl(~HC_MAIN_MASKED_IRQS,
2654 hpriv->main_mask_reg_addr);
2656 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2657 "PCI int cause/mask=0x%08x/0x%08x\n",
2658 readl(hpriv->main_cause_reg_addr),
2659 readl(hpriv->main_mask_reg_addr),
2660 readl(mmio + hpriv->irq_cause_ofs),
2661 readl(mmio + hpriv->irq_mask_ofs));
2662 } else {
2663 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2664 hpriv->main_mask_reg_addr);
2665 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2666 readl(hpriv->main_cause_reg_addr),
2667 readl(hpriv->main_mask_reg_addr));
2669 done:
2670 return rc;
2673 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2675 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2676 MV_CRQB_Q_SZ, 0);
2677 if (!hpriv->crqb_pool)
2678 return -ENOMEM;
2680 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2681 MV_CRPB_Q_SZ, 0);
2682 if (!hpriv->crpb_pool)
2683 return -ENOMEM;
2685 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2686 MV_SG_TBL_SZ, 0);
2687 if (!hpriv->sg_tbl_pool)
2688 return -ENOMEM;
2690 return 0;
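/*
 * The dmam_pool_create() calls above are the device-managed (devres)
 * variants, so the pools are torn down automatically when the device
 * goes away; that is why the early -ENOMEM returns need no explicit
 * cleanup of pools created before the failure.
 */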
2694 * mv_platform_probe - handle a positive probe of an SoC Marvell
2695 * host
2696 * @pdev: platform device found
2698 * LOCKING:
2699 * Inherited from caller.
2701 static int mv_platform_probe(struct platform_device *pdev)
2703 static int printed_version;
2704 const struct mv_sata_platform_data *mv_platform_data;
2705 const struct ata_port_info *ppi[] =
2706 { &mv_port_info[chip_soc], NULL };
2707 struct ata_host *host;
2708 struct mv_host_priv *hpriv;
2709 struct resource *res;
2710 int n_ports, rc;
2712 if (!printed_version++)
2713 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2716 * Simple resource validation ..
2718 if (unlikely(pdev->num_resources != 2)) {
2719 dev_err(&pdev->dev, "invalid number of resources\n");
2720 return -EINVAL;
2724 * Get the register base first
2726 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2727 if (res == NULL)
2728 return -EINVAL;
2730 /* allocate host */
2731 mv_platform_data = pdev->dev.platform_data;
2732 n_ports = mv_platform_data->n_ports;
2734 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2735 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2737 if (!host || !hpriv)
2738 return -ENOMEM;
2739 host->private_data = hpriv;
2740 hpriv->n_ports = n_ports;
2742 host->iomap = NULL;
2743 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2744 res->end - res->start + 1);
2745 hpriv->base -= MV_SATAHC0_REG_BASE;
2747 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2748 if (rc)
2749 return rc;
2751 /* initialize adapter */
2752 rc = mv_init_host(host, chip_soc);
2753 if (rc)
2754 return rc;
2756 dev_printk(KERN_INFO, &pdev->dev,
2757 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2758 host->n_ports);
2760 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2761 IRQF_SHARED, &mv6_sht);
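/*
 * Illustrative only: a board file might hand this driver its resources
 * roughly as sketched below.  The base address, size and IRQ number are
 * hypothetical; n_ports is the only mv_sata_platform_data field this
 * probe routine consumes, and the device name must match DRV_NAME for
 * platform bus matching.
 */
#if 0	/* example board registration, not compiled */
static struct mv_sata_platform_data example_sata_data = {
	.n_ports	= 2,		/* ports wired up on the SoC */
};

static struct resource example_sata_resources[] = {
	{	/* register window (hypothetical address/size) */
		.start	= 0xf1080000,
		.end	= 0xf1087fff,
		.flags	= IORESOURCE_MEM,
	},
	{	/* controller interrupt (hypothetical number) */
		.start	= 21,
		.end	= 21,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_sata_device = {
	.name		= "sata_mv",	/* must match DRV_NAME */
	.id		= 0,
	.dev		= {
		.platform_data	= &example_sata_data,
	},
	.resource	= example_sata_resources,
	.num_resources	= ARRAY_SIZE(example_sata_resources),
};
/* ...registered with platform_device_register(&example_sata_device); */
#endif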
2766 * mv_platform_remove - unplug a platform interface
2767 * @pdev: platform device
2769 * A platform bus SATA device has been unplugged. Perform the needed
2770 * cleanup. Also called on module unload for any active devices.
2772 static int __devexit mv_platform_remove(struct platform_device *pdev)
2774 struct device *dev = &pdev->dev;
2775 struct ata_host *host = dev_get_drvdata(dev);
2777 ata_host_detach(host);
2778 return 0;
2781 static struct platform_driver mv_platform_driver = {
2782 .probe = mv_platform_probe,
2783 .remove = __devexit_p(mv_platform_remove),
2784 .driver = {
2785 .name = DRV_NAME,
2786 .owner = THIS_MODULE,
2791 #ifdef CONFIG_PCI
2792 static int mv_pci_init_one(struct pci_dev *pdev,
2793 const struct pci_device_id *ent);
2796 static struct pci_driver mv_pci_driver = {
2797 .name = DRV_NAME,
2798 .id_table = mv_pci_tbl,
2799 .probe = mv_pci_init_one,
2800 .remove = ata_pci_remove_one,
2804 * module options
2806 static int msi;	      /* Use PCI MSI; either zero (off, default) or non-zero */
2809 /* move to PCI layer or libata core? */
2810 static int pci_go_64(struct pci_dev *pdev)
2812 int rc;
2814 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2815 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2816 if (rc) {
2817 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2818 if (rc) {
2819 dev_printk(KERN_ERR, &pdev->dev,
2820 "64-bit DMA enable failed\n");
2821 return rc;
2824 } else {
2825 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2826 if (rc) {
2827 dev_printk(KERN_ERR, &pdev->dev,
2828 "32-bit DMA enable failed\n");
2829 return rc;
2831 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2832 if (rc) {
2833 dev_printk(KERN_ERR, &pdev->dev,
2834 "32-bit consistent DMA enable failed\n");
2835 return rc;
2839 return rc;
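/*
 * Fallback order used above: try a 64-bit streaming DMA mask first and
 * pair it with a 64-bit consistent mask, dropping only the consistent
 * mask to 32 bits if that fails; when 64-bit streaming DMA itself is
 * unavailable, fall back to 32-bit masks for both.
 */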
2843 * mv_print_info - Dump key info to kernel log for perusal.
2844 * @host: ATA host to print info about
2846 * FIXME: complete this.
2848 * LOCKING:
2849 * Inherited from caller.
2851 static void mv_print_info(struct ata_host *host)
2853 struct pci_dev *pdev = to_pci_dev(host->dev);
2854 struct mv_host_priv *hpriv = host->private_data;
2855 u8 scc;
2856 const char *scc_s, *gen;
2858 /* Use this to determine the HW stepping of the chip so we know
2859	 * what errata to work around
2861 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2862 if (scc == 0)
2863 scc_s = "SCSI";
2864 else if (scc == 0x01)
2865 scc_s = "RAID";
2866 else
2867 scc_s = "?";
2869 if (IS_GEN_I(hpriv))
2870 gen = "I";
2871 else if (IS_GEN_II(hpriv))
2872 gen = "II";
2873 else if (IS_GEN_IIE(hpriv))
2874 gen = "IIE";
2875 else
2876 gen = "?";
2878 dev_printk(KERN_INFO, &pdev->dev,
2879 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2880 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2881 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2885 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2886 * @pdev: PCI device found
2887 * @ent: PCI device ID entry for the matched host
2889 * LOCKING:
2890 * Inherited from caller.
2892 static int mv_pci_init_one(struct pci_dev *pdev,
2893 const struct pci_device_id *ent)
2895 static int printed_version;
2896 unsigned int board_idx = (unsigned int)ent->driver_data;
2897 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2898 struct ata_host *host;
2899 struct mv_host_priv *hpriv;
2900 int n_ports, rc;
2902 if (!printed_version++)
2903 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2905 /* allocate host */
2906 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2908 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2909 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2910 if (!host || !hpriv)
2911 return -ENOMEM;
2912 host->private_data = hpriv;
2913 hpriv->n_ports = n_ports;
2915 /* acquire resources */
2916 rc = pcim_enable_device(pdev);
2917 if (rc)
2918 return rc;
2920 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2921 if (rc == -EBUSY)
2922 pcim_pin_device(pdev);
2923 if (rc)
2924 return rc;
2925 host->iomap = pcim_iomap_table(pdev);
2926 hpriv->base = host->iomap[MV_PRIMARY_BAR];
2928 rc = pci_go_64(pdev);
2929 if (rc)
2930 return rc;
2932 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2933 if (rc)
2934 return rc;
2936 /* initialize adapter */
2937 rc = mv_init_host(host, board_idx);
2938 if (rc)
2939 return rc;
2941 /* Enable interrupts */
2942 if (msi && pci_enable_msi(pdev))
2943 pci_intx(pdev, 1);
2945 mv_dump_pci_cfg(pdev, 0x68);
2946 mv_print_info(host);
2948 pci_set_master(pdev);
2949 pci_try_set_mwi(pdev);
2950 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2951 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2953 #endif
2955 static int mv_platform_probe(struct platform_device *pdev);
2956 static int __devexit mv_platform_remove(struct platform_device *pdev);
2958 static int __init mv_init(void)
2960 int rc = -ENODEV;
2961 #ifdef CONFIG_PCI
2962 rc = pci_register_driver(&mv_pci_driver);
2963 if (rc < 0)
2964 return rc;
2965 #endif
2966 rc = platform_driver_register(&mv_platform_driver);
2968 #ifdef CONFIG_PCI
2969 if (rc < 0)
2970 pci_unregister_driver(&mv_pci_driver);
2971 #endif
2972 return rc;
2975 static void __exit mv_exit(void)
2977 #ifdef CONFIG_PCI
2978 pci_unregister_driver(&mv_pci_driver);
2979 #endif
2980 platform_driver_unregister(&mv_platform_driver);
2983 MODULE_AUTHOR("Brett Russ");
2984 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2985 MODULE_LICENSE("GPL");
2986 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2987 MODULE_VERSION(DRV_VERSION);
2988 MODULE_ALIAS("platform:" DRV_NAME);
2990 #ifdef CONFIG_PCI
2991 module_param(msi, int, 0444);
2992 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2993 #endif
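/*
 * Usage example: "modprobe sata_mv msi=1" enables MSI on PCI hosts; the
 * 0444 permission exposes the value read-only at
 * /sys/module/sata_mv/parameters/msi.
 */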
2995 module_init(mv_init);
2996 module_exit(mv_exit);