1 /*
2 * sata_mv.c - Marvell SATA support
4 * Copyright 2008: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
25 /*
26 sata_mv TODO list:
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple of workarounds (one related to PCI-X)
31 that are still needed.
33 2) Improve/fix IRQ and error handling sequences.
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
37 4) Think about TCQ support here, and for libata in general
38 with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
43 6) Add port multiplier support (intermediate)
45 8) Develop a low-power-consumption strategy, and implement it.
47 9) [Experiment, low priority] See if ATAPI can be supported using
48 "unknown FIS" or "vendor-specific FIS" support, or something creative
49 like that.
51 10) [Experiment, low priority] Investigate interrupt coalescing.
52 Quite often, especially with PCI Message Signalled Interrupts (MSI),
53 the overhead reduced by interrupt mitigation is not worth the
54 latency cost.
56 11) [Experiment, Marvell value added] Is it possible to use target
57 mode to cross-connect two Linux boxes with Marvell cards? If so,
58 creating LibATA target mode support would be very interesting.
60 Target mode, for those without docs, is the ability to directly
61 connect two SATA controllers.
63 */
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
85 enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
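/*
 * Sanity check on the sizes noted above: MV_CRQB_Q_SZ is 32 entries *
 * 32 bytes == 1KB, MV_CRPB_Q_SZ is 32 * 8 == 256B, and MV_SG_TBL_SZ is
 * 256 ePRDs * 16 bytes == 4KB per command tag.
 */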
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
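/*
 * Example: with MV_PORT_HC_SHIFT == 2 and MV_PORT_MASK == 3, global
 * port 6 decodes to HC 1 (6 >> 2) and hard port 2 (6 & 3), i.e. the
 * third port on the second host controller of a dual-HC (8-port) chip.
 */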
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
145 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
146 CRQB_CMD_ADDR_SHIFT = 8,
147 CRQB_CMD_CS = (0x2 << 11),
148 CRQB_CMD_LAST = (1 << 15),
150 CRPB_FLAG_STATUS_SHIFT = 8,
151 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
152 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
154 EPRD_FLAG_END_OF_TBL = (1 << 31),
156 /* PCI interface registers */
158 PCI_COMMAND_OFS = 0xc00,
160 PCI_MAIN_CMD_STS_OFS = 0xd30,
161 STOP_PCI_MASTER = (1 << 2),
162 PCI_MASTER_EMPTY = (1 << 3),
163 GLOB_SFT_RST = (1 << 4),
165 MV_PCI_MODE = 0xd00,
166 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
167 MV_PCI_DISC_TIMER = 0xd04,
168 MV_PCI_MSI_TRIGGER = 0xc38,
169 MV_PCI_SERR_MASK = 0xc28,
170 MV_PCI_XBAR_TMOUT = 0x1d04,
171 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
172 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
173 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
174 MV_PCI_ERR_COMMAND = 0x1d50,
176 PCI_IRQ_CAUSE_OFS = 0x1d58,
177 PCI_IRQ_MASK_OFS = 0x1d5c,
178 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
180 PCIE_IRQ_CAUSE_OFS = 0x1900,
181 PCIE_IRQ_MASK_OFS = 0x1910,
182 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
184 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
185 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
186 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
187 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
188 PORT0_ERR = (1 << 0), /* shift by port # */
189 PORT0_DONE = (1 << 1), /* shift by port # */
190 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
191 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
192 PCI_ERR = (1 << 18),
193 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
194 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
195 PORTS_0_3_COAL_DONE = (1 << 8),
196 PORTS_4_7_COAL_DONE = (1 << 17),
197 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
198 GPIO_INT = (1 << 22),
199 SELF_INT = (1 << 23),
200 TWSI_INT = (1 << 24),
201 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
202 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
203 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
204 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
205 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
206 HC_MAIN_RSVD),
207 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
208 HC_MAIN_RSVD_5),
209 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
211 /* SATAHC registers */
212 HC_CFG_OFS = 0,
214 HC_IRQ_CAUSE_OFS = 0x14,
215 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
216 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
217 DEV_IRQ = (1 << 8), /* shift by port # */
219 /* Shadow block registers */
220 SHD_BLK_OFS = 0x100,
221 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
223 /* SATA registers */
224 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
225 SATA_ACTIVE_OFS = 0x350,
226 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
227 LTMODE_OFS = 0x30c,
228 PHY_MODE3 = 0x310,
229 PHY_MODE4 = 0x314,
230 PHY_MODE2 = 0x330,
231 SATA_IFCTL_OFS = 0x344,
232 SATA_IFSTAT_OFS = 0x34c,
233 VENDOR_UNIQUE_FIS_OFS = 0x35c,
234 FIS_CFG_OFS = 0x360,
235 MV5_PHY_MODE = 0x74,
236 MV5_LT_MODE = 0x30,
237 MV5_PHY_CTL = 0x0C,
238 SATA_INTERFACE_CFG = 0x050,
240 MV_M2_PREAMP_MASK = 0x7e0,
242 /* Port registers */
243 EDMA_CFG_OFS = 0,
244 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
245 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
246 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
247 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
248 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
249 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
250 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
252 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
253 EDMA_ERR_IRQ_MASK_OFS = 0xc,
254 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
255 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
256 EDMA_ERR_DEV = (1 << 2), /* device error */
257 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
258 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
259 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
260 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
261 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
262 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
263 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
264 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
265 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
266 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
267 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
269 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
270 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
273 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
275 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
277 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
278 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
279 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
280 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
281 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
282 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
284 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
286 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
287 EDMA_ERR_OVERRUN_5 = (1 << 5),
288 EDMA_ERR_UNDERRUN_5 = (1 << 6),
290 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
291 EDMA_ERR_LNK_CTRL_RX_1 |
292 EDMA_ERR_LNK_CTRL_RX_3 |
293 EDMA_ERR_LNK_CTRL_TX,
295 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
296 EDMA_ERR_PRD_PAR |
297 EDMA_ERR_DEV_DCON |
298 EDMA_ERR_DEV_CON |
299 EDMA_ERR_SERR |
300 EDMA_ERR_SELF_DIS |
301 EDMA_ERR_CRQB_PAR |
302 EDMA_ERR_CRPB_PAR |
303 EDMA_ERR_INTRL_PAR |
304 EDMA_ERR_IORDY |
305 EDMA_ERR_LNK_CTRL_RX_2 |
306 EDMA_ERR_LNK_DATA_RX |
307 EDMA_ERR_LNK_DATA_TX |
308 EDMA_ERR_TRANS_PROTO,
310 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
311 EDMA_ERR_PRD_PAR |
312 EDMA_ERR_DEV_DCON |
313 EDMA_ERR_DEV_CON |
314 EDMA_ERR_OVERRUN_5 |
315 EDMA_ERR_UNDERRUN_5 |
316 EDMA_ERR_SELF_DIS_5 |
317 EDMA_ERR_CRQB_PAR |
318 EDMA_ERR_CRPB_PAR |
319 EDMA_ERR_INTRL_PAR |
320 EDMA_ERR_IORDY,
322 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
323 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
325 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
326 EDMA_REQ_Q_PTR_SHIFT = 5,
328 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
329 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
330 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
331 EDMA_RSP_Q_PTR_SHIFT = 3,
333 EDMA_CMD_OFS = 0x28, /* EDMA command register */
334 EDMA_EN = (1 << 0), /* enable EDMA */
335 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
336 ATA_RST = (1 << 2), /* reset trans/link/phy */
338 EDMA_IORDY_TMOUT = 0x34,
339 EDMA_ARB_CFG = 0x38,
341 /* Host private flags (hp_flags) */
342 MV_HP_FLAG_MSI = (1 << 0),
343 MV_HP_ERRATA_50XXB0 = (1 << 1),
344 MV_HP_ERRATA_50XXB2 = (1 << 2),
345 MV_HP_ERRATA_60X1B2 = (1 << 3),
346 MV_HP_ERRATA_60X1C0 = (1 << 4),
347 MV_HP_ERRATA_XX42A0 = (1 << 5),
348 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
349 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
350 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
351 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
353 /* Port private flags (pp_flags) */
354 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
355 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
358 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
359 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
360 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
361 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
363 enum {
364 /* DMA boundary 0xffff is required by the s/g splitting
365 * we need on /length/ in mv_fill_sg().
367 MV_DMA_BOUNDARY = 0xffffU,
369 /* mask of register bits containing lower 32 bits
370 * of EDMA request queue DMA address
372 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
374 /* ditto, for response queue */
375 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
378 enum chip_type {
379 chip_504x,
380 chip_508x,
381 chip_5080,
382 chip_604x,
383 chip_608x,
384 chip_6042,
385 chip_7042,
386 chip_soc,
389 /* Command ReQuest Block: 32B */
390 struct mv_crqb {
391 __le32 sg_addr;
392 __le32 sg_addr_hi;
393 __le16 ctrl_flags;
394 __le16 ata_cmd[11];
397 struct mv_crqb_iie {
398 __le32 addr;
399 __le32 addr_hi;
400 __le32 flags;
401 __le32 len;
402 __le32 ata_cmd[4];
405 /* Command ResPonse Block: 8B */
406 struct mv_crpb {
407 __le16 id;
408 __le16 flags;
409 __le32 tmstmp;
412 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
413 struct mv_sg {
414 __le32 addr;
415 __le32 flags_size;
416 __le32 addr_hi;
417 __le32 reserved;
420 struct mv_port_priv {
421 struct mv_crqb *crqb;
422 dma_addr_t crqb_dma;
423 struct mv_crpb *crpb;
424 dma_addr_t crpb_dma;
425 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
426 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
428 unsigned int req_idx;
429 unsigned int resp_idx;
431 u32 pp_flags;
434 struct mv_port_signal {
435 u32 amps;
436 u32 pre;
439 struct mv_host_priv {
440 u32 hp_flags;
441 struct mv_port_signal signal[8];
442 const struct mv_hw_ops *ops;
443 int n_ports;
444 void __iomem *base;
445 void __iomem *main_cause_reg_addr;
446 void __iomem *main_mask_reg_addr;
447 u32 irq_cause_ofs;
448 u32 irq_mask_ofs;
449 u32 unmask_all_irqs;
451 * These consistent DMA memory pools give us guaranteed
452 * alignment for hardware-accessed data structures,
453 * and less memory waste in accomplishing the alignment.
455 struct dma_pool *crqb_pool;
456 struct dma_pool *crpb_pool;
457 struct dma_pool *sg_tbl_pool;
460 struct mv_hw_ops {
461 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
462 unsigned int port);
463 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
464 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
465 void __iomem *mmio);
466 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
467 unsigned int n_hc);
468 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
469 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
472 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
473 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
474 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
475 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
476 static int mv_port_start(struct ata_port *ap);
477 static void mv_port_stop(struct ata_port *ap);
478 static void mv_qc_prep(struct ata_queued_cmd *qc);
479 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
480 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
481 static int mv_prereset(struct ata_link *link, unsigned long deadline);
482 static int mv_hardreset(struct ata_link *link, unsigned int *class,
483 unsigned long deadline);
484 static void mv_postreset(struct ata_link *link, unsigned int *classes);
485 static void mv_eh_freeze(struct ata_port *ap);
486 static void mv_eh_thaw(struct ata_port *ap);
487 static void mv6_dev_config(struct ata_device *dev);
489 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
490 unsigned int port);
491 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
492 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
493 void __iomem *mmio);
494 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
495 unsigned int n_hc);
496 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
497 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
499 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
500 unsigned int port);
501 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
502 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
503 void __iomem *mmio);
504 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
505 unsigned int n_hc);
506 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
507 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
508 void __iomem *mmio);
509 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
510 void __iomem *mmio);
511 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
512 void __iomem *mmio, unsigned int n_hc);
513 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
514 void __iomem *mmio);
515 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
516 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
517 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
518 unsigned int port_no);
519 static int mv_stop_edma(struct ata_port *ap);
520 static int mv_stop_edma_engine(void __iomem *port_mmio);
521 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
523 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
524 * because we have to allow room for worst case splitting of
525 * PRDs for 64K boundaries in mv_fill_sg().
527 static struct scsi_host_template mv5_sht = {
528 ATA_BASE_SHT(DRV_NAME),
529 .sg_tablesize = MV_MAX_SG_CT / 2,
530 .dma_boundary = MV_DMA_BOUNDARY,
533 static struct scsi_host_template mv6_sht = {
534 ATA_NCQ_SHT(DRV_NAME),
535 .can_queue = MV_MAX_Q_DEPTH - 1,
536 .sg_tablesize = MV_MAX_SG_CT / 2,
537 .dma_boundary = MV_DMA_BOUNDARY,
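/*
 * With MV_MAX_SG_CT == 256, the templates above advertise 128 S/G
 * entries to the block layer, leaving room for every entry to be split
 * in two at a 64KB boundary by mv_fill_sg().
 */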
540 static struct ata_port_operations mv5_ops = {
541 .inherits = &ata_sff_port_ops,
543 .qc_prep = mv_qc_prep,
544 .qc_issue = mv_qc_issue,
546 .freeze = mv_eh_freeze,
547 .thaw = mv_eh_thaw,
548 .prereset = mv_prereset,
549 .hardreset = mv_hardreset,
550 .postreset = mv_postreset,
551 .error_handler = ata_std_error_handler, /* avoid SFF EH */
552 .post_internal_cmd = ATA_OP_NULL,
554 .scr_read = mv5_scr_read,
555 .scr_write = mv5_scr_write,
557 .port_start = mv_port_start,
558 .port_stop = mv_port_stop,
561 static struct ata_port_operations mv6_ops = {
562 .inherits = &mv5_ops,
563 .qc_defer = ata_std_qc_defer,
564 .dev_config = mv6_dev_config,
565 .scr_read = mv_scr_read,
566 .scr_write = mv_scr_write,
569 static struct ata_port_operations mv_iie_ops = {
570 .inherits = &mv6_ops,
571 .dev_config = ATA_OP_NULL,
572 .qc_prep = mv_qc_prep_iie,
575 static const struct ata_port_info mv_port_info[] = {
576 { /* chip_504x */
577 .flags = MV_COMMON_FLAGS,
578 .pio_mask = 0x1f, /* pio0-4 */
579 .udma_mask = ATA_UDMA6,
580 .port_ops = &mv5_ops,
582 { /* chip_508x */
583 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
584 .pio_mask = 0x1f, /* pio0-4 */
585 .udma_mask = ATA_UDMA6,
586 .port_ops = &mv5_ops,
588 { /* chip_5080 */
589 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
590 .pio_mask = 0x1f, /* pio0-4 */
591 .udma_mask = ATA_UDMA6,
592 .port_ops = &mv5_ops,
594 { /* chip_604x */
595 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
596 ATA_FLAG_NCQ,
597 .pio_mask = 0x1f, /* pio0-4 */
598 .udma_mask = ATA_UDMA6,
599 .port_ops = &mv6_ops,
601 { /* chip_608x */
602 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
603 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
604 .pio_mask = 0x1f, /* pio0-4 */
605 .udma_mask = ATA_UDMA6,
606 .port_ops = &mv6_ops,
608 { /* chip_6042 */
609 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
610 ATA_FLAG_NCQ,
611 .pio_mask = 0x1f, /* pio0-4 */
612 .udma_mask = ATA_UDMA6,
613 .port_ops = &mv_iie_ops,
615 { /* chip_7042 */
616 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
617 ATA_FLAG_NCQ,
618 .pio_mask = 0x1f, /* pio0-4 */
619 .udma_mask = ATA_UDMA6,
620 .port_ops = &mv_iie_ops,
622 { /* chip_soc */
623 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
624 .pio_mask = 0x1f, /* pio0-4 */
625 .udma_mask = ATA_UDMA6,
626 .port_ops = &mv_iie_ops,
630 static const struct pci_device_id mv_pci_tbl[] = {
631 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
632 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
633 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
634 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
635 /* RocketRAID 1740/174x have different identifiers */
636 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
637 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
639 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
640 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
641 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
642 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
643 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
645 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
647 /* Adaptec 1430SA */
648 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
650 /* Marvell 7042 support */
651 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
653 /* Highpoint RocketRAID PCIe series */
654 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
655 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
657 { } /* terminate list */
660 static const struct mv_hw_ops mv5xxx_ops = {
661 .phy_errata = mv5_phy_errata,
662 .enable_leds = mv5_enable_leds,
663 .read_preamp = mv5_read_preamp,
664 .reset_hc = mv5_reset_hc,
665 .reset_flash = mv5_reset_flash,
666 .reset_bus = mv5_reset_bus,
669 static const struct mv_hw_ops mv6xxx_ops = {
670 .phy_errata = mv6_phy_errata,
671 .enable_leds = mv6_enable_leds,
672 .read_preamp = mv6_read_preamp,
673 .reset_hc = mv6_reset_hc,
674 .reset_flash = mv6_reset_flash,
675 .reset_bus = mv_reset_pci_bus,
678 static const struct mv_hw_ops mv_soc_ops = {
679 .phy_errata = mv6_phy_errata,
680 .enable_leds = mv_soc_enable_leds,
681 .read_preamp = mv_soc_read_preamp,
682 .reset_hc = mv_soc_reset_hc,
683 .reset_flash = mv_soc_reset_flash,
684 .reset_bus = mv_soc_reset_bus,
688 * Functions
691 static inline void writelfl(unsigned long data, void __iomem *addr)
693 writel(data, addr);
694 (void) readl(addr); /* flush to avoid PCI posted write */
697 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
699 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
702 static inline unsigned int mv_hc_from_port(unsigned int port)
704 return port >> MV_PORT_HC_SHIFT;
707 static inline unsigned int mv_hardport_from_port(unsigned int port)
709 return port & MV_PORT_MASK;
712 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
713 unsigned int port)
715 return mv_hc_base(base, mv_hc_from_port(port));
718 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
720 return mv_hc_base_from_port(base, port) +
721 MV_SATAHC_ARBTR_REG_SZ +
722 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
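/*
 * Worked example: port 0 resolves to base + 0x20000 + 0x2000 ==
 * base + 0x22000, while port 5 (HC 1, hard port 1) resolves to
 * base + 0x30000 + 0x2000 + 0x2000 == base + 0x34000.
 */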
725 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
727 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
728 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
730 return hc_mmio + ofs;
733 static inline void __iomem *mv_host_base(struct ata_host *host)
735 struct mv_host_priv *hpriv = host->private_data;
736 return hpriv->base;
739 static inline void __iomem *mv_ap_base(struct ata_port *ap)
741 return mv_port_base(mv_host_base(ap->host), ap->port_no);
744 static inline int mv_get_hc_count(unsigned long port_flags)
746 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
749 static void mv_set_edma_ptrs(void __iomem *port_mmio,
750 struct mv_host_priv *hpriv,
751 struct mv_port_priv *pp)
753 u32 index;
756 * initialize request queue
758 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
760 WARN_ON(pp->crqb_dma & 0x3ff);
761 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
762 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
763 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
765 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
766 writelfl((pp->crqb_dma & 0xffffffff) | index,
767 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
768 else
769 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
772 * initialize response queue
774 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
776 WARN_ON(pp->crpb_dma & 0xff);
777 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
779 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
780 writelfl((pp->crpb_dma & 0xffffffff) | index,
781 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
782 else
783 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
785 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
786 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
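/*
 * Note that the IN/OUT pointer registers written above double as the
 * low 32 bits of each queue's base address (see the "also contains
 * BASE_LO" comments), which is why the 1KB/256B-aligned base is OR'd
 * with the shifted index before the write.
 */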
790 * mv_start_dma - Enable eDMA engine
791 * @ap: ATA channel to manipulate
792 * @port_mmio: port base address
793 * @pp: port private data
794 * Verify the local cache of the eDMA state is accurate with a
795 * WARN_ON.
797 * LOCKING:
798 * Inherited from caller.
800 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
801 struct mv_port_priv *pp, u8 protocol)
803 int want_ncq = (protocol == ATA_PROT_NCQ);
805 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
806 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
807 if (want_ncq != using_ncq)
808 mv_stop_edma(ap);
810 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
811 struct mv_host_priv *hpriv = ap->host->private_data;
812 int hard_port = mv_hardport_from_port(ap->port_no);
813 void __iomem *hc_mmio = mv_hc_base_from_port(
814 mv_host_base(ap->host), hard_port);
815 u32 hc_irq_cause, ipending;
817 /* clear EDMA event indicators, if any */
818 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
820 /* clear EDMA interrupt indicator, if any */
821 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
822 ipending = (DEV_IRQ << hard_port) |
823 (CRPB_DMA_DONE << hard_port);
824 if (hc_irq_cause & ipending) {
825 writelfl(hc_irq_cause & ~ipending,
826 hc_mmio + HC_IRQ_CAUSE_OFS);
829 mv_edma_cfg(ap, want_ncq);
831 /* clear FIS IRQ Cause */
832 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
834 mv_set_edma_ptrs(port_mmio, hpriv, pp);
836 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
837 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
839 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
843 * mv_stop_edma_engine - Disable eDMA engine
844 * @port_mmio: io base address
846 * LOCKING:
847 * Inherited from caller.
849 static int mv_stop_edma_engine(void __iomem *port_mmio)
851 int i;
853 /* Disable eDMA. The disable bit auto clears. */
854 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
856 /* Wait for the chip to confirm eDMA is off. */
857 for (i = 10000; i > 0; i--) {
858 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
859 if (!(reg & EDMA_EN))
860 return 0;
861 udelay(10);
863 return -EIO;
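/*
 * The polling loop above allows up to 10000 * 10us, i.e. roughly 100ms,
 * for the EDMA engine to acknowledge the disable before giving up.
 */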
866 static int mv_stop_edma(struct ata_port *ap)
868 void __iomem *port_mmio = mv_ap_base(ap);
869 struct mv_port_priv *pp = ap->private_data;
871 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
872 return 0;
873 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
874 if (mv_stop_edma_engine(port_mmio)) {
875 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
876 return -EIO;
878 return 0;
881 #ifdef ATA_DEBUG
882 static void mv_dump_mem(void __iomem *start, unsigned bytes)
884 int b, w;
885 for (b = 0; b < bytes; ) {
886 DPRINTK("%p: ", start + b);
887 for (w = 0; b < bytes && w < 4; w++) {
888 printk("%08x ", readl(start + b));
889 b += sizeof(u32);
891 printk("\n");
894 #endif
896 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
898 #ifdef ATA_DEBUG
899 int b, w;
900 u32 dw;
901 for (b = 0; b < bytes; ) {
902 DPRINTK("%02x: ", b);
903 for (w = 0; b < bytes && w < 4; w++) {
904 (void) pci_read_config_dword(pdev, b, &dw);
905 printk("%08x ", dw);
906 b += sizeof(u32);
908 printk("\n");
910 #endif
912 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
913 struct pci_dev *pdev)
915 #ifdef ATA_DEBUG
916 void __iomem *hc_base = mv_hc_base(mmio_base,
917 port >> MV_PORT_HC_SHIFT);
918 void __iomem *port_base;
919 int start_port, num_ports, p, start_hc, num_hcs, hc;
921 if (0 > port) {
922 start_hc = start_port = 0;
923 num_ports = 8; /* should be benign for 4-port devices */
924 num_hcs = 2;
925 } else {
926 start_hc = port >> MV_PORT_HC_SHIFT;
927 start_port = port;
928 num_ports = num_hcs = 1;
930 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
931 num_ports > 1 ? num_ports - 1 : start_port);
933 if (NULL != pdev) {
934 DPRINTK("PCI config space regs:\n");
935 mv_dump_pci_cfg(pdev, 0x68);
937 DPRINTK("PCI regs:\n");
938 mv_dump_mem(mmio_base+0xc00, 0x3c);
939 mv_dump_mem(mmio_base+0xd00, 0x34);
940 mv_dump_mem(mmio_base+0xf00, 0x4);
941 mv_dump_mem(mmio_base+0x1d00, 0x6c);
942 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
943 hc_base = mv_hc_base(mmio_base, hc);
944 DPRINTK("HC regs (HC %i):\n", hc);
945 mv_dump_mem(hc_base, 0x1c);
947 for (p = start_port; p < start_port + num_ports; p++) {
948 port_base = mv_port_base(mmio_base, p);
949 DPRINTK("EDMA regs (port %i):\n", p);
950 mv_dump_mem(port_base, 0x54);
951 DPRINTK("SATA regs (port %i):\n", p);
952 mv_dump_mem(port_base+0x300, 0x60);
954 #endif
957 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
959 unsigned int ofs;
961 switch (sc_reg_in) {
962 case SCR_STATUS:
963 case SCR_CONTROL:
964 case SCR_ERROR:
965 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
966 break;
967 case SCR_ACTIVE:
968 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
969 break;
970 default:
971 ofs = 0xffffffffU;
972 break;
974 return ofs;
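/*
 * Example: SCR_STATUS (0) maps to 0x300, SCR_ERROR (1) to 0x304 and
 * SCR_CONTROL (2) to 0x308, while SCR_ACTIVE lives apart at 0x350.
 */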
977 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
979 unsigned int ofs = mv_scr_offset(sc_reg_in);
981 if (ofs != 0xffffffffU) {
982 *val = readl(mv_ap_base(ap) + ofs);
983 return 0;
984 } else
985 return -EINVAL;
988 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
990 unsigned int ofs = mv_scr_offset(sc_reg_in);
992 if (ofs != 0xffffffffU) {
993 writelfl(val, mv_ap_base(ap) + ofs);
994 return 0;
995 } else
996 return -EINVAL;
999 static void mv6_dev_config(struct ata_device *adev)
1002 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1003 * See mv_qc_prep() for more info.
1005 if (adev->flags & ATA_DFLAG_NCQ)
1006 if (adev->max_sectors > ATA_MAX_SECTORS)
1007 adev->max_sectors = ATA_MAX_SECTORS;
1010 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1012 u32 cfg;
1013 struct mv_port_priv *pp = ap->private_data;
1014 struct mv_host_priv *hpriv = ap->host->private_data;
1015 void __iomem *port_mmio = mv_ap_base(ap);
1017 /* set up non-NCQ EDMA configuration */
1018 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1020 if (IS_GEN_I(hpriv))
1021 cfg |= (1 << 8); /* enab config burst size mask */
1023 else if (IS_GEN_II(hpriv))
1024 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1026 else if (IS_GEN_IIE(hpriv)) {
1027 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1028 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1029 cfg |= (1 << 18); /* enab early completion */
1030 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1033 if (want_ncq) {
1034 cfg |= EDMA_CFG_NCQ;
1035 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1036 } else
1037 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1039 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1042 static void mv_port_free_dma_mem(struct ata_port *ap)
1044 struct mv_host_priv *hpriv = ap->host->private_data;
1045 struct mv_port_priv *pp = ap->private_data;
1046 int tag;
1048 if (pp->crqb) {
1049 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1050 pp->crqb = NULL;
1052 if (pp->crpb) {
1053 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1054 pp->crpb = NULL;
1057 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1058 * For later hardware, we have one unique sg_tbl per NCQ tag.
1060 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1061 if (pp->sg_tbl[tag]) {
1062 if (tag == 0 || !IS_GEN_I(hpriv))
1063 dma_pool_free(hpriv->sg_tbl_pool,
1064 pp->sg_tbl[tag],
1065 pp->sg_tbl_dma[tag]);
1066 pp->sg_tbl[tag] = NULL;
1072 * mv_port_start - Port specific init/start routine.
1073 * @ap: ATA channel to manipulate
1075 * Allocate and point to DMA memory, init port private memory,
1076 * zero indices.
1078 * LOCKING:
1079 * Inherited from caller.
1081 static int mv_port_start(struct ata_port *ap)
1083 struct device *dev = ap->host->dev;
1084 struct mv_host_priv *hpriv = ap->host->private_data;
1085 struct mv_port_priv *pp;
1086 void __iomem *port_mmio = mv_ap_base(ap);
1087 unsigned long flags;
1088 int tag;
1090 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1091 if (!pp)
1092 return -ENOMEM;
1093 ap->private_data = pp;
1095 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1096 if (!pp->crqb)
1097 return -ENOMEM;
1098 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1100 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1101 if (!pp->crpb)
1102 goto out_port_free_dma_mem;
1103 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1106 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1107 * For later hardware, we need one unique sg_tbl per NCQ tag.
1109 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1110 if (tag == 0 || !IS_GEN_I(hpriv)) {
1111 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1112 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1113 if (!pp->sg_tbl[tag])
1114 goto out_port_free_dma_mem;
1115 } else {
1116 pp->sg_tbl[tag] = pp->sg_tbl[0];
1117 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1121 spin_lock_irqsave(&ap->host->lock, flags);
1123 mv_edma_cfg(ap, 0);
1124 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1126 spin_unlock_irqrestore(&ap->host->lock, flags);
1128 /* Don't turn on EDMA here...do it before DMA commands only. Else
1129 * we'll be unable to send non-data, PIO, etc due to restricted access
1130 * to shadow regs.
1132 return 0;
1134 out_port_free_dma_mem:
1135 mv_port_free_dma_mem(ap);
1136 return -ENOMEM;
1140 * mv_port_stop - Port specific cleanup/stop routine.
1141 * @ap: ATA channel to manipulate
1143 * Stop DMA, cleanup port memory.
1145 * LOCKING:
1146 * This routine uses the host lock to protect the DMA stop.
1148 static void mv_port_stop(struct ata_port *ap)
1150 mv_stop_edma(ap);
1151 mv_port_free_dma_mem(ap);
1155 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1156 * @qc: queued command whose SG list to source from
1158 * Populate the SG list and mark the last entry.
1160 * LOCKING:
1161 * Inherited from caller.
1163 static void mv_fill_sg(struct ata_queued_cmd *qc)
1165 struct mv_port_priv *pp = qc->ap->private_data;
1166 struct scatterlist *sg;
1167 struct mv_sg *mv_sg, *last_sg = NULL;
1168 unsigned int si;
1170 mv_sg = pp->sg_tbl[qc->tag];
1171 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1172 dma_addr_t addr = sg_dma_address(sg);
1173 u32 sg_len = sg_dma_len(sg);
1175 while (sg_len) {
1176 u32 offset = addr & 0xffff;
1177 u32 len = sg_len;
1179 if (offset + sg_len > 0x10000)
1180 len = 0x10000 - offset;
1182 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1183 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1184 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1186 sg_len -= len;
1187 addr += len;
1189 last_sg = mv_sg;
1190 mv_sg++;
1194 if (likely(last_sg))
1195 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
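/*
 * Example of the splitting above: a single 96KB segment whose bus
 * address sits 32KB into a 64KB-aligned region is emitted as two ePRDs,
 * one of 32KB up to the boundary and one of 64KB (whose 16-bit length
 * field wraps to 0).
 */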
1198 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1200 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1201 (last ? CRQB_CMD_LAST : 0);
1202 *cmdw = cpu_to_le16(tmp);
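/*
 * Example: packing the command register (taskfile offset ATA_REG_CMD
 * == 7) as the last word yields data | (7 << 8) | CRQB_CMD_CS |
 * CRQB_CMD_LAST, i.e. 0x9700 | data.
 */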
1206 * mv_qc_prep - Host specific command preparation.
1207 * @qc: queued command to prepare
1209 * This routine simply redirects to the general purpose routine
1210 * if command is not DMA. Else, it handles prep of the CRQB
1211 * (command request block), does some sanity checking, and calls
1212 * the SG load routine.
1214 * LOCKING:
1215 * Inherited from caller.
1217 static void mv_qc_prep(struct ata_queued_cmd *qc)
1219 struct ata_port *ap = qc->ap;
1220 struct mv_port_priv *pp = ap->private_data;
1221 __le16 *cw;
1222 struct ata_taskfile *tf;
1223 u16 flags = 0;
1224 unsigned in_index;
1226 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1227 (qc->tf.protocol != ATA_PROT_NCQ))
1228 return;
1230 /* Fill in command request block
1232 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1233 flags |= CRQB_FLAG_READ;
1234 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1235 flags |= qc->tag << CRQB_TAG_SHIFT;
1237 /* get current queue index from software */
1238 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1240 pp->crqb[in_index].sg_addr =
1241 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1242 pp->crqb[in_index].sg_addr_hi =
1243 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1244 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1246 cw = &pp->crqb[in_index].ata_cmd[0];
1247 tf = &qc->tf;
1249 /* Sadly, the CRQB cannot accommodate all registers--there are
1250 * only 11 bytes...so we must pick and choose required
1251 * registers based on the command. So, we drop feature and
1252 * hob_feature for [RW] DMA commands, but they are needed for
1253 * NCQ. NCQ will drop hob_nsect.
1255 switch (tf->command) {
1256 case ATA_CMD_READ:
1257 case ATA_CMD_READ_EXT:
1258 case ATA_CMD_WRITE:
1259 case ATA_CMD_WRITE_EXT:
1260 case ATA_CMD_WRITE_FUA_EXT:
1261 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1262 break;
1263 case ATA_CMD_FPDMA_READ:
1264 case ATA_CMD_FPDMA_WRITE:
1265 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1266 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1267 break;
1268 default:
1269 /* The only other commands EDMA supports in non-queued and
1270 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1271 * of which are defined/used by Linux. If we get here, this
1272 * driver needs work.
1274 * FIXME: modify libata to give qc_prep a return value and
1275 * return error here.
1277 BUG_ON(tf->command);
1278 break;
1280 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1281 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1282 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1283 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1284 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1285 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1286 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1287 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1288 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1290 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1291 return;
1292 mv_fill_sg(qc);
1296 * mv_qc_prep_iie - Host specific command preparation.
1297 * @qc: queued command to prepare
1299 * This routine simply redirects to the general purpose routine
1300 * if command is not DMA. Else, it handles prep of the CRQB
1301 * (command request block), does some sanity checking, and calls
1302 * the SG load routine.
1304 * LOCKING:
1305 * Inherited from caller.
1307 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1309 struct ata_port *ap = qc->ap;
1310 struct mv_port_priv *pp = ap->private_data;
1311 struct mv_crqb_iie *crqb;
1312 struct ata_taskfile *tf;
1313 unsigned in_index;
1314 u32 flags = 0;
1316 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1317 (qc->tf.protocol != ATA_PROT_NCQ))
1318 return;
1320 /* Fill in Gen IIE command request block */
1321 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1322 flags |= CRQB_FLAG_READ;
1324 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1325 flags |= qc->tag << CRQB_TAG_SHIFT;
1326 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1328 /* get current queue index from software */
1329 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1331 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1332 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1333 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1334 crqb->flags = cpu_to_le32(flags);
1336 tf = &qc->tf;
1337 crqb->ata_cmd[0] = cpu_to_le32(
1338 (tf->command << 16) |
1339 (tf->feature << 24)
1341 crqb->ata_cmd[1] = cpu_to_le32(
1342 (tf->lbal << 0) |
1343 (tf->lbam << 8) |
1344 (tf->lbah << 16) |
1345 (tf->device << 24)
1347 crqb->ata_cmd[2] = cpu_to_le32(
1348 (tf->hob_lbal << 0) |
1349 (tf->hob_lbam << 8) |
1350 (tf->hob_lbah << 16) |
1351 (tf->hob_feature << 24)
1353 crqb->ata_cmd[3] = cpu_to_le32(
1354 (tf->nsect << 0) |
1355 (tf->hob_nsect << 8)
1358 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1359 return;
1360 mv_fill_sg(qc);
1364 * mv_qc_issue - Initiate a command to the host
1365 * @qc: queued command to start
1367 * This routine simply redirects to the general purpose routine
1368 * if command is not DMA. Else, it sanity checks our local
1369 * caches of the request producer/consumer indices then enables
1370 * DMA and bumps the request producer index.
1372 * LOCKING:
1373 * Inherited from caller.
1375 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1377 struct ata_port *ap = qc->ap;
1378 void __iomem *port_mmio = mv_ap_base(ap);
1379 struct mv_port_priv *pp = ap->private_data;
1380 u32 in_index;
1382 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1383 (qc->tf.protocol != ATA_PROT_NCQ)) {
1384 /* We're about to send a non-EDMA capable command to the
1385 * port. Turn off EDMA so there won't be problems accessing
1386 * shadow block, etc registers.
1388 mv_stop_edma(ap);
1389 return ata_qc_issue_prot(qc);
1392 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1394 pp->req_idx++;
1396 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1398 /* and write the request in pointer to kick the EDMA to life */
1399 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1400 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1402 return 0;
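/*
 * Example: with an idle queue (req_idx == 0), issuing one DMA command
 * bumps req_idx to 1 and writes an index of 1 << EDMA_REQ_Q_PTR_SHIFT
 * (CRQB entries are 32 bytes) OR'd into the IN pointer register,
 * telling the EDMA engine that one new request slot is ready.
 */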
1406 * mv_err_intr - Handle error interrupts on the port
1407 * @ap: ATA channel to manipulate
1408 * @qc: affected queued command, or NULL if none is active
1410 * In most cases, just clear the interrupt and move on. However,
1411 * some cases require an eDMA reset, which also performs a COMRESET.
1412 * The SERR case requires a clear of pending errors in the SATA
1413 * SERROR register. Finally, if the port disabled DMA,
1414 * update our cached copy to match.
1416 * LOCKING:
1417 * Inherited from caller.
1419 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1421 void __iomem *port_mmio = mv_ap_base(ap);
1422 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1423 struct mv_port_priv *pp = ap->private_data;
1424 struct mv_host_priv *hpriv = ap->host->private_data;
1425 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1426 unsigned int action = 0, err_mask = 0;
1427 struct ata_eh_info *ehi = &ap->link.eh_info;
1429 ata_ehi_clear_desc(ehi);
1431 if (!edma_enabled) {
1432 /* just a guess: do we need to do this? should we
1433 * expand this, and do it in all cases?
1435 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1436 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1439 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1441 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1444 * all generations share these EDMA error cause bits
1447 if (edma_err_cause & EDMA_ERR_DEV)
1448 err_mask |= AC_ERR_DEV;
1449 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1450 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1451 EDMA_ERR_INTRL_PAR)) {
1452 err_mask |= AC_ERR_ATA_BUS;
1453 action |= ATA_EH_RESET;
1454 ata_ehi_push_desc(ehi, "parity error");
1456 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1457 ata_ehi_hotplugged(ehi);
1458 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1459 "dev disconnect" : "dev connect");
1460 action |= ATA_EH_RESET;
1463 if (IS_GEN_I(hpriv)) {
1464 eh_freeze_mask = EDMA_EH_FREEZE_5;
1466 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1467 pp = ap->private_data;
1468 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1469 ata_ehi_push_desc(ehi, "EDMA self-disable");
1471 } else {
1472 eh_freeze_mask = EDMA_EH_FREEZE;
1474 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1475 pp = ap->private_data;
1476 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1477 ata_ehi_push_desc(ehi, "EDMA self-disable");
1480 if (edma_err_cause & EDMA_ERR_SERR) {
1481 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1482 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1483 err_mask = AC_ERR_ATA_BUS;
1484 action |= ATA_EH_RESET;
1488 /* Clear EDMA now that SERR cleanup done */
1489 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1491 if (!err_mask) {
1492 err_mask = AC_ERR_OTHER;
1493 action |= ATA_EH_RESET;
1496 ehi->serror |= serr;
1497 ehi->action |= action;
1499 if (qc)
1500 qc->err_mask |= err_mask;
1501 else
1502 ehi->err_mask |= err_mask;
1504 if (edma_err_cause & eh_freeze_mask)
1505 ata_port_freeze(ap);
1506 else
1507 ata_port_abort(ap);
1510 static void mv_intr_pio(struct ata_port *ap)
1512 struct ata_queued_cmd *qc;
1513 u8 ata_status;
1515 /* ignore spurious intr if drive still BUSY */
1516 ata_status = readb(ap->ioaddr.status_addr);
1517 if (unlikely(ata_status & ATA_BUSY))
1518 return;
1520 /* get active ATA command */
1521 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1522 if (unlikely(!qc)) /* no active tag */
1523 return;
1524 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1525 return;
1527 /* and finally, complete the ATA command */
1528 qc->err_mask |= ac_err_mask(ata_status);
1529 ata_qc_complete(qc);
1532 static void mv_intr_edma(struct ata_port *ap)
1534 void __iomem *port_mmio = mv_ap_base(ap);
1535 struct mv_host_priv *hpriv = ap->host->private_data;
1536 struct mv_port_priv *pp = ap->private_data;
1537 struct ata_queued_cmd *qc;
1538 u32 out_index, in_index;
1539 bool work_done = false;
1541 /* get h/w response queue pointer */
1542 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1543 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1545 while (1) {
1546 u16 status;
1547 unsigned int tag;
1549 /* get s/w response queue last-read pointer, and compare */
1550 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1551 if (in_index == out_index)
1552 break;
1554 /* 50xx: get active ATA command */
1555 if (IS_GEN_I(hpriv))
1556 tag = ap->link.active_tag;
1558 /* Gen II/IIE: get active ATA command via tag, to enable
1559 * support for queueing. this works transparently for
1560 * queued and non-queued modes.
1562 else
1563 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1565 qc = ata_qc_from_tag(ap, tag);
1567 /* For non-NCQ mode, the lower 8 bits of status
1568 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1569 * which should be zero if all went well.
1571 status = le16_to_cpu(pp->crpb[out_index].flags);
1572 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1573 mv_err_intr(ap, qc);
1574 return;
1577 /* and finally, complete the ATA command */
1578 if (qc) {
1579 qc->err_mask |=
1580 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1581 ata_qc_complete(qc);
1584 /* advance software response queue pointer, to
1585 * indicate (after the loop completes) to hardware
1586 * that we have consumed a response queue entry.
1588 work_done = true;
1589 pp->resp_idx++;
1592 if (work_done)
1593 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1594 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1595 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1599 * mv_host_intr - Handle all interrupts on the given host controller
1600 * @host: host specific structure
1601 * @relevant: port error bits relevant to this host controller
1602 * @hc: which host controller we're to look at
1604 * Read then write clear the HC interrupt status then walk each
1605 * port connected to the HC and see if it needs servicing. Port
1606 * success ints are reported in the HC interrupt status reg, the
1607 * port error ints are reported in the higher level main
1608 * interrupt status register and thus are passed in via the
1609 * 'relevant' argument.
1611 * LOCKING:
1612 * Inherited from caller.
1614 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1616 struct mv_host_priv *hpriv = host->private_data;
1617 void __iomem *mmio = hpriv->base;
1618 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1619 u32 hc_irq_cause;
1620 int port, port0, last_port;
1622 if (hc == 0)
1623 port0 = 0;
1624 else
1625 port0 = MV_PORTS_PER_HC;
1627 if (HAS_PCI(host))
1628 last_port = port0 + MV_PORTS_PER_HC;
1629 else
1630 last_port = port0 + hpriv->n_ports;
1631 /* we'll need the HC success int register in most cases */
1632 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1633 if (!hc_irq_cause)
1634 return;
1636 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1638 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1639 hc, relevant, hc_irq_cause);
1641 for (port = port0; port < last_port; port++) {
1642 struct ata_port *ap = host->ports[port];
1643 struct mv_port_priv *pp;
1644 int have_err_bits, hard_port, shift;
1646 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1647 continue;
1649 pp = ap->private_data;
1651 shift = port << 1; /* (port * 2) */
1652 if (port >= MV_PORTS_PER_HC)
1653 shift++; /* skip bit 8 in the HC Main IRQ reg */
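/*
 * e.g. port 5 ends up with shift == 11 (5 * 2, plus one to step over
 * the PORTS_0_3_COAL_DONE bit at bit 8), so its error/done bits sit at
 * bits 11 and 12 of 'relevant'.
 */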
1655 have_err_bits = ((PORT0_ERR << shift) & relevant);
1657 if (unlikely(have_err_bits)) {
1658 struct ata_queued_cmd *qc;
1660 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1661 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1662 continue;
1664 mv_err_intr(ap, qc);
1665 continue;
1668 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1670 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1671 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1672 mv_intr_edma(ap);
1673 } else {
1674 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1675 mv_intr_pio(ap);
1678 VPRINTK("EXIT\n");
1681 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1683 struct mv_host_priv *hpriv = host->private_data;
1684 struct ata_port *ap;
1685 struct ata_queued_cmd *qc;
1686 struct ata_eh_info *ehi;
1687 unsigned int i, err_mask, printed = 0;
1688 u32 err_cause;
1690 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1692 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1693 err_cause);
1695 DPRINTK("All regs @ PCI error\n");
1696 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1698 writelfl(0, mmio + hpriv->irq_cause_ofs);
1700 for (i = 0; i < host->n_ports; i++) {
1701 ap = host->ports[i];
1702 if (!ata_link_offline(&ap->link)) {
1703 ehi = &ap->link.eh_info;
1704 ata_ehi_clear_desc(ehi);
1705 if (!printed++)
1706 ata_ehi_push_desc(ehi,
1707 "PCI err cause 0x%08x", err_cause);
1708 err_mask = AC_ERR_HOST_BUS;
1709 ehi->action = ATA_EH_RESET;
1710 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1711 if (qc)
1712 qc->err_mask |= err_mask;
1713 else
1714 ehi->err_mask |= err_mask;
1716 ata_port_freeze(ap);
1722 * mv_interrupt - Main interrupt event handler
1723 * @irq: unused
1724 * @dev_instance: private data; in this case the host structure
1726 * Read the read only register to determine if any host
1727 * controllers have pending interrupts. If so, call lower level
1728 * routine to handle. Also check for PCI errors which are only
1729 * reported here.
1731 * LOCKING:
1732 * This routine holds the host lock while processing pending
1733 * interrupts.
1735 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1737 struct ata_host *host = dev_instance;
1738 struct mv_host_priv *hpriv = host->private_data;
1739 unsigned int hc, handled = 0, n_hcs;
1740 void __iomem *mmio = hpriv->base;
1741 u32 irq_stat, irq_mask;
1743 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
1744 spin_lock(&host->lock);
1746 irq_stat = readl(hpriv->main_cause_reg_addr);
1747 irq_mask = readl(hpriv->main_mask_reg_addr);
1749 /* check the cases where we either have nothing pending or have read
1750 * a bogus register value which can indicate HW removal or PCI fault
1752 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1753 goto out_unlock;
1755 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1757 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1758 mv_pci_error(host, mmio);
1759 handled = 1;
1760 goto out_unlock; /* skip all other HC irq handling */
1763 for (hc = 0; hc < n_hcs; hc++) {
1764 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1765 if (relevant) {
1766 mv_host_intr(host, relevant, hc);
1767 handled = 1;
1771 out_unlock:
1772 spin_unlock(&host->lock);
1774 return IRQ_RETVAL(handled);
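/*
 * The per-HC dispatch above relies on the main cause register layout:
 * HC0's port bits occupy bits 0-8 and HC1's occupy bits 9-17, hence
 * HC0_IRQ_PEND shifted by hc * HC_SHIFT (9) selects the right slice.
 */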
1777 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1779 unsigned int ofs;
1781 switch (sc_reg_in) {
1782 case SCR_STATUS:
1783 case SCR_ERROR:
1784 case SCR_CONTROL:
1785 ofs = sc_reg_in * sizeof(u32);
1786 break;
1787 default:
1788 ofs = 0xffffffffU;
1789 break;
1791 return ofs;
1794 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1796 struct mv_host_priv *hpriv = ap->host->private_data;
1797 void __iomem *mmio = hpriv->base;
1798 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1799 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1801 if (ofs != 0xffffffffU) {
1802 *val = readl(addr + ofs);
1803 return 0;
1804 } else
1805 return -EINVAL;
1808 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1810 struct mv_host_priv *hpriv = ap->host->private_data;
1811 void __iomem *mmio = hpriv->base;
1812 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1813 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1815 if (ofs != 0xffffffffU) {
1816 writelfl(val, addr + ofs);
1817 return 0;
1818 } else
1819 return -EINVAL;
1822 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1824 struct pci_dev *pdev = to_pci_dev(host->dev);
1825 int early_5080;
1827 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1829 if (!early_5080) {
1830 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1831 tmp |= (1 << 0);
1832 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1835 mv_reset_pci_bus(host, mmio);
1838 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1840 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1843 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1844 void __iomem *mmio)
1846 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1847 u32 tmp;
1849 tmp = readl(phy_mmio + MV5_PHY_MODE);
1851 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1852 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1855 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1857 u32 tmp;
1859 writel(0, mmio + MV_GPIO_PORT_CTL);
1861 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1863 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1864 tmp |= ~(1 << 0);
1865 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1868 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1869 unsigned int port)
1871 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1872 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1873 u32 tmp;
1874 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1876 if (fix_apm_sq) {
1877 tmp = readl(phy_mmio + MV5_LT_MODE);
1878 tmp |= (1 << 19);
1879 writel(tmp, phy_mmio + MV5_LT_MODE);
1881 tmp = readl(phy_mmio + MV5_PHY_CTL);
1882 tmp &= ~0x3;
1883 tmp |= 0x1;
1884 writel(tmp, phy_mmio + MV5_PHY_CTL);
1887 tmp = readl(phy_mmio + MV5_PHY_MODE);
1888 tmp &= ~mask;
1889 tmp |= hpriv->signal[port].pre;
1890 tmp |= hpriv->signal[port].amps;
1891 writel(tmp, phy_mmio + MV5_PHY_MODE);
1895 #undef ZERO
1896 #define ZERO(reg) writel(0, port_mmio + (reg))
1897 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1898 unsigned int port)
1900 void __iomem *port_mmio = mv_port_base(mmio, port);
1903 * The datasheet warns against setting ATA_RST when EDMA is active
1904 * (but doesn't say what the problem might be). So we first try
1905 * to disable the EDMA engine before doing the ATA_RST operation.
1907 mv_stop_edma_engine(port_mmio);
1908 mv_reset_channel(hpriv, mmio, port);
1910 ZERO(0x028); /* command */
1911 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1912 ZERO(0x004); /* timer */
1913 ZERO(0x008); /* irq err cause */
1914 ZERO(0x00c); /* irq err mask */
1915 ZERO(0x010); /* rq bah */
1916 ZERO(0x014); /* rq inp */
1917 ZERO(0x018); /* rq outp */
1918 ZERO(0x01c); /* respq bah */
1919 ZERO(0x024); /* respq outp */
1920 ZERO(0x020); /* respq inp */
1921 ZERO(0x02c); /* test control */
1922 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1924 #undef ZERO
1926 #define ZERO(reg) writel(0, hc_mmio + (reg))
1927 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1928 unsigned int hc)
1930 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1931 u32 tmp;
1933 ZERO(0x00c);
1934 ZERO(0x010);
1935 ZERO(0x014);
1936 ZERO(0x018);
1938 tmp = readl(hc_mmio + 0x20);
1939 tmp &= 0x1c1c1c1c;
1940 tmp |= 0x03030303;
1941 writel(tmp, hc_mmio + 0x20);
1943 #undef ZERO
1945 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1946 unsigned int n_hc)
1948 unsigned int hc, port;
1950 for (hc = 0; hc < n_hc; hc++) {
1951 for (port = 0; port < MV_PORTS_PER_HC; port++)
1952 mv5_reset_hc_port(hpriv, mmio,
1953 (hc * MV_PORTS_PER_HC) + port);
1955 mv5_reset_one_hc(hpriv, mmio, hc);
1958 return 0;
1961 #undef ZERO
1962 #define ZERO(reg) writel(0, mmio + (reg))
1963 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
1965 struct mv_host_priv *hpriv = host->private_data;
1966 u32 tmp;
1968 tmp = readl(mmio + MV_PCI_MODE);
1969 tmp &= 0xff00ffff;
1970 writel(tmp, mmio + MV_PCI_MODE);
1972 ZERO(MV_PCI_DISC_TIMER);
1973 ZERO(MV_PCI_MSI_TRIGGER);
1974 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1975 ZERO(HC_MAIN_IRQ_MASK_OFS);
1976 ZERO(MV_PCI_SERR_MASK);
1977 ZERO(hpriv->irq_cause_ofs);
1978 ZERO(hpriv->irq_mask_ofs);
1979 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1980 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1981 ZERO(MV_PCI_ERR_ATTRIBUTE);
1982 ZERO(MV_PCI_ERR_COMMAND);
1984 #undef ZERO
1986 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1988 u32 tmp;
1990 mv5_reset_flash(hpriv, mmio);
1992 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1993 tmp &= 0x3;
1994 tmp |= (1 << 5) | (1 << 6);
1995 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1999 * mv6_reset_hc - Perform the 6xxx global soft reset
2000 * @mmio: base address of the HBA
2002 * This routine only applies to 6xxx parts.
2004 * LOCKING:
2005 * Inherited from caller.
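/*
 * Reset sequence as implemented below: stop the PCI master and poll (up
 * to roughly 1ms) for PCI_MASTER_EMPTY, assert GLOB_SFT_RST, then clear
 * it together with STOP_PCI_MASTER so the master is re-enabled.  The
 * set/clear steps are each retried a few times; any failure is logged
 * and reported via a non-zero return code.
 */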
2007 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2008 unsigned int n_hc)
2010 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2011 int i, rc = 0;
2012 u32 t;
2014 /* Following procedure defined in PCI "main command and status
2015 * register" table.
2017 t = readl(reg);
2018 writel(t | STOP_PCI_MASTER, reg);
2020 for (i = 0; i < 1000; i++) {
2021 udelay(1);
2022 t = readl(reg);
2023 if (PCI_MASTER_EMPTY & t)
2024 break;
2026 if (!(PCI_MASTER_EMPTY & t)) {
2027 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2028 rc = 1;
2029 goto done;
2032 /* set reset */
2033 i = 5;
2034 do {
2035 writel(t | GLOB_SFT_RST, reg);
2036 t = readl(reg);
2037 udelay(1);
2038 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2040 if (!(GLOB_SFT_RST & t)) {
2041 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2042 rc = 1;
2043 goto done;
2046 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2047 i = 5;
2048 do {
2049 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2050 t = readl(reg);
2051 udelay(1);
2052 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2054 if (GLOB_SFT_RST & t) {
2055 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2056 rc = 1;
2058 done:
2059 return rc;
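/*
 * Pre-emphasis/amplitude defaults for 60xx parts: if bit 0 of
 * MV_RESET_CFG is clear, fixed values (amps = 0x7 << 8, pre = 0x1 << 5)
 * are used; otherwise the values are read back from the port's
 * PHY_MODE2 register.
 */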
2062 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2063 void __iomem *mmio)
2065 void __iomem *port_mmio;
2066 u32 tmp;
2068 tmp = readl(mmio + MV_RESET_CFG);
2069 if ((tmp & (1 << 0)) == 0) {
2070 hpriv->signal[idx].amps = 0x7 << 8;
2071 hpriv->signal[idx].pre = 0x1 << 5;
2072 return;
2075 port_mmio = mv_port_base(mmio, idx);
2076 tmp = readl(port_mmio + PHY_MODE2);
2078 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2079 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2082 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2084 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2087 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2088 unsigned int port)
2090 void __iomem *port_mmio = mv_port_base(mmio, port);
2092 u32 hp_flags = hpriv->hp_flags;
2093 int fix_phy_mode2 =
2094 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2095 int fix_phy_mode4 =
2096 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2097 u32 m2, tmp;
2099 if (fix_phy_mode2) {
2100 m2 = readl(port_mmio + PHY_MODE2);
2101 m2 &= ~(1 << 16);
2102 m2 |= (1 << 31);
2103 writel(m2, port_mmio + PHY_MODE2);
2105 udelay(200);
2107 m2 = readl(port_mmio + PHY_MODE2);
2108 m2 &= ~((1 << 16) | (1 << 31));
2109 writel(m2, port_mmio + PHY_MODE2);
2111 udelay(200);
2114 /* who knows what this magic does */
2115 tmp = readl(port_mmio + PHY_MODE3);
2116 tmp &= ~0x7F800000;
2117 tmp |= 0x2A800000;
2118 writel(tmp, port_mmio + PHY_MODE3);
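/* On 60X1B2 parts, PHY_MODE3 is saved before the PHY_MODE4 write below
 * and restored afterwards; apparently updating PHY_MODE4 can disturb
 * PHY_MODE3 on that stepping, presumably as part of the same FEr SATA#10
 * workaround.
 */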
2120 if (fix_phy_mode4) {
2121 u32 m4;
2123 m4 = readl(port_mmio + PHY_MODE4);
2125 if (hp_flags & MV_HP_ERRATA_60X1B2)
2126 tmp = readl(port_mmio + PHY_MODE3);
2128 /* workaround for errata FEr SATA#10 (part 1) */
2129 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2131 writel(m4, port_mmio + PHY_MODE4);
2133 if (hp_flags & MV_HP_ERRATA_60X1B2)
2134 writel(tmp, port_mmio + PHY_MODE3);
2137 /* Revert values of pre-emphasis and signal amps to the saved ones */
2138 m2 = readl(port_mmio + PHY_MODE2);
2140 m2 &= ~MV_M2_PREAMP_MASK;
2141 m2 |= hpriv->signal[port].amps;
2142 m2 |= hpriv->signal[port].pre;
2143 m2 &= ~(1 << 16);
2145 /* according to mvSata 3.6.1, some IIE values are fixed */
2146 if (IS_GEN_IIE(hpriv)) {
2147 m2 &= ~0xC30FF01F;
2148 m2 |= 0x0000900F;
2151 writel(m2, port_mmio + PHY_MODE2);
2154 /* TODO: use the generic LED interface to configure the SATA Presence */
2155 /* & Activity LEDs on the board */
2156 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2157 void __iomem *mmio)
2159 return;
2162 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2163 void __iomem *mmio)
2165 void __iomem *port_mmio;
2166 u32 tmp;
2168 port_mmio = mv_port_base(mmio, idx);
2169 tmp = readl(port_mmio + PHY_MODE2);
2171 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2172 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2175 #undef ZERO
2176 #define ZERO(reg) writel(0, port_mmio + (reg))
2177 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2178 void __iomem *mmio, unsigned int port)
2180 void __iomem *port_mmio = mv_port_base(mmio, port);
2183 * The datasheet warns against setting ATA_RST when EDMA is active
2184 * (but doesn't say what the problem might be). So we first try
2185 * to disable the EDMA engine before doing the ATA_RST operation.
2187 mv_stop_edma_engine(port_mmio);
2188 mv_reset_channel(hpriv, mmio, port);
2190 ZERO(0x028); /* command */
2191 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2192 ZERO(0x004); /* timer */
2193 ZERO(0x008); /* irq err cause */
2194 ZERO(0x00c); /* irq err mask */
2195 ZERO(0x010); /* rq bah */
2196 ZERO(0x014); /* rq inp */
2197 ZERO(0x018); /* rq outp */
2198 ZERO(0x01c); /* respq bah */
2199 ZERO(0x024); /* respq outp */
2200 ZERO(0x020); /* respq inp */
2201 ZERO(0x02c); /* test control */
2202 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2205 #undef ZERO
2207 #define ZERO(reg) writel(0, hc_mmio + (reg))
2208 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2209 void __iomem *mmio)
2211 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2213 ZERO(0x00c);
2214 ZERO(0x010);
2215 ZERO(0x014);
2219 #undef ZERO
2221 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2222 void __iomem *mmio, unsigned int n_hc)
2224 unsigned int port;
2226 for (port = 0; port < hpriv->n_ports; port++)
2227 mv_soc_reset_hc_port(hpriv, mmio, port);
2229 mv_soc_reset_one_hc(hpriv, mmio);
2231 return 0;
2234 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2235 void __iomem *mmio)
2237 return;
2240 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2242 return;
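/*
 * mv_setup_ifctl: program the per-port SATA_INTERFACE_CFG register.
 * The fixed bit pattern written below comes from the chip spec (see the
 * in-line comment); bit 7 enables Gen2i (3.0 Gb/s) operation, so e.g.
 * mv_setup_ifctl(port_mmio, 1) allows Gen2i while
 * mv_setup_ifctl(port_mmio, 0) clears that bit, presumably limiting the
 * link to 1.5 Gb/s.
 */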
2245 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2247 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2249 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2250 if (want_gen2i)
2251 ifctl |= (1 << 7); /* enable gen2i speed */
2252 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2256 * Caller must ensure that EDMA is not active,
2257 * by first doing mv_stop_edma() where needed.
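/*
 * mv_reset_channel: assert ATA_RST, re-program the interface control
 * register for Gen2i on Gen II/IIE parts (SATA_INTERFACE_CFG survives
 * the reset, per the comment below), strobe ATA_RST again with a 25us
 * propagation delay, release it, then run the per-chip PHY errata
 * handler.  Gen I parts get an extra 1ms settling delay at the end.
 */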
2259 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2260 unsigned int port_no)
2262 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2264 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2266 if (!IS_GEN_I(hpriv)) {
2267 /* Enable 3.0gb/s link speed */
2268 mv_setup_ifctl(port_mmio, 1);
2271 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2272 * link, and physical layers. It resets all SATA interface registers
2273 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2275 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2276 udelay(25); /* allow reset propagation */
2277 writelfl(0, port_mmio + EDMA_CMD_OFS);
2279 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2281 if (IS_GEN_I(hpriv))
2282 mdelay(1);
2286 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2287 * @ap: ATA channel to manipulate
2289 * Part of this is taken from __sata_phy_reset and modified to
2290 * not sleep since this routine gets called from interrupt level.
2292 * LOCKING:
2293 * Inherited from caller. This is coded to be safe to call at
2294 * interrupt level, i.e. it does not sleep.
2296 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2297 unsigned long deadline)
2299 struct mv_port_priv *pp = ap->private_data;
2300 struct mv_host_priv *hpriv = ap->host->private_data;
2301 void __iomem *port_mmio = mv_ap_base(ap);
2302 int retry = 5;
2303 u32 sstatus;
2305 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2307 #ifdef DEBUG
2309 u32 sstatus, serror, scontrol;
2311 mv_scr_read(ap, SCR_STATUS, &sstatus);
2312 mv_scr_read(ap, SCR_ERROR, &serror);
2313 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2314 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2315 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2317 #endif
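/*
 * COMRESET sequence: pulse the DET field in SControl (write 0x301, then
 * 0x300), then poll SStatus until its low bits read 3 (device present,
 * phy established) or 0 (no device), or the deadline expires.  Gen II
 * parts retry the whole sequence unless SStatus settles at 0x0, 0x113
 * or 0x123 (errata workaround below).
 */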
2319 /* Issue COMRESET via SControl */
2320 comreset_retry:
2321 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2322 msleep(1);
2324 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2325 msleep(20);
2327 do {
2328 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2329 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2330 break;
2332 msleep(1);
2333 } while (time_before(jiffies, deadline));
2335 /* work around errata */
2336 if (IS_GEN_II(hpriv) &&
2337 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2338 (retry-- > 0))
2339 goto comreset_retry;
2341 #ifdef DEBUG
2343 u32 sstatus, serror, scontrol;
2345 mv_scr_read(ap, SCR_STATUS, &sstatus);
2346 mv_scr_read(ap, SCR_ERROR, &serror);
2347 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2348 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2349 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2351 #endif
2353 if (ata_link_offline(&ap->link)) {
2354 *class = ATA_DEV_NONE;
2355 return;
2358 /* even after SStatus reflects that device is ready,
2359 * it seems to take a while for link to be fully
2360 * established (and thus Status no longer 0x80/0x7F),
2361 * so we poll a bit for that here.
2363 retry = 20;
2364 while (1) {
2365 u8 drv_stat = ata_check_status(ap);
2366 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2367 break;
2368 msleep(500);
2369 if (retry-- <= 0)
2370 break;
2371 if (time_after(jiffies, deadline))
2372 break;
2375 /* FIXME: if we passed the deadline, the following
2376 * code probably produces an invalid result
2379 /* finally, read device signature from TF registers */
2380 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2382 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2384 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2386 VPRINTK("EXIT\n");
2389 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2391 mv_stop_edma(link->ap);
2392 return 0;
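/*
 * mv_hardreset: EDMA has already been stopped by mv_prereset() above,
 * so reset the channel, clear the EDMA-enabled flag, and let
 * mv_phy_reset() perform COMRESET and device classification.
 */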
2395 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2396 unsigned long deadline)
2398 struct ata_port *ap = link->ap;
2399 struct mv_host_priv *hpriv = ap->host->private_data;
2400 struct mv_port_priv *pp = ap->private_data;
2401 void __iomem *mmio = hpriv->base;
2403 mv_reset_channel(hpriv, mmio, ap->port_no);
2404 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2405 mv_phy_reset(ap, class, deadline);
2407 return 0;
2410 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2412 struct ata_port *ap = link->ap;
2413 u32 serr;
2415 /* print link status */
2416 sata_print_link_status(link);
2418 /* clear SError */
2419 sata_scr_read(link, SCR_ERROR, &serr);
2420 sata_scr_write_flush(link, SCR_ERROR, serr);
2422 /* bail out if no device is present */
2423 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2424 DPRINTK("EXIT, no device\n");
2425 return;
2428 /* set up device control */
2429 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
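/*
 * In the main IRQ mask register each port owns two adjacent bits
 * (error/done).  Ports 0-3 start at bit 0; ports 4-7 are offset by one
 * extra bit because a coalescing bit for ports 0-3 sits between the two
 * groups.  Example: port 5 -> shift = 5*2 + 1 = 11, mask = 0x3 << 11.
 * mv_eh_freeze() clears these bits; mv_eh_thaw() sets them again after
 * clearing any pending EDMA error and HC interrupt causes for the port.
 */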
2432 static void mv_eh_freeze(struct ata_port *ap)
2434 struct mv_host_priv *hpriv = ap->host->private_data;
2435 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2436 u32 tmp, mask;
2437 unsigned int shift;
2439 /* FIXME: handle coalescing completion events properly */
2441 shift = ap->port_no * 2;
2442 if (hc > 0)
2443 shift++;
2445 mask = 0x3 << shift;
2447 /* disable assertion of portN err, done events */
2448 tmp = readl(hpriv->main_mask_reg_addr);
2449 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2452 static void mv_eh_thaw(struct ata_port *ap)
2454 struct mv_host_priv *hpriv = ap->host->private_data;
2455 void __iomem *mmio = hpriv->base;
2456 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2457 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2458 void __iomem *port_mmio = mv_ap_base(ap);
2459 u32 tmp, mask, hc_irq_cause;
2460 unsigned int shift, hc_port_no = ap->port_no;
2462 /* FIXME: handle coalescing completion events properly */
2464 shift = ap->port_no * 2;
2465 if (hc > 0) {
2466 shift++;
2467 hc_port_no -= 4;
2470 mask = 0x3 << shift;
2472 /* clear EDMA errors on this port */
2473 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2475 /* clear pending irq events */
2476 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2477 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2478 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2479 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2481 /* enable assertion of portN err, done events */
2482 tmp = readl(hpriv->main_mask_reg_addr);
2483 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2487 * mv_port_init - Perform some early initialization on a single port.
2488 * @port: libata data structure storing shadow register addresses
2489 * @port_mmio: base address of the port
2491 * Initialize shadow register mmio addresses, clear outstanding
2492 * interrupts on the port, and unmask interrupts for the future
2493 * start of the port.
2495 * LOCKING:
2496 * Inherited from caller.
2498 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2500 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2501 unsigned serr_ofs;
2503 /* PIO related setup
2505 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2506 port->error_addr =
2507 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2508 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2509 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2510 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2511 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2512 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2513 port->status_addr =
2514 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2515 /* special case: control/altstatus doesn't have ATA_REG_ address */
2516 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2518 /* unused: */
2519 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2521 /* Clear any currently outstanding port interrupt conditions */
2522 serr_ofs = mv_scr_offset(SCR_ERROR);
2523 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2524 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2526 /* unmask all non-transient EDMA error interrupts */
2527 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2529 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2530 readl(port_mmio + EDMA_CFG_OFS),
2531 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2532 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2535 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2537 struct pci_dev *pdev = to_pci_dev(host->dev);
2538 struct mv_host_priv *hpriv = host->private_data;
2539 u32 hp_flags = hpriv->hp_flags;
2541 switch (board_idx) {
2542 case chip_5080:
2543 hpriv->ops = &mv5xxx_ops;
2544 hp_flags |= MV_HP_GEN_I;
2546 switch (pdev->revision) {
2547 case 0x1:
2548 hp_flags |= MV_HP_ERRATA_50XXB0;
2549 break;
2550 case 0x3:
2551 hp_flags |= MV_HP_ERRATA_50XXB2;
2552 break;
2553 default:
2554 dev_printk(KERN_WARNING, &pdev->dev,
2555 "Applying 50XXB2 workarounds to unknown rev\n");
2556 hp_flags |= MV_HP_ERRATA_50XXB2;
2557 break;
2559 break;
2561 case chip_504x:
2562 case chip_508x:
2563 hpriv->ops = &mv5xxx_ops;
2564 hp_flags |= MV_HP_GEN_I;
2566 switch (pdev->revision) {
2567 case 0x0:
2568 hp_flags |= MV_HP_ERRATA_50XXB0;
2569 break;
2570 case 0x3:
2571 hp_flags |= MV_HP_ERRATA_50XXB2;
2572 break;
2573 default:
2574 dev_printk(KERN_WARNING, &pdev->dev,
2575 "Applying B2 workarounds to unknown rev\n");
2576 hp_flags |= MV_HP_ERRATA_50XXB2;
2577 break;
2579 break;
2581 case chip_604x:
2582 case chip_608x:
2583 hpriv->ops = &mv6xxx_ops;
2584 hp_flags |= MV_HP_GEN_II;
2586 switch (pdev->revision) {
2587 case 0x7:
2588 hp_flags |= MV_HP_ERRATA_60X1B2;
2589 break;
2590 case 0x9:
2591 hp_flags |= MV_HP_ERRATA_60X1C0;
2592 break;
2593 default:
2594 dev_printk(KERN_WARNING, &pdev->dev,
2595 "Applying B2 workarounds to unknown rev\n");
2596 hp_flags |= MV_HP_ERRATA_60X1B2;
2597 break;
2599 break;
2601 case chip_7042:
2602 hp_flags |= MV_HP_PCIE;
2603 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2604 (pdev->device == 0x2300 || pdev->device == 0x2310))
2607 * Highpoint RocketRAID PCIe 23xx series cards:
2609 * Unconfigured drives are treated as "Legacy"
2610 * by the BIOS, and it overwrites sector 8 with
2611 * a "Lgcy" metadata block prior to Linux boot.
2613 * Configured drives (RAID or JBOD) leave sector 8
2614 * alone, but instead overwrite a high numbered
2615 * sector for the RAID metadata. This sector can
2616 * be determined exactly, by truncating the physical
2617 * drive capacity to a nice even GB value.
2619 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2621 * Warn the user, lest they think we're just buggy.
2623 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2624 " BIOS CORRUPTS DATA on all attached drives,"
2625 " regardless of if/how they are configured."
2626 " BEWARE!\n");
2627 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2628 " use sectors 8-9 on \"Legacy\" drives,"
2629 " and avoid the final two gigabytes on"
2630 " all RocketRAID BIOS initialized drives.\n");
2632 case chip_6042:
2633 hpriv->ops = &mv6xxx_ops;
2634 hp_flags |= MV_HP_GEN_IIE;
2636 switch (pdev->revision) {
2637 case 0x0:
2638 hp_flags |= MV_HP_ERRATA_XX42A0;
2639 break;
2640 case 0x1:
2641 hp_flags |= MV_HP_ERRATA_60X1C0;
2642 break;
2643 default:
2644 dev_printk(KERN_WARNING, &pdev->dev,
2645 "Applying 60X1C0 workarounds to unknown rev\n");
2646 hp_flags |= MV_HP_ERRATA_60X1C0;
2647 break;
2649 break;
2650 case chip_soc:
2651 hpriv->ops = &mv_soc_ops;
2652 hp_flags |= MV_HP_ERRATA_60X1C0;
2653 break;
2655 default:
2656 dev_printk(KERN_ERR, host->dev,
2657 "BUG: invalid board index %u\n", board_idx);
2658 return 1;
2661 hpriv->hp_flags = hp_flags;
2662 if (hp_flags & MV_HP_PCIE) {
2663 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2664 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2665 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2666 } else {
2667 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2668 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2669 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2672 return 0;
2676 * mv_init_host - Perform some early initialization of the host.
2677 * @host: ATA host to initialize
2678 * @board_idx: controller index
2680 * If possible, do an early global reset of the host. Then do
2681 * our port init and clear/unmask all/relevant host interrupts.
2683 * LOCKING:
2684 * Inherited from caller.
2686 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2688 int rc = 0, n_hc, port, hc;
2689 struct mv_host_priv *hpriv = host->private_data;
2690 void __iomem *mmio = hpriv->base;
2692 rc = mv_chip_id(host, board_idx);
2693 if (rc)
2694 goto done;
2696 if (HAS_PCI(host)) {
2697 hpriv->main_cause_reg_addr = hpriv->base +
2698 HC_MAIN_IRQ_CAUSE_OFS;
2699 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2700 } else {
2701 hpriv->main_cause_reg_addr = hpriv->base +
2702 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2703 hpriv->main_mask_reg_addr = hpriv->base +
2704 HC_SOC_MAIN_IRQ_MASK_OFS;
2706 /* global interrupt mask */
2707 writel(0, hpriv->main_mask_reg_addr);
2709 n_hc = mv_get_hc_count(host->ports[0]->flags);
2711 for (port = 0; port < host->n_ports; port++)
2712 hpriv->ops->read_preamp(hpriv, port, mmio);
2714 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2715 if (rc)
2716 goto done;
2718 hpriv->ops->reset_flash(hpriv, mmio);
2719 hpriv->ops->reset_bus(host, mmio);
2720 hpriv->ops->enable_leds(hpriv, mmio);
2722 for (port = 0; port < host->n_ports; port++) {
2723 struct ata_port *ap = host->ports[port];
2724 void __iomem *port_mmio = mv_port_base(mmio, port);
2726 mv_port_init(&ap->ioaddr, port_mmio);
2728 #ifdef CONFIG_PCI
2729 if (HAS_PCI(host)) {
2730 unsigned int offset = port_mmio - mmio;
2731 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2732 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2734 #endif
2737 for (hc = 0; hc < n_hc; hc++) {
2738 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2740 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2741 "(before clear)=0x%08x\n", hc,
2742 readl(hc_mmio + HC_CFG_OFS),
2743 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2745 /* Clear any currently outstanding hc interrupt conditions */
2746 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2749 if (HAS_PCI(host)) {
2750 /* Clear any currently outstanding host interrupt conditions */
2751 writelfl(0, mmio + hpriv->irq_cause_ofs);
2753 /* and unmask interrupt generation for host regs */
2754 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2755 if (IS_GEN_I(hpriv))
2756 writelfl(~HC_MAIN_MASKED_IRQS_5,
2757 hpriv->main_mask_reg_addr);
2758 else
2759 writelfl(~HC_MAIN_MASKED_IRQS,
2760 hpriv->main_mask_reg_addr);
2762 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2763 "PCI int cause/mask=0x%08x/0x%08x\n",
2764 readl(hpriv->main_cause_reg_addr),
2765 readl(hpriv->main_mask_reg_addr),
2766 readl(mmio + hpriv->irq_cause_ofs),
2767 readl(mmio + hpriv->irq_mask_ofs));
2768 } else {
2769 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2770 hpriv->main_mask_reg_addr);
2771 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2772 readl(hpriv->main_cause_reg_addr),
2773 readl(hpriv->main_mask_reg_addr));
2775 done:
2776 return rc;
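/*
 * The CRQB, CRPB and SG-table pools below are created with
 * dmam_pool_create(), i.e. they are device-managed and released
 * automatically by devres, so the error paths can simply return.
 */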
2779 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2781 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2782 MV_CRQB_Q_SZ, 0);
2783 if (!hpriv->crqb_pool)
2784 return -ENOMEM;
2786 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2787 MV_CRPB_Q_SZ, 0);
2788 if (!hpriv->crpb_pool)
2789 return -ENOMEM;
2791 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2792 MV_SG_TBL_SZ, 0);
2793 if (!hpriv->sg_tbl_pool)
2794 return -ENOMEM;
2796 return 0;
2800 * mv_platform_probe - handle a positive probe of an SoC Marvell
2801 * host
2802 * @pdev: platform device found
2804 * LOCKING:
2805 * Inherited from caller.
2807 static int mv_platform_probe(struct platform_device *pdev)
2809 static int printed_version;
2810 const struct mv_sata_platform_data *mv_platform_data;
2811 const struct ata_port_info *ppi[] =
2812 { &mv_port_info[chip_soc], NULL };
2813 struct ata_host *host;
2814 struct mv_host_priv *hpriv;
2815 struct resource *res;
2816 int n_ports, rc;
2818 if (!printed_version++)
2819 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2822 * Simple resource validation ..
2824 if (unlikely(pdev->num_resources != 2)) {
2825 dev_err(&pdev->dev, "invalid number of resources\n");
2826 return -EINVAL;
2830 * Get the register base first
2832 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2833 if (res == NULL)
2834 return -EINVAL;
2836 /* allocate host */
2837 mv_platform_data = pdev->dev.platform_data;
2838 n_ports = mv_platform_data->n_ports;
2840 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2841 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2843 if (!host || !hpriv)
2844 return -ENOMEM;
2845 host->private_data = hpriv;
2846 hpriv->n_ports = n_ports;
2848 host->iomap = NULL;
2849 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2850 res->end - res->start + 1);
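/* The platform resource presumably maps only the SATAHC register block,
 * so bias the base downwards by MV_SATAHC0_REG_BASE; the shared
 * mv_port_base()/mv_hc_base() offset arithmetic then works the same way
 * as with the full PCI BAR mapping.
 */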
2851 hpriv->base -= MV_SATAHC0_REG_BASE;
2853 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2854 if (rc)
2855 return rc;
2857 /* initialize adapter */
2858 rc = mv_init_host(host, chip_soc);
2859 if (rc)
2860 return rc;
2862 dev_printk(KERN_INFO, &pdev->dev,
2863 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2864 host->n_ports);
2866 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2867 IRQF_SHARED, &mv6_sht);
2872 * mv_platform_remove - unplug a platform interface
2873 * @pdev: platform device
2875 * A platform bus SATA device has been unplugged. Perform the needed
2876 * cleanup. Also called on module unload for any active devices.
2878 static int __devexit mv_platform_remove(struct platform_device *pdev)
2880 struct device *dev = &pdev->dev;
2881 struct ata_host *host = dev_get_drvdata(dev);
2883 ata_host_detach(host);
2884 return 0;
2887 static struct platform_driver mv_platform_driver = {
2888 .probe = mv_platform_probe,
2889 .remove = __devexit_p(mv_platform_remove),
2890 .driver = {
2891 .name = DRV_NAME,
2892 .owner = THIS_MODULE,
2897 #ifdef CONFIG_PCI
2898 static int mv_pci_init_one(struct pci_dev *pdev,
2899 const struct pci_device_id *ent);
2902 static struct pci_driver mv_pci_driver = {
2903 .name = DRV_NAME,
2904 .id_table = mv_pci_tbl,
2905 .probe = mv_pci_init_one,
2906 .remove = ata_pci_remove_one,
2910 * module options
2912 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2915 /* move to PCI layer or libata core? */
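/*
 * DMA mask setup: if a 64-bit streaming mask is accepted, try a 64-bit
 * consistent mask and fall back to a 32-bit consistent mask if that
 * fails; without 64-bit support, both masks are set to 32 bits.
 */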
2916 static int pci_go_64(struct pci_dev *pdev)
2918 int rc;
2920 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2921 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2922 if (rc) {
2923 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2924 if (rc) {
2925 dev_printk(KERN_ERR, &pdev->dev,
2926 "64-bit DMA enable failed\n");
2927 return rc;
2930 } else {
2931 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2932 if (rc) {
2933 dev_printk(KERN_ERR, &pdev->dev,
2934 "32-bit DMA enable failed\n");
2935 return rc;
2937 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2938 if (rc) {
2939 dev_printk(KERN_ERR, &pdev->dev,
2940 "32-bit consistent DMA enable failed\n");
2941 return rc;
2945 return rc;
2949 * mv_print_info - Dump key info to kernel log for perusal.
2950 * @host: ATA host to print info about
2952 * FIXME: complete this.
2954 * LOCKING:
2955 * Inherited from caller.
2957 static void mv_print_info(struct ata_host *host)
2959 struct pci_dev *pdev = to_pci_dev(host->dev);
2960 struct mv_host_priv *hpriv = host->private_data;
2961 u8 scc;
2962 const char *scc_s, *gen;
2964 /* Use this to determine the HW stepping of the chip so we know
2965 * what errata to work around
2967 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2968 if (scc == 0)
2969 scc_s = "SCSI";
2970 else if (scc == 0x01)
2971 scc_s = "RAID";
2972 else
2973 scc_s = "?";
2975 if (IS_GEN_I(hpriv))
2976 gen = "I";
2977 else if (IS_GEN_II(hpriv))
2978 gen = "II";
2979 else if (IS_GEN_IIE(hpriv))
2980 gen = "IIE";
2981 else
2982 gen = "?";
2984 dev_printk(KERN_INFO, &pdev->dev,
2985 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2986 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2987 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2991 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2992 * @pdev: PCI device found
2993 * @ent: PCI device ID entry for the matched host
2995 * LOCKING:
2996 * Inherited from caller.
2998 static int mv_pci_init_one(struct pci_dev *pdev,
2999 const struct pci_device_id *ent)
3001 static int printed_version;
3002 unsigned int board_idx = (unsigned int)ent->driver_data;
3003 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3004 struct ata_host *host;
3005 struct mv_host_priv *hpriv;
3006 int n_ports, rc;
3008 if (!printed_version++)
3009 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3011 /* allocate host */
3012 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3014 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3015 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3016 if (!host || !hpriv)
3017 return -ENOMEM;
3018 host->private_data = hpriv;
3019 hpriv->n_ports = n_ports;
3021 /* acquire resources */
3022 rc = pcim_enable_device(pdev);
3023 if (rc)
3024 return rc;
3026 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3027 if (rc == -EBUSY)
3028 pcim_pin_device(pdev);
3029 if (rc)
3030 return rc;
3031 host->iomap = pcim_iomap_table(pdev);
3032 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3034 rc = pci_go_64(pdev);
3035 if (rc)
3036 return rc;
3038 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3039 if (rc)
3040 return rc;
3042 /* initialize adapter */
3043 rc = mv_init_host(host, board_idx);
3044 if (rc)
3045 return rc;
3047 /* Enable interrupts */
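/* If MSI was requested but cannot be enabled, make sure legacy INTx
 * interrupts are turned back on for this device.
 */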
3048 if (msi && pci_enable_msi(pdev))
3049 pci_intx(pdev, 1);
3051 mv_dump_pci_cfg(pdev, 0x68);
3052 mv_print_info(host);
3054 pci_set_master(pdev);
3055 pci_try_set_mwi(pdev);
3056 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3057 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3059 #endif
3061 static int mv_platform_probe(struct platform_device *pdev);
3062 static int __devexit mv_platform_remove(struct platform_device *pdev);
3064 static int __init mv_init(void)
3066 int rc = -ENODEV;
3067 #ifdef CONFIG_PCI
3068 rc = pci_register_driver(&mv_pci_driver);
3069 if (rc < 0)
3070 return rc;
3071 #endif
3072 rc = platform_driver_register(&mv_platform_driver);
3074 #ifdef CONFIG_PCI
3075 if (rc < 0)
3076 pci_unregister_driver(&mv_pci_driver);
3077 #endif
3078 return rc;
3081 static void __exit mv_exit(void)
3083 #ifdef CONFIG_PCI
3084 pci_unregister_driver(&mv_pci_driver);
3085 #endif
3086 platform_driver_unregister(&mv_platform_driver);
3089 MODULE_AUTHOR("Brett Russ");
3090 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3091 MODULE_LICENSE("GPL");
3092 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3093 MODULE_VERSION(DRV_VERSION);
3094 MODULE_ALIAS("platform:sata_mv");
3096 #ifdef CONFIG_PCI
3097 module_param(msi, int, 0444);
3098 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3099 #endif
3101 module_init(mv_init);
3102 module_exit(mv_exit);