drivers/ata/sata_mv.c
1 /*
2 * sata_mv.c - Marvell SATA support
4 * Copyright 2008: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 sata_mv TODO list:
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
33 2) Improve/fix IRQ and error handling sequences.
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
37 4) Think about TCQ support here, and for libata in general
38 with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
43 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
45 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
47 8) Develop a low-power-consumption strategy, and implement it.
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 like that.
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead saved by interrupt mitigation is not worth the
56 latency cost.
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
67 #include <linux/kernel.h>
68 #include <linux/module.h>
69 #include <linux/pci.h>
70 #include <linux/init.h>
71 #include <linux/blkdev.h>
72 #include <linux/delay.h>
73 #include <linux/interrupt.h>
74 #include <linux/dmapool.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <linux/platform_device.h>
78 #include <linux/ata_platform.h>
79 #include <linux/mbus.h>
80 #include <scsi/scsi_host.h>
81 #include <scsi/scsi_cmnd.h>
82 #include <scsi/scsi_device.h>
83 #include <linux/libata.h>
85 #define DRV_NAME "sata_mv"
86 #define DRV_VERSION "1.20"
88 enum {
89 /* BAR's are enumerated in terms of pci_resource_start() terms */
90 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
91 MV_IO_BAR = 2, /* offset 0x18: IO space */
92 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
94 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
95 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
97 MV_PCI_REG_BASE = 0,
98 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
99 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
100 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
101 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
102 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
103 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
105 MV_SATAHC0_REG_BASE = 0x20000,
106 MV_FLASH_CTL = 0x1046c,
107 MV_GPIO_PORT_CTL = 0x104f0,
108 MV_RESET_CFG = 0x180d8,
110 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
112 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
113 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
115 MV_MAX_Q_DEPTH = 32,
116 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
118 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
119 * CRPB needs alignment on a 256B boundary. Size == 256B
120 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
122 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
123 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
124 MV_MAX_SG_CT = 256,
125 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
127 MV_PORTS_PER_HC = 4,
128 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
129 MV_PORT_HC_SHIFT = 2,
130 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
131 MV_PORT_MASK = 3,
133 /* Host Flags */
134 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
135 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
136 /* SoC integrated controllers, no PCI interface */
137 MV_FLAG_SOC = (1 << 28),
139 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
140 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 ATA_FLAG_PIO_POLLING,
142 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
144 CRQB_FLAG_READ = (1 << 0),
145 CRQB_TAG_SHIFT = 1,
146 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
147 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
148 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
149 CRQB_CMD_ADDR_SHIFT = 8,
150 CRQB_CMD_CS = (0x2 << 11),
151 CRQB_CMD_LAST = (1 << 15),
153 CRPB_FLAG_STATUS_SHIFT = 8,
154 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
155 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
157 EPRD_FLAG_END_OF_TBL = (1 << 31),
159 /* PCI interface registers */
161 PCI_COMMAND_OFS = 0xc00,
163 PCI_MAIN_CMD_STS_OFS = 0xd30,
164 STOP_PCI_MASTER = (1 << 2),
165 PCI_MASTER_EMPTY = (1 << 3),
166 GLOB_SFT_RST = (1 << 4),
168 MV_PCI_MODE = 0xd00,
169 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
170 MV_PCI_DISC_TIMER = 0xd04,
171 MV_PCI_MSI_TRIGGER = 0xc38,
172 MV_PCI_SERR_MASK = 0xc28,
173 MV_PCI_XBAR_TMOUT = 0x1d04,
174 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
175 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
176 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
177 MV_PCI_ERR_COMMAND = 0x1d50,
179 PCI_IRQ_CAUSE_OFS = 0x1d58,
180 PCI_IRQ_MASK_OFS = 0x1d5c,
181 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
183 PCIE_IRQ_CAUSE_OFS = 0x1900,
184 PCIE_IRQ_MASK_OFS = 0x1910,
185 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
187 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
188 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
189 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
191 PORT0_ERR = (1 << 0), /* shift by port # */
192 PORT0_DONE = (1 << 1), /* shift by port # */
193 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
194 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
195 PCI_ERR = (1 << 18),
196 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
197 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
198 PORTS_0_3_COAL_DONE = (1 << 8),
199 PORTS_4_7_COAL_DONE = (1 << 17),
200 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
201 GPIO_INT = (1 << 22),
202 SELF_INT = (1 << 23),
203 TWSI_INT = (1 << 24),
204 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
205 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
206 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
207 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
208 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
209 HC_MAIN_RSVD),
210 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
211 HC_MAIN_RSVD_5),
212 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
214 /* SATAHC registers */
215 HC_CFG_OFS = 0,
217 HC_IRQ_CAUSE_OFS = 0x14,
218 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
219 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
220 DEV_IRQ = (1 << 8), /* shift by port # */
222 /* Shadow block registers */
223 SHD_BLK_OFS = 0x100,
224 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
226 /* SATA registers */
227 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
228 SATA_ACTIVE_OFS = 0x350,
229 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
231 LTMODE_OFS = 0x30c,
232 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
234 PHY_MODE3 = 0x310,
235 PHY_MODE4 = 0x314,
236 PHY_MODE2 = 0x330,
237 SATA_IFCTL_OFS = 0x344,
238 SATA_IFSTAT_OFS = 0x34c,
239 VENDOR_UNIQUE_FIS_OFS = 0x35c,
241 FIS_CFG_OFS = 0x360,
242 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
244 MV5_PHY_MODE = 0x74,
245 MV5_LT_MODE = 0x30,
246 MV5_PHY_CTL = 0x0C,
247 SATA_INTERFACE_CFG = 0x050,
249 MV_M2_PREAMP_MASK = 0x7e0,
251 /* Port registers */
252 EDMA_CFG_OFS = 0,
253 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
254 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
255 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
256 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
257 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
258 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
259 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
261 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
262 EDMA_ERR_IRQ_MASK_OFS = 0xc,
263 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
264 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
265 EDMA_ERR_DEV = (1 << 2), /* device error */
266 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
267 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
268 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
269 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
270 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
271 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
272 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
273 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
274 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
275 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
276 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
278 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
279 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
280 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
281 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
282 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
284 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
286 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
287 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
288 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
289 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
290 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
291 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
293 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
295 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
296 EDMA_ERR_OVERRUN_5 = (1 << 5),
297 EDMA_ERR_UNDERRUN_5 = (1 << 6),
299 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
300 EDMA_ERR_LNK_CTRL_RX_1 |
301 EDMA_ERR_LNK_CTRL_RX_3 |
302 EDMA_ERR_LNK_CTRL_TX |
303 /* temporary, until we fix hotplug: */
304 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
306 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
307 EDMA_ERR_PRD_PAR |
308 EDMA_ERR_DEV_DCON |
309 EDMA_ERR_DEV_CON |
310 EDMA_ERR_SERR |
311 EDMA_ERR_SELF_DIS |
312 EDMA_ERR_CRQB_PAR |
313 EDMA_ERR_CRPB_PAR |
314 EDMA_ERR_INTRL_PAR |
315 EDMA_ERR_IORDY |
316 EDMA_ERR_LNK_CTRL_RX_2 |
317 EDMA_ERR_LNK_DATA_RX |
318 EDMA_ERR_LNK_DATA_TX |
319 EDMA_ERR_TRANS_PROTO,
321 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
322 EDMA_ERR_PRD_PAR |
323 EDMA_ERR_DEV_DCON |
324 EDMA_ERR_DEV_CON |
325 EDMA_ERR_OVERRUN_5 |
326 EDMA_ERR_UNDERRUN_5 |
327 EDMA_ERR_SELF_DIS_5 |
328 EDMA_ERR_CRQB_PAR |
329 EDMA_ERR_CRPB_PAR |
330 EDMA_ERR_INTRL_PAR |
331 EDMA_ERR_IORDY,
333 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
334 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
336 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
337 EDMA_REQ_Q_PTR_SHIFT = 5,
339 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
340 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
341 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
342 EDMA_RSP_Q_PTR_SHIFT = 3,
344 EDMA_CMD_OFS = 0x28, /* EDMA command register */
345 EDMA_EN = (1 << 0), /* enable EDMA */
346 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
347 ATA_RST = (1 << 2), /* reset trans/link/phy */
349 EDMA_IORDY_TMOUT = 0x34,
350 EDMA_ARB_CFG = 0x38,
352 /* Host private flags (hp_flags) */
353 MV_HP_FLAG_MSI = (1 << 0),
354 MV_HP_ERRATA_50XXB0 = (1 << 1),
355 MV_HP_ERRATA_50XXB2 = (1 << 2),
356 MV_HP_ERRATA_60X1B2 = (1 << 3),
357 MV_HP_ERRATA_60X1C0 = (1 << 4),
358 MV_HP_ERRATA_XX42A0 = (1 << 5),
359 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
360 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
361 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
362 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
364 /* Port private flags (pp_flags) */
365 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
366 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
369 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
370 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
371 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
372 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
374 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
375 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
377 enum {
378 /* DMA boundary 0xffff is required by the s/g splitting
379 * we need on /length/ in mv_fill_sg().
381 MV_DMA_BOUNDARY = 0xffffU,
383 /* mask of register bits containing lower 32 bits
384 * of EDMA request queue DMA address
386 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
388 /* ditto, for response queue */
389 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
392 enum chip_type {
393 chip_504x,
394 chip_508x,
395 chip_5080,
396 chip_604x,
397 chip_608x,
398 chip_6042,
399 chip_7042,
400 chip_soc,
403 /* Command ReQuest Block: 32B */
404 struct mv_crqb {
405 __le32 sg_addr;
406 __le32 sg_addr_hi;
407 __le16 ctrl_flags;
408 __le16 ata_cmd[11];
411 struct mv_crqb_iie {
412 __le32 addr;
413 __le32 addr_hi;
414 __le32 flags;
415 __le32 len;
416 __le32 ata_cmd[4];
419 /* Command ResPonse Block: 8B */
420 struct mv_crpb {
421 __le16 id;
422 __le16 flags;
423 __le32 tmstmp;
426 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
427 struct mv_sg {
428 __le32 addr;
429 __le32 flags_size;
430 __le32 addr_hi;
431 __le32 reserved;
434 struct mv_port_priv {
435 struct mv_crqb *crqb;
436 dma_addr_t crqb_dma;
437 struct mv_crpb *crpb;
438 dma_addr_t crpb_dma;
439 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
440 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
442 unsigned int req_idx;
443 unsigned int resp_idx;
445 u32 pp_flags;
448 struct mv_port_signal {
449 u32 amps;
450 u32 pre;
453 struct mv_host_priv {
454 u32 hp_flags;
455 struct mv_port_signal signal[8];
456 const struct mv_hw_ops *ops;
457 int n_ports;
458 void __iomem *base;
459 void __iomem *main_cause_reg_addr;
460 void __iomem *main_mask_reg_addr;
461 u32 irq_cause_ofs;
462 u32 irq_mask_ofs;
463 u32 unmask_all_irqs;
465 * These consistent DMA memory pools give us guaranteed
466 * alignment for hardware-accessed data structures,
467 * and less memory waste in accomplishing the alignment.
469 struct dma_pool *crqb_pool;
470 struct dma_pool *crpb_pool;
471 struct dma_pool *sg_tbl_pool;
474 struct mv_hw_ops {
475 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
476 unsigned int port);
477 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
478 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
479 void __iomem *mmio);
480 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
481 unsigned int n_hc);
482 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
483 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
486 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
487 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
488 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
489 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
490 static int mv_port_start(struct ata_port *ap);
491 static void mv_port_stop(struct ata_port *ap);
492 static void mv_qc_prep(struct ata_queued_cmd *qc);
493 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
494 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
495 static int mv_hardreset(struct ata_link *link, unsigned int *class,
496 unsigned long deadline);
497 static void mv_eh_freeze(struct ata_port *ap);
498 static void mv_eh_thaw(struct ata_port *ap);
499 static void mv6_dev_config(struct ata_device *dev);
501 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
502 unsigned int port);
503 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
504 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
505 void __iomem *mmio);
506 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int n_hc);
508 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
509 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
511 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
512 unsigned int port);
513 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
514 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
515 void __iomem *mmio);
516 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
517 unsigned int n_hc);
518 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
519 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
520 void __iomem *mmio);
521 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
522 void __iomem *mmio);
523 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
524 void __iomem *mmio, unsigned int n_hc);
525 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
526 void __iomem *mmio);
527 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
528 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
529 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
530 unsigned int port_no);
531 static int mv_stop_edma(struct ata_port *ap);
532 static int mv_stop_edma_engine(void __iomem *port_mmio);
533 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
535 static void mv_pmp_select(struct ata_port *ap, int pmp);
536 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
537 unsigned long deadline);
538 static int mv_softreset(struct ata_link *link, unsigned int *class,
539 unsigned long deadline);
541 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
542 * because we have to allow room for worst case splitting of
543 * PRDs for 64K boundaries in mv_fill_sg().
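/*
 * Illustrative arithmetic (not from the original source): with
 * .sg_tablesize = MV_MAX_SG_CT / 2 = 128, even if every block-layer
 * segment straddles a 64K boundary and mv_fill_sg() splits it into
 * two ePRDs, the table never exceeds 128 * 2 = 256 = MV_MAX_SG_CT
 * entries, i.e. MV_SG_TBL_SZ = 256 * 16 = 4096 bytes per tag.
 */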
545 static struct scsi_host_template mv5_sht = {
546 ATA_BASE_SHT(DRV_NAME),
547 .sg_tablesize = MV_MAX_SG_CT / 2,
548 .dma_boundary = MV_DMA_BOUNDARY,
551 static struct scsi_host_template mv6_sht = {
552 ATA_NCQ_SHT(DRV_NAME),
553 .can_queue = MV_MAX_Q_DEPTH - 1,
554 .sg_tablesize = MV_MAX_SG_CT / 2,
555 .dma_boundary = MV_DMA_BOUNDARY,
558 static struct ata_port_operations mv5_ops = {
559 .inherits = &ata_sff_port_ops,
561 .qc_prep = mv_qc_prep,
562 .qc_issue = mv_qc_issue,
564 .freeze = mv_eh_freeze,
565 .thaw = mv_eh_thaw,
566 .hardreset = mv_hardreset,
567 .error_handler = ata_std_error_handler, /* avoid SFF EH */
568 .post_internal_cmd = ATA_OP_NULL,
570 .scr_read = mv5_scr_read,
571 .scr_write = mv5_scr_write,
573 .port_start = mv_port_start,
574 .port_stop = mv_port_stop,
577 static struct ata_port_operations mv6_ops = {
578 .inherits = &mv5_ops,
579 .qc_defer = sata_pmp_qc_defer_cmd_switch,
580 .dev_config = mv6_dev_config,
581 .scr_read = mv_scr_read,
582 .scr_write = mv_scr_write,
584 .pmp_hardreset = mv_pmp_hardreset,
585 .pmp_softreset = mv_softreset,
586 .softreset = mv_softreset,
587 .error_handler = sata_pmp_error_handler,
590 static struct ata_port_operations mv_iie_ops = {
591 .inherits = &mv6_ops,
592 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
593 .dev_config = ATA_OP_NULL,
594 .qc_prep = mv_qc_prep_iie,
597 static const struct ata_port_info mv_port_info[] = {
598 { /* chip_504x */
599 .flags = MV_COMMON_FLAGS,
600 .pio_mask = 0x1f, /* pio0-4 */
601 .udma_mask = ATA_UDMA6,
602 .port_ops = &mv5_ops,
604 { /* chip_508x */
605 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
606 .pio_mask = 0x1f, /* pio0-4 */
607 .udma_mask = ATA_UDMA6,
608 .port_ops = &mv5_ops,
610 { /* chip_5080 */
611 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
612 .pio_mask = 0x1f, /* pio0-4 */
613 .udma_mask = ATA_UDMA6,
614 .port_ops = &mv5_ops,
616 { /* chip_604x */
617 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
618 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
619 ATA_FLAG_NCQ,
620 .pio_mask = 0x1f, /* pio0-4 */
621 .udma_mask = ATA_UDMA6,
622 .port_ops = &mv6_ops,
624 { /* chip_608x */
625 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
626 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
627 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
628 .pio_mask = 0x1f, /* pio0-4 */
629 .udma_mask = ATA_UDMA6,
630 .port_ops = &mv6_ops,
632 { /* chip_6042 */
633 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
634 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
635 ATA_FLAG_NCQ,
636 .pio_mask = 0x1f, /* pio0-4 */
637 .udma_mask = ATA_UDMA6,
638 .port_ops = &mv_iie_ops,
640 { /* chip_7042 */
641 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
642 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
643 ATA_FLAG_NCQ,
644 .pio_mask = 0x1f, /* pio0-4 */
645 .udma_mask = ATA_UDMA6,
646 .port_ops = &mv_iie_ops,
648 { /* chip_soc */
649 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
650 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
651 ATA_FLAG_NCQ | MV_FLAG_SOC,
652 .pio_mask = 0x1f, /* pio0-4 */
653 .udma_mask = ATA_UDMA6,
654 .port_ops = &mv_iie_ops,
658 static const struct pci_device_id mv_pci_tbl[] = {
659 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
660 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
661 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
662 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
663 /* RocketRAID 1740/174x have different identifiers */
664 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
665 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
667 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
668 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
669 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
670 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
671 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
673 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
675 /* Adaptec 1430SA */
676 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
678 /* Marvell 7042 support */
679 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
681 /* Highpoint RocketRAID PCIe series */
682 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
683 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
685 { } /* terminate list */
688 static const struct mv_hw_ops mv5xxx_ops = {
689 .phy_errata = mv5_phy_errata,
690 .enable_leds = mv5_enable_leds,
691 .read_preamp = mv5_read_preamp,
692 .reset_hc = mv5_reset_hc,
693 .reset_flash = mv5_reset_flash,
694 .reset_bus = mv5_reset_bus,
697 static const struct mv_hw_ops mv6xxx_ops = {
698 .phy_errata = mv6_phy_errata,
699 .enable_leds = mv6_enable_leds,
700 .read_preamp = mv6_read_preamp,
701 .reset_hc = mv6_reset_hc,
702 .reset_flash = mv6_reset_flash,
703 .reset_bus = mv_reset_pci_bus,
706 static const struct mv_hw_ops mv_soc_ops = {
707 .phy_errata = mv6_phy_errata,
708 .enable_leds = mv_soc_enable_leds,
709 .read_preamp = mv_soc_read_preamp,
710 .reset_hc = mv_soc_reset_hc,
711 .reset_flash = mv_soc_reset_flash,
712 .reset_bus = mv_soc_reset_bus,
716 * Functions
719 static inline void writelfl(unsigned long data, void __iomem *addr)
721 writel(data, addr);
722 (void) readl(addr); /* flush to avoid PCI posted write */
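/*
 * Usage sketch: writelfl() is used wherever a register write must be
 * seen by the chip before the code continues, e.g. when clearing an
 * interrupt cause register prior to re-enabling EDMA:
 *
 *	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 *
 * The dummy readl() forces the posted PCI write to complete.
 */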
725 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
727 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
730 static inline unsigned int mv_hc_from_port(unsigned int port)
732 return port >> MV_PORT_HC_SHIFT;
735 static inline unsigned int mv_hardport_from_port(unsigned int port)
737 return port & MV_PORT_MASK;
740 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
741 unsigned int port)
743 return mv_hc_base(base, mv_hc_from_port(port));
746 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
748 return mv_hc_base_from_port(base, port) +
749 MV_SATAHC_ARBTR_REG_SZ +
750 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
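/*
 * Worked example (illustrative only): for port 5 on an 8-port chip,
 * mv_hc_from_port(5) = 5 >> MV_PORT_HC_SHIFT = 1 and
 * mv_hardport_from_port(5) = 5 & MV_PORT_MASK = 1, so the port base is
 * 0x20000 + 1*0x10000 (HC 1) + 0x2000 (arbiter) + 1*0x2000 = 0x34000
 * from the start of the chip's register window.
 */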
753 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
755 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
756 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
758 return hc_mmio + ofs;
761 static inline void __iomem *mv_host_base(struct ata_host *host)
763 struct mv_host_priv *hpriv = host->private_data;
764 return hpriv->base;
767 static inline void __iomem *mv_ap_base(struct ata_port *ap)
769 return mv_port_base(mv_host_base(ap->host), ap->port_no);
772 static inline int mv_get_hc_count(unsigned long port_flags)
774 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
777 static void mv_set_edma_ptrs(void __iomem *port_mmio,
778 struct mv_host_priv *hpriv,
779 struct mv_port_priv *pp)
781 u32 index;
784 * initialize request queue
786 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
788 WARN_ON(pp->crqb_dma & 0x3ff);
789 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
790 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
791 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
793 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
794 writelfl((pp->crqb_dma & 0xffffffff) | index,
795 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
796 else
797 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
800 * initialize response queue
802 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
804 WARN_ON(pp->crpb_dma & 0xff);
805 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
807 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
808 writelfl((pp->crpb_dma & 0xffffffff) | index,
809 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
810 else
811 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
813 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
814 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
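/*
 * Example of the pointer-register encoding used above (illustrative,
 * assuming a 1KB-aligned crqb_dma of 0x1f3fe400 and req_idx == 3):
 * the request IN pointer register receives
 * (0x1f3fe400 & EDMA_REQ_Q_BASE_LO_MASK) | (3 << EDMA_REQ_Q_PTR_SHIFT)
 * = 0x1f3fe400 | 0x60 = 0x1f3fe460, i.e. base bits 31:10 plus the
 * 5-bit queue index in bits 9:5.  The "(dma >> 16) >> 16" idiom keeps
 * the upper-half extraction valid even when dma_addr_t is 32 bits.
 */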
818 * mv_start_dma - Enable eDMA engine
819 * @base: port base address
820 * @pp: port private data
822 * Verify the local cache of the eDMA state is accurate with a
823 * WARN_ON.
825 * LOCKING:
826 * Inherited from caller.
828 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
829 struct mv_port_priv *pp, u8 protocol)
831 int want_ncq = (protocol == ATA_PROT_NCQ);
833 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
834 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
835 if (want_ncq != using_ncq)
836 mv_stop_edma(ap);
838 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
839 struct mv_host_priv *hpriv = ap->host->private_data;
840 int hard_port = mv_hardport_from_port(ap->port_no);
841 void __iomem *hc_mmio = mv_hc_base_from_port(
842 mv_host_base(ap->host), hard_port);
843 u32 hc_irq_cause, ipending;
845 /* clear EDMA event indicators, if any */
846 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
848 /* clear EDMA interrupt indicator, if any */
849 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
850 ipending = (DEV_IRQ << hard_port) |
851 (CRPB_DMA_DONE << hard_port);
852 if (hc_irq_cause & ipending) {
853 writelfl(hc_irq_cause & ~ipending,
854 hc_mmio + HC_IRQ_CAUSE_OFS);
857 mv_edma_cfg(ap, want_ncq);
859 /* clear FIS IRQ Cause */
860 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
862 mv_set_edma_ptrs(port_mmio, hpriv, pp);
864 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
865 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
867 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
871 * mv_stop_edma_engine - Disable eDMA engine
872 * @port_mmio: io base address
874 * LOCKING:
875 * Inherited from caller.
877 static int mv_stop_edma_engine(void __iomem *port_mmio)
879 int i;
881 /* Disable eDMA. The disable bit auto clears. */
882 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
884 /* Wait for the chip to confirm eDMA is off. */
885 for (i = 10000; i > 0; i--) {
886 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
887 if (!(reg & EDMA_EN))
888 return 0;
889 udelay(10);
891 return -EIO;
894 static int mv_stop_edma(struct ata_port *ap)
896 void __iomem *port_mmio = mv_ap_base(ap);
897 struct mv_port_priv *pp = ap->private_data;
899 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
900 return 0;
901 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
902 if (mv_stop_edma_engine(port_mmio)) {
903 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
904 return -EIO;
906 return 0;
909 #ifdef ATA_DEBUG
910 static void mv_dump_mem(void __iomem *start, unsigned bytes)
912 int b, w;
913 for (b = 0; b < bytes; ) {
914 DPRINTK("%p: ", start + b);
915 for (w = 0; b < bytes && w < 4; w++) {
916 printk("%08x ", readl(start + b));
917 b += sizeof(u32);
919 printk("\n");
922 #endif
924 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
926 #ifdef ATA_DEBUG
927 int b, w;
928 u32 dw;
929 for (b = 0; b < bytes; ) {
930 DPRINTK("%02x: ", b);
931 for (w = 0; b < bytes && w < 4; w++) {
932 (void) pci_read_config_dword(pdev, b, &dw);
933 printk("%08x ", dw);
934 b += sizeof(u32);
936 printk("\n");
938 #endif
940 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
941 struct pci_dev *pdev)
943 #ifdef ATA_DEBUG
944 void __iomem *hc_base = mv_hc_base(mmio_base,
945 port >> MV_PORT_HC_SHIFT);
946 void __iomem *port_base;
947 int start_port, num_ports, p, start_hc, num_hcs, hc;
949 if (0 > port) {
950 start_hc = start_port = 0;
951 num_ports = 8; /* should be benign for 4 port devs */
952 num_hcs = 2;
953 } else {
954 start_hc = port >> MV_PORT_HC_SHIFT;
955 start_port = port;
956 num_ports = num_hcs = 1;
958 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
959 num_ports > 1 ? num_ports - 1 : start_port);
961 if (NULL != pdev) {
962 DPRINTK("PCI config space regs:\n");
963 mv_dump_pci_cfg(pdev, 0x68);
965 DPRINTK("PCI regs:\n");
966 mv_dump_mem(mmio_base+0xc00, 0x3c);
967 mv_dump_mem(mmio_base+0xd00, 0x34);
968 mv_dump_mem(mmio_base+0xf00, 0x4);
969 mv_dump_mem(mmio_base+0x1d00, 0x6c);
970 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
971 hc_base = mv_hc_base(mmio_base, hc);
972 DPRINTK("HC regs (HC %i):\n", hc);
973 mv_dump_mem(hc_base, 0x1c);
975 for (p = start_port; p < start_port + num_ports; p++) {
976 port_base = mv_port_base(mmio_base, p);
977 DPRINTK("EDMA regs (port %i):\n", p);
978 mv_dump_mem(port_base, 0x54);
979 DPRINTK("SATA regs (port %i):\n", p);
980 mv_dump_mem(port_base+0x300, 0x60);
982 #endif
985 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
987 unsigned int ofs;
989 switch (sc_reg_in) {
990 case SCR_STATUS:
991 case SCR_CONTROL:
992 case SCR_ERROR:
993 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
994 break;
995 case SCR_ACTIVE:
996 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
997 break;
998 default:
999 ofs = 0xffffffffU;
1000 break;
1002 return ofs;
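/*
 * Resulting mapping (using the standard libata SCR numbering,
 * SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2, SCR_ACTIVE=3):
 * SStatus -> 0x300, SError -> 0x304, SControl -> 0x308, while
 * SActive lives apart at SATA_ACTIVE_OFS (0x350).
 */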
1005 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1007 unsigned int ofs = mv_scr_offset(sc_reg_in);
1009 if (ofs != 0xffffffffU) {
1010 *val = readl(mv_ap_base(ap) + ofs);
1011 return 0;
1012 } else
1013 return -EINVAL;
1016 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1018 unsigned int ofs = mv_scr_offset(sc_reg_in);
1020 if (ofs != 0xffffffffU) {
1021 writelfl(val, mv_ap_base(ap) + ofs);
1022 return 0;
1023 } else
1024 return -EINVAL;
1027 static void mv6_dev_config(struct ata_device *adev)
1030 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1032 * Gen-II does not support NCQ over a port multiplier
1033 * (no FIS-based switching).
1035 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1036 * See mv_qc_prep() for more info.
1038 if (adev->flags & ATA_DFLAG_NCQ) {
1039 if (sata_pmp_attached(adev->link->ap))
1040 adev->flags &= ~ATA_DFLAG_NCQ;
1041 else if (adev->max_sectors > ATA_MAX_SECTORS)
1042 adev->max_sectors = ATA_MAX_SECTORS;
1046 static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1048 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1050 * Various bit settings required for operation
1051 * in FIS-based switching (fbs) mode on GenIIe:
1053 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1054 old_ltmode = readl(port_mmio + LTMODE_OFS);
1055 if (enable_fbs) {
1056 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1057 new_ltmode = old_ltmode | LTMODE_BIT8;
1058 } else { /* disable fbs */
1059 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1060 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1062 if (new_fcfg != old_fcfg)
1063 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1064 if (new_ltmode != old_ltmode)
1065 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
1068 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1070 u32 cfg;
1071 struct mv_port_priv *pp = ap->private_data;
1072 struct mv_host_priv *hpriv = ap->host->private_data;
1073 void __iomem *port_mmio = mv_ap_base(ap);
1075 /* set up non-NCQ EDMA configuration */
1076 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1078 if (IS_GEN_I(hpriv))
1079 cfg |= (1 << 8); /* enab config burst size mask */
1081 else if (IS_GEN_II(hpriv))
1082 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1084 else if (IS_GEN_IIE(hpriv)) {
1085 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1086 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1087 cfg |= (1 << 18); /* enab early completion */
1088 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1090 if (want_ncq && sata_pmp_attached(ap)) {
1091 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1092 mv_config_fbs(port_mmio, 1);
1093 } else {
1094 mv_config_fbs(port_mmio, 0);
1098 if (want_ncq) {
1099 cfg |= EDMA_CFG_NCQ;
1100 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1101 } else
1102 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1104 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
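/*
 * Worked example (illustrative): on a Gen-IIE port with want_ncq set
 * and no port multiplier attached, the value written above would be
 * EDMA_CFG_Q_DEPTH | (1<<23) | (1<<22) | (1<<18) | (1<<17) | EDMA_CFG_NCQ
 * = 0x1f | 0x800000 | 0x400000 | 0x40000 | 0x20000 | 0x20 = 0xc6003f,
 * with FIS-based switching left disabled via mv_config_fbs(..., 0).
 */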
1107 static void mv_port_free_dma_mem(struct ata_port *ap)
1109 struct mv_host_priv *hpriv = ap->host->private_data;
1110 struct mv_port_priv *pp = ap->private_data;
1111 int tag;
1113 if (pp->crqb) {
1114 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1115 pp->crqb = NULL;
1117 if (pp->crpb) {
1118 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1119 pp->crpb = NULL;
1122 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1123 * For later hardware, we have one unique sg_tbl per NCQ tag.
1125 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1126 if (pp->sg_tbl[tag]) {
1127 if (tag == 0 || !IS_GEN_I(hpriv))
1128 dma_pool_free(hpriv->sg_tbl_pool,
1129 pp->sg_tbl[tag],
1130 pp->sg_tbl_dma[tag]);
1131 pp->sg_tbl[tag] = NULL;
1137 * mv_port_start - Port specific init/start routine.
1138 * @ap: ATA channel to manipulate
1140 * Allocate and point to DMA memory, init port private memory,
1141 * zero indices.
1143 * LOCKING:
1144 * Inherited from caller.
1146 static int mv_port_start(struct ata_port *ap)
1148 struct device *dev = ap->host->dev;
1149 struct mv_host_priv *hpriv = ap->host->private_data;
1150 struct mv_port_priv *pp;
1151 int tag;
1153 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1154 if (!pp)
1155 return -ENOMEM;
1156 ap->private_data = pp;
1158 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1159 if (!pp->crqb)
1160 return -ENOMEM;
1161 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1163 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1164 if (!pp->crpb)
1165 goto out_port_free_dma_mem;
1166 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1169 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1170 * For later hardware, we need one unique sg_tbl per NCQ tag.
1172 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1173 if (tag == 0 || !IS_GEN_I(hpriv)) {
1174 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1175 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1176 if (!pp->sg_tbl[tag])
1177 goto out_port_free_dma_mem;
1178 } else {
1179 pp->sg_tbl[tag] = pp->sg_tbl[0];
1180 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1183 return 0;
1185 out_port_free_dma_mem:
1186 mv_port_free_dma_mem(ap);
1187 return -ENOMEM;
1191 * mv_port_stop - Port specific cleanup/stop routine.
1192 * @ap: ATA channel to manipulate
1194 * Stop DMA, cleanup port memory.
1196 * LOCKING:
1197 * This routine uses the host lock to protect the DMA stop.
1199 static void mv_port_stop(struct ata_port *ap)
1201 mv_stop_edma(ap);
1202 mv_port_free_dma_mem(ap);
1206 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1207 * @qc: queued command whose SG list to source from
1209 * Populate the SG list and mark the last entry.
1211 * LOCKING:
1212 * Inherited from caller.
1214 static void mv_fill_sg(struct ata_queued_cmd *qc)
1216 struct mv_port_priv *pp = qc->ap->private_data;
1217 struct scatterlist *sg;
1218 struct mv_sg *mv_sg, *last_sg = NULL;
1219 unsigned int si;
1221 mv_sg = pp->sg_tbl[qc->tag];
1222 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1223 dma_addr_t addr = sg_dma_address(sg);
1224 u32 sg_len = sg_dma_len(sg);
1226 while (sg_len) {
1227 u32 offset = addr & 0xffff;
1228 u32 len = sg_len;
1230 if ((offset + sg_len > 0x10000))
1231 len = 0x10000 - offset;
1233 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1234 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1235 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1237 sg_len -= len;
1238 addr += len;
1240 last_sg = mv_sg;
1241 mv_sg++;
1245 if (likely(last_sg))
1246 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
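/*
 * Worked example (illustrative): a segment at DMA address 0x1234f000
 * with length 0x2000 has offset 0xf000 within its 64K page, so the
 * loop above emits two ePRDs: one for 0x1000 bytes at 0x1234f000 and
 * one for 0x1000 bytes at 0x12350000.  This two-for-one worst case is
 * why .sg_tablesize is only MV_MAX_SG_CT / 2.  Note that the byte
 * count is stored in the low 16 bits of flags_size, so a full 64KB
 * chunk would be encoded as 0.
 */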
1249 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1251 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1252 (last ? CRQB_CMD_LAST : 0);
1253 *cmdw = cpu_to_le16(tmp);
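/*
 * Layout sketch of the packed halfword built above: the register data
 * sits in bits 7:0, the taskfile register address at
 * CRQB_CMD_ADDR_SHIFT (bit 8), plus the CRQB_CMD_CS field; the final
 * word of a CRQB also sets CRQB_CMD_LAST (bit 15), as mv_qc_prep()
 * does for the tf->command/ATA_REG_CMD entry.
 */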
1257 * mv_qc_prep - Host specific command preparation.
1258 * @qc: queued command to prepare
1260 * This routine simply redirects to the general purpose routine
1261 * if command is not DMA. Else, it handles prep of the CRQB
1262 * (command request block), does some sanity checking, and calls
1263 * the SG load routine.
1265 * LOCKING:
1266 * Inherited from caller.
1268 static void mv_qc_prep(struct ata_queued_cmd *qc)
1270 struct ata_port *ap = qc->ap;
1271 struct mv_port_priv *pp = ap->private_data;
1272 __le16 *cw;
1273 struct ata_taskfile *tf;
1274 u16 flags = 0;
1275 unsigned in_index;
1277 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1278 (qc->tf.protocol != ATA_PROT_NCQ))
1279 return;
1281 /* Fill in command request block
1283 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1284 flags |= CRQB_FLAG_READ;
1285 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1286 flags |= qc->tag << CRQB_TAG_SHIFT;
1287 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1289 /* get current queue index from software */
1290 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1292 pp->crqb[in_index].sg_addr =
1293 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1294 pp->crqb[in_index].sg_addr_hi =
1295 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1296 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1298 cw = &pp->crqb[in_index].ata_cmd[0];
1299 tf = &qc->tf;
1301 /* Sadly, the CRQB cannot accommodate all registers--there are
1302 * only 11 bytes...so we must pick and choose required
1303 * registers based on the command. So, we drop feature and
1304 * hob_feature for [RW] DMA commands, but they are needed for
1305 * NCQ. NCQ will drop hob_nsect.
1307 switch (tf->command) {
1308 case ATA_CMD_READ:
1309 case ATA_CMD_READ_EXT:
1310 case ATA_CMD_WRITE:
1311 case ATA_CMD_WRITE_EXT:
1312 case ATA_CMD_WRITE_FUA_EXT:
1313 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1314 break;
1315 case ATA_CMD_FPDMA_READ:
1316 case ATA_CMD_FPDMA_WRITE:
1317 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1318 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1319 break;
1320 default:
1321 /* The only other commands EDMA supports in non-queued and
1322 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1323 * of which are defined/used by Linux. If we get here, this
1324 * driver needs work.
1326 * FIXME: modify libata to give qc_prep a return value and
1327 * return error here.
1329 BUG_ON(tf->command);
1330 break;
1332 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1333 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1334 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1335 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1336 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1337 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1338 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1339 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1340 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1342 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1343 return;
1344 mv_fill_sg(qc);
1348 * mv_qc_prep_iie - Host specific command preparation.
1349 * @qc: queued command to prepare
1351 * This routine simply redirects to the general purpose routine
1352 * if command is not DMA. Else, it handles prep of the CRQB
1353 * (command request block), does some sanity checking, and calls
1354 * the SG load routine.
1356 * LOCKING:
1357 * Inherited from caller.
1359 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1361 struct ata_port *ap = qc->ap;
1362 struct mv_port_priv *pp = ap->private_data;
1363 struct mv_crqb_iie *crqb;
1364 struct ata_taskfile *tf;
1365 unsigned in_index;
1366 u32 flags = 0;
1368 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1369 (qc->tf.protocol != ATA_PROT_NCQ))
1370 return;
1372 /* Fill in Gen IIE command request block */
1373 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1374 flags |= CRQB_FLAG_READ;
1376 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1377 flags |= qc->tag << CRQB_TAG_SHIFT;
1378 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1379 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1381 /* get current queue index from software */
1382 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1384 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1385 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1386 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1387 crqb->flags = cpu_to_le32(flags);
1389 tf = &qc->tf;
1390 crqb->ata_cmd[0] = cpu_to_le32(
1391 (tf->command << 16) |
1392 (tf->feature << 24)
1394 crqb->ata_cmd[1] = cpu_to_le32(
1395 (tf->lbal << 0) |
1396 (tf->lbam << 8) |
1397 (tf->lbah << 16) |
1398 (tf->device << 24)
1400 crqb->ata_cmd[2] = cpu_to_le32(
1401 (tf->hob_lbal << 0) |
1402 (tf->hob_lbam << 8) |
1403 (tf->hob_lbah << 16) |
1404 (tf->hob_feature << 24)
1406 crqb->ata_cmd[3] = cpu_to_le32(
1407 (tf->nsect << 0) |
1408 (tf->hob_nsect << 8)
1411 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1412 return;
1413 mv_fill_sg(qc);
1417 * mv_qc_issue - Initiate a command to the host
1418 * @qc: queued command to start
1420 * This routine simply redirects to the general purpose routine
1421 * if command is not DMA. Else, it sanity checks our local
1422 * caches of the request producer/consumer indices then enables
1423 * DMA and bumps the request producer index.
1425 * LOCKING:
1426 * Inherited from caller.
1428 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1430 struct ata_port *ap = qc->ap;
1431 void __iomem *port_mmio = mv_ap_base(ap);
1432 struct mv_port_priv *pp = ap->private_data;
1433 u32 in_index;
1435 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1436 (qc->tf.protocol != ATA_PROT_NCQ)) {
1438 * We're about to send a non-EDMA capable command to the
1439 * port. Turn off EDMA so there won't be problems accessing
1440 * shadow block, etc registers.
1442 mv_stop_edma(ap);
1443 mv_pmp_select(ap, qc->dev->link->pmp);
1444 return ata_sff_qc_issue(qc);
1447 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1449 pp->req_idx++;
1451 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1453 /* and write the request in pointer to kick the EDMA to life */
1454 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1455 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1457 return 0;
1461 * mv_err_intr - Handle error interrupts on the port
1462 * @ap: ATA channel to manipulate
1463 * @reset_allowed: bool: 0 == don't trigger from reset here
1465 * In most cases, just clear the interrupt and move on. However,
1466 * some cases require an eDMA reset, which also performs a COMRESET.
1467 * The SERR case requires a clear of pending errors in the SATA
1468 * SERROR register. Finally, if the port disabled DMA,
1469 * update our cached copy to match.
1471 * LOCKING:
1472 * Inherited from caller.
1474 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1476 void __iomem *port_mmio = mv_ap_base(ap);
1477 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1478 struct mv_port_priv *pp = ap->private_data;
1479 struct mv_host_priv *hpriv = ap->host->private_data;
1480 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1481 unsigned int action = 0, err_mask = 0;
1482 struct ata_eh_info *ehi = &ap->link.eh_info;
1484 ata_ehi_clear_desc(ehi);
1486 if (!edma_enabled) {
1487 /* just a guess: do we need to do this? should we
1488 * expand this, and do it in all cases?
1490 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1491 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1494 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1496 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1499 * all generations share these EDMA error cause bits
1502 if (edma_err_cause & EDMA_ERR_DEV)
1503 err_mask |= AC_ERR_DEV;
1504 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1505 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1506 EDMA_ERR_INTRL_PAR)) {
1507 err_mask |= AC_ERR_ATA_BUS;
1508 action |= ATA_EH_RESET;
1509 ata_ehi_push_desc(ehi, "parity error");
1511 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1512 ata_ehi_hotplugged(ehi);
1513 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1514 "dev disconnect" : "dev connect");
1515 action |= ATA_EH_RESET;
1518 if (IS_GEN_I(hpriv)) {
1519 eh_freeze_mask = EDMA_EH_FREEZE_5;
1521 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1522 pp = ap->private_data;
1523 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1524 ata_ehi_push_desc(ehi, "EDMA self-disable");
1526 } else {
1527 eh_freeze_mask = EDMA_EH_FREEZE;
1529 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1530 pp = ap->private_data;
1531 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1532 ata_ehi_push_desc(ehi, "EDMA self-disable");
1535 if (edma_err_cause & EDMA_ERR_SERR) {
1536 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1537 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1538 err_mask = AC_ERR_ATA_BUS;
1539 action |= ATA_EH_RESET;
1543 /* Clear EDMA now that SERR cleanup done */
1544 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1546 if (!err_mask) {
1547 err_mask = AC_ERR_OTHER;
1548 action |= ATA_EH_RESET;
1551 ehi->serror |= serr;
1552 ehi->action |= action;
1554 if (qc)
1555 qc->err_mask |= err_mask;
1556 else
1557 ehi->err_mask |= err_mask;
1559 if (edma_err_cause & eh_freeze_mask)
1560 ata_port_freeze(ap);
1561 else
1562 ata_port_abort(ap);
1565 static void mv_intr_pio(struct ata_port *ap)
1567 struct ata_queued_cmd *qc;
1568 u8 ata_status;
1570 /* ignore spurious intr if drive still BUSY */
1571 ata_status = readb(ap->ioaddr.status_addr);
1572 if (unlikely(ata_status & ATA_BUSY))
1573 return;
1575 /* get active ATA command */
1576 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1577 if (unlikely(!qc)) /* no active tag */
1578 return;
1579 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1580 return;
1582 /* and finally, complete the ATA command */
1583 qc->err_mask |= ac_err_mask(ata_status);
1584 ata_qc_complete(qc);
1587 static void mv_intr_edma(struct ata_port *ap)
1589 void __iomem *port_mmio = mv_ap_base(ap);
1590 struct mv_host_priv *hpriv = ap->host->private_data;
1591 struct mv_port_priv *pp = ap->private_data;
1592 struct ata_queued_cmd *qc;
1593 u32 out_index, in_index;
1594 bool work_done = false;
1596 /* get h/w response queue pointer */
1597 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1598 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1600 while (1) {
1601 u16 status;
1602 unsigned int tag;
1604 /* get s/w response queue last-read pointer, and compare */
1605 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1606 if (in_index == out_index)
1607 break;
1609 /* 50xx: get active ATA command */
1610 if (IS_GEN_I(hpriv))
1611 tag = ap->link.active_tag;
1613 /* Gen II/IIE: get active ATA command via tag, to enable
1614 * support for queueing. this works transparently for
1615 * queued and non-queued modes.
1617 else
1618 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1620 qc = ata_qc_from_tag(ap, tag);
1622 /* For non-NCQ mode, the lower 8 bits of status
1623 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1624 * which should be zero if all went well.
1626 status = le16_to_cpu(pp->crpb[out_index].flags);
1627 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1628 mv_err_intr(ap, qc);
1629 return;
1632 /* and finally, complete the ATA command */
1633 if (qc) {
1634 qc->err_mask |=
1635 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1636 ata_qc_complete(qc);
1639 /* advance software response queue pointer, to
1640 * indicate (after the loop completes) to hardware
1641 * that we have consumed a response queue entry.
1643 work_done = true;
1644 pp->resp_idx++;
1647 if (work_done)
1648 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1649 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1650 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
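/*
 * Ring-buffer sketch (illustrative): if the hardware IN pointer reads
 * back as 5 while the driver's resp_idx (masked) is 3, the loop above
 * completes the commands for CRPB entries 3 and 4, advances resp_idx
 * to match, and then writes out_index == 5 back into the OUT pointer
 * (preserving the response queue base bits) so the EDMA knows those
 * entries may be reused.
 */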
1654 * mv_host_intr - Handle all interrupts on the given host controller
1655 * @host: host specific structure
1656 * @relevant: port error bits relevant to this host controller
1657 * @hc: which host controller we're to look at
1659 * Read then write clear the HC interrupt status then walk each
1660 * port connected to the HC and see if it needs servicing. Port
1661 * success ints are reported in the HC interrupt status reg, the
1662 * port error ints are reported in the higher level main
1663 * interrupt status register and thus are passed in via the
1664 * 'relevant' argument.
1666 * LOCKING:
1667 * Inherited from caller.
1669 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1671 struct mv_host_priv *hpriv = host->private_data;
1672 void __iomem *mmio = hpriv->base;
1673 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1674 u32 hc_irq_cause;
1675 int port, port0, last_port;
1677 if (hc == 0)
1678 port0 = 0;
1679 else
1680 port0 = MV_PORTS_PER_HC;
1682 if (HAS_PCI(host))
1683 last_port = port0 + MV_PORTS_PER_HC;
1684 else
1685 last_port = port0 + hpriv->n_ports;
1686 /* we'll need the HC success int register in most cases */
1687 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1688 if (!hc_irq_cause)
1689 return;
1691 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1693 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1694 hc, relevant, hc_irq_cause);
1696 for (port = port0; port < last_port; port++) {
1697 struct ata_port *ap = host->ports[port];
1698 struct mv_port_priv *pp;
1699 int have_err_bits, hard_port, shift;
1701 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1702 continue;
1704 pp = ap->private_data;
1706 shift = port << 1; /* (port * 2) */
1707 if (port >= MV_PORTS_PER_HC)
1708 shift++; /* skip bit 8 in the HC Main IRQ reg */
1710 have_err_bits = ((PORT0_ERR << shift) & relevant);
1712 if (unlikely(have_err_bits)) {
1713 struct ata_queued_cmd *qc;
1715 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1716 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1717 continue;
1719 mv_err_intr(ap, qc);
1720 continue;
1723 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1725 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1726 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1727 mv_intr_edma(ap);
1728 } else {
1729 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1730 mv_intr_pio(ap);
1733 VPRINTK("EXIT\n");
1736 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1738 struct mv_host_priv *hpriv = host->private_data;
1739 struct ata_port *ap;
1740 struct ata_queued_cmd *qc;
1741 struct ata_eh_info *ehi;
1742 unsigned int i, err_mask, printed = 0;
1743 u32 err_cause;
1745 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1747 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1748 err_cause);
1750 DPRINTK("All regs @ PCI error\n");
1751 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1753 writelfl(0, mmio + hpriv->irq_cause_ofs);
1755 for (i = 0; i < host->n_ports; i++) {
1756 ap = host->ports[i];
1757 if (!ata_link_offline(&ap->link)) {
1758 ehi = &ap->link.eh_info;
1759 ata_ehi_clear_desc(ehi);
1760 if (!printed++)
1761 ata_ehi_push_desc(ehi,
1762 "PCI err cause 0x%08x", err_cause);
1763 err_mask = AC_ERR_HOST_BUS;
1764 ehi->action = ATA_EH_RESET;
1765 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1766 if (qc)
1767 qc->err_mask |= err_mask;
1768 else
1769 ehi->err_mask |= err_mask;
1771 ata_port_freeze(ap);
1777 * mv_interrupt - Main interrupt event handler
1778 * @irq: unused
1779 * @dev_instance: private data; in this case the host structure
1781 * Read the read only register to determine if any host
1782 * controllers have pending interrupts. If so, call lower level
1783 * routine to handle. Also check for PCI errors which are only
1784 * reported here.
1786 * LOCKING:
1787 * This routine holds the host lock while processing pending
1788 * interrupts.
1790 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1792 struct ata_host *host = dev_instance;
1793 struct mv_host_priv *hpriv = host->private_data;
1794 unsigned int hc, handled = 0, n_hcs;
1795 void __iomem *mmio = hpriv->base;
1796 u32 irq_stat, irq_mask;
1798 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
1799 spin_lock(&host->lock);
1801 irq_stat = readl(hpriv->main_cause_reg_addr);
1802 irq_mask = readl(hpriv->main_mask_reg_addr);
1804 /* check the cases where we either have nothing pending or have read
1805 * a bogus register value which can indicate HW removal or PCI fault
1807 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1808 goto out_unlock;
1810 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1812 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1813 mv_pci_error(host, mmio);
1814 handled = 1;
1815 goto out_unlock; /* skip all other HC irq handling */
1818 for (hc = 0; hc < n_hcs; hc++) {
1819 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1820 if (relevant) {
1821 mv_host_intr(host, relevant, hc);
1822 handled = 1;
1826 out_unlock:
1827 spin_unlock(&host->lock);
1829 return IRQ_RETVAL(handled);
1832 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1834 unsigned int ofs;
1836 switch (sc_reg_in) {
1837 case SCR_STATUS:
1838 case SCR_ERROR:
1839 case SCR_CONTROL:
1840 ofs = sc_reg_in * sizeof(u32);
1841 break;
1842 default:
1843 ofs = 0xffffffffU;
1844 break;
1846 return ofs;
1849 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1851 struct mv_host_priv *hpriv = ap->host->private_data;
1852 void __iomem *mmio = hpriv->base;
1853 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1854 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1856 if (ofs != 0xffffffffU) {
1857 *val = readl(addr + ofs);
1858 return 0;
1859 } else
1860 return -EINVAL;
1863 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1865 struct mv_host_priv *hpriv = ap->host->private_data;
1866 void __iomem *mmio = hpriv->base;
1867 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1868 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1870 if (ofs != 0xffffffffU) {
1871 writelfl(val, addr + ofs);
1872 return 0;
1873 } else
1874 return -EINVAL;
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
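
/*
 * Usage within this file: mv_reset_channel() calls mv_setup_ifctl(port_mmio, 1)
 * on non-Gen-I chips to allow 3.0 Gb/s operation, while mv_hardreset() calls
 * mv_setup_ifctl(mv_ap_base(ap), 0) to force the link back to 1.5 Gb/s when it
 * repeatedly fails to come up (errata FEr SATA#10 workaround).
 */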
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
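
/*
 * The SStatus values tested above, decoded per the SATA spec (DET in bits
 * 3:0, SPD in bits 7:4, IPM in bits 11:8): 0x0 means no device present,
 * 0x113 and 0x123 mean an established Gen1/Gen2 link with the interface in
 * the active state, and 0x121 (presence detected but no Phy communication)
 * is the stuck case that triggers the forced 1.5 Gb/s retry.
 */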
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
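
/*
 * Worked example of the shift arithmetic above: port 5 lives on the second
 * host controller (hc = 1), so shift = 5 * 2 + 1 = 11 and mask = 0x3 << 11,
 * i.e. the err/done bit pair for that port in the main IRQ mask register.
 * mv_eh_thaw() below performs the inverse operation.
 */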
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
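
/*
 * Resulting shadow register layout (assuming the standard ATA_REG_* taskfile
 * indices, each register on a 32-bit boundary from shd_base): data at +0x00,
 * error/feature at +0x04, nsect at +0x08, lbal/lbam/lbah at +0x0c/+0x10/+0x14,
 * device at +0x18 and status/command at +0x1c, with control/altstatus at the
 * separate SHD_CTL_AST_OFS offset.
 */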
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through to the chip_6042 handling */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
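
/*
 * Note: all three pools are created with the managed (devres) variant,
 * dmam_pool_create(), so they are released automatically when the device
 * goes away; no explicit destroy is needed in the error or remove paths.
 */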
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
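
/*
 * Field packing written to WINDOW_CTRL(i) above, as implied by the code:
 * bits 31:16 hold (size - 1) of the DRAM chip-select window, bits 15:8 the
 * mbus attribute, bits 7:4 the mbus target id, and bit 0 enables the window;
 * WINDOW_BASE(i) receives the window's base address.
 */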
/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}
/**
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
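
/*
 * Sketch of the fallback order implemented above: try a 64-bit streaming DMA
 * mask first; if that sticks, try a 64-bit and then a 32-bit consistent mask;
 * otherwise fall back to 32-bit masks for both streaming and consistent DMA.
 * Any combination that cannot be satisfied is logged and the error is
 * propagated back to the probe path.
 */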
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
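
/*
 * Illustrative only (values depend on the chip, its PCI class code and
 * MV_MAX_Q_DEPTH): the resulting log line looks something like
 *
 *	sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */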
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);