1 /*
2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 sata_mv TODO list:
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
32 2) Improve/fix IRQ and error handling sequences.
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 6) Add port multiplier support (intermediate)
44 8) Develop a low-power-consumption strategy, and implement it.
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
85 enum {
86 /* BARs are numbered in pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
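/* For reference, with MV_MAX_Q_DEPTH = 32 the sizes above work out to:
 * CRQB queue = 32 * 32B = 1KB (hence the 1KB alignment requirement),
 * CRPB queue = 32 * 8B = 256B, and one ePRD table = 256 * 16B = 4KB
 * per command tag.
 */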
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
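/* Worked example: global port 6 belongs to HC (6 >> MV_PORT_HC_SHIFT) = 1
 * and is hard port (6 & MV_PORT_MASK) = 2 within that host controller.
 */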
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
155 /* PCI interface registers */
157 PCI_COMMAND_OFS = 0xc00,
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
218 /* Shadow block registers */
219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
226 PHY_MODE3 = 0x310,
227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
232 SATA_INTERFACE_CTL = 0x050,
234 MV_M2_PREAMP_MASK = 0x7e0,
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
293 EDMA_ERR_CRQB_PAR |
294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
308 EDMA_ERR_CRQB_PAR |
309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
329 EDMA_IORDY_TMOUT = 0x34,
330 EDMA_ARB_CFG = 0x38,
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
355 enum {
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill_sg().
359 MV_DMA_BOUNDARY = 0xffffU,
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
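/* The low-bit masks above mirror the queue alignment rules: a 1KB-aligned
 * request queue leaves bits 9:0 clear (0xfffffc00), and a 256B-aligned
 * response queue leaves bits 7:0 clear (0xffffff00).
 */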
370 enum chip_type {
371 chip_504x,
372 chip_508x,
373 chip_5080,
374 chip_604x,
375 chip_608x,
376 chip_6042,
377 chip_7042,
378 chip_soc,
381 /* Command ReQuest Block: 32B */
382 struct mv_crqb {
383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
389 struct mv_crqb_iie {
390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
397 /* Command ResPonse Block: 8B */
398 struct mv_crpb {
399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405 struct mv_sg {
406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
412 struct mv_port_priv {
413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
420 unsigned int req_idx;
421 unsigned int resp_idx;
423 u32 pp_flags;
426 struct mv_port_signal {
427 u32 amps;
428 u32 pre;
431 struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
452 struct mv_hw_ops {
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
464 static void mv_irq_clear(struct ata_port *ap);
465 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
466 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
467 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
468 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
469 static int mv_port_start(struct ata_port *ap);
470 static void mv_port_stop(struct ata_port *ap);
471 static void mv_qc_prep(struct ata_queued_cmd *qc);
472 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
473 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
474 static void mv_error_handler(struct ata_port *ap);
475 static void mv_eh_freeze(struct ata_port *ap);
476 static void mv_eh_thaw(struct ata_port *ap);
477 static void mv6_dev_config(struct ata_device *dev);
479 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
480 unsigned int port);
481 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
482 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
483 void __iomem *mmio);
484 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
485 unsigned int n_hc);
486 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
487 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
489 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
490 unsigned int port);
491 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
492 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
493 void __iomem *mmio);
494 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
495 unsigned int n_hc);
496 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
497 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
498 void __iomem *mmio);
499 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
500 void __iomem *mmio);
501 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
502 void __iomem *mmio, unsigned int n_hc);
503 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
504 void __iomem *mmio);
505 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
506 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
507 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port_no);
509 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
510 void __iomem *port_mmio, int want_ncq);
511 static int __mv_stop_dma(struct ata_port *ap);
513 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
514 * because we have to allow room for worst case splitting of
515 * PRDs for 64K boundaries in mv_fill_sg().
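/* In the worst case every PRD spans a 64K boundary and is split in two by
 * mv_fill_sg(), so advertising MV_MAX_SG_CT / 2 = 128 entries to the SCSI
 * layer still fits within the 256-entry ePRD table.
 */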
517 static struct scsi_host_template mv5_sht = {
518 .module = THIS_MODULE,
519 .name = DRV_NAME,
520 .ioctl = ata_scsi_ioctl,
521 .queuecommand = ata_scsi_queuecmd,
522 .can_queue = ATA_DEF_QUEUE,
523 .this_id = ATA_SHT_THIS_ID,
524 .sg_tablesize = MV_MAX_SG_CT / 2,
525 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
526 .emulated = ATA_SHT_EMULATED,
527 .use_clustering = 1,
528 .proc_name = DRV_NAME,
529 .dma_boundary = MV_DMA_BOUNDARY,
530 .slave_configure = ata_scsi_slave_config,
531 .slave_destroy = ata_scsi_slave_destroy,
532 .bios_param = ata_std_bios_param,
535 static struct scsi_host_template mv6_sht = {
536 .module = THIS_MODULE,
537 .name = DRV_NAME,
538 .ioctl = ata_scsi_ioctl,
539 .queuecommand = ata_scsi_queuecmd,
540 .change_queue_depth = ata_scsi_change_queue_depth,
541 .can_queue = MV_MAX_Q_DEPTH - 1,
542 .this_id = ATA_SHT_THIS_ID,
543 .sg_tablesize = MV_MAX_SG_CT / 2,
544 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
545 .emulated = ATA_SHT_EMULATED,
546 .use_clustering = 1,
547 .proc_name = DRV_NAME,
548 .dma_boundary = MV_DMA_BOUNDARY,
549 .slave_configure = ata_scsi_slave_config,
550 .slave_destroy = ata_scsi_slave_destroy,
551 .bios_param = ata_std_bios_param,
554 static const struct ata_port_operations mv5_ops = {
555 .tf_load = ata_tf_load,
556 .tf_read = ata_tf_read,
557 .check_status = ata_check_status,
558 .exec_command = ata_exec_command,
559 .dev_select = ata_std_dev_select,
561 .cable_detect = ata_cable_sata,
563 .qc_prep = mv_qc_prep,
564 .qc_issue = mv_qc_issue,
565 .data_xfer = ata_data_xfer,
567 .irq_clear = mv_irq_clear,
568 .irq_on = ata_irq_on,
570 .error_handler = mv_error_handler,
571 .freeze = mv_eh_freeze,
572 .thaw = mv_eh_thaw,
574 .scr_read = mv5_scr_read,
575 .scr_write = mv5_scr_write,
577 .port_start = mv_port_start,
578 .port_stop = mv_port_stop,
581 static const struct ata_port_operations mv6_ops = {
582 .dev_config = mv6_dev_config,
583 .tf_load = ata_tf_load,
584 .tf_read = ata_tf_read,
585 .check_status = ata_check_status,
586 .exec_command = ata_exec_command,
587 .dev_select = ata_std_dev_select,
589 .cable_detect = ata_cable_sata,
591 .qc_prep = mv_qc_prep,
592 .qc_issue = mv_qc_issue,
593 .data_xfer = ata_data_xfer,
595 .irq_clear = mv_irq_clear,
596 .irq_on = ata_irq_on,
598 .error_handler = mv_error_handler,
599 .freeze = mv_eh_freeze,
600 .thaw = mv_eh_thaw,
601 .qc_defer = ata_std_qc_defer,
603 .scr_read = mv_scr_read,
604 .scr_write = mv_scr_write,
606 .port_start = mv_port_start,
607 .port_stop = mv_port_stop,
610 static const struct ata_port_operations mv_iie_ops = {
611 .tf_load = ata_tf_load,
612 .tf_read = ata_tf_read,
613 .check_status = ata_check_status,
614 .exec_command = ata_exec_command,
615 .dev_select = ata_std_dev_select,
617 .cable_detect = ata_cable_sata,
619 .qc_prep = mv_qc_prep_iie,
620 .qc_issue = mv_qc_issue,
621 .data_xfer = ata_data_xfer,
623 .irq_clear = mv_irq_clear,
624 .irq_on = ata_irq_on,
626 .error_handler = mv_error_handler,
627 .freeze = mv_eh_freeze,
628 .thaw = mv_eh_thaw,
629 .qc_defer = ata_std_qc_defer,
631 .scr_read = mv_scr_read,
632 .scr_write = mv_scr_write,
634 .port_start = mv_port_start,
635 .port_stop = mv_port_stop,
638 static const struct ata_port_info mv_port_info[] = {
639 { /* chip_504x */
640 .flags = MV_COMMON_FLAGS,
641 .pio_mask = 0x1f, /* pio0-4 */
642 .udma_mask = ATA_UDMA6,
643 .port_ops = &mv5_ops,
645 { /* chip_508x */
646 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
647 .pio_mask = 0x1f, /* pio0-4 */
648 .udma_mask = ATA_UDMA6,
649 .port_ops = &mv5_ops,
651 { /* chip_5080 */
652 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
653 .pio_mask = 0x1f, /* pio0-4 */
654 .udma_mask = ATA_UDMA6,
655 .port_ops = &mv5_ops,
657 { /* chip_604x */
658 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
659 ATA_FLAG_NCQ,
660 .pio_mask = 0x1f, /* pio0-4 */
661 .udma_mask = ATA_UDMA6,
662 .port_ops = &mv6_ops,
664 { /* chip_608x */
665 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
666 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
667 .pio_mask = 0x1f, /* pio0-4 */
668 .udma_mask = ATA_UDMA6,
669 .port_ops = &mv6_ops,
671 { /* chip_6042 */
672 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
673 ATA_FLAG_NCQ,
674 .pio_mask = 0x1f, /* pio0-4 */
675 .udma_mask = ATA_UDMA6,
676 .port_ops = &mv_iie_ops,
678 { /* chip_7042 */
679 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
680 ATA_FLAG_NCQ,
681 .pio_mask = 0x1f, /* pio0-4 */
682 .udma_mask = ATA_UDMA6,
683 .port_ops = &mv_iie_ops,
685 { /* chip_soc */
686 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
687 .pio_mask = 0x1f, /* pio0-4 */
688 .udma_mask = ATA_UDMA6,
689 .port_ops = &mv_iie_ops,
693 static const struct pci_device_id mv_pci_tbl[] = {
694 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
695 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
696 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
697 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
698 /* RocketRAID 1740/174x have different identifiers */
699 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
700 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
702 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
703 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
704 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
705 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
706 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
708 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
710 /* Adaptec 1430SA */
711 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
713 /* Marvell 7042 support */
714 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
716 /* Highpoint RocketRAID PCIe series */
717 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
718 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
720 { } /* terminate list */
723 static const struct mv_hw_ops mv5xxx_ops = {
724 .phy_errata = mv5_phy_errata,
725 .enable_leds = mv5_enable_leds,
726 .read_preamp = mv5_read_preamp,
727 .reset_hc = mv5_reset_hc,
728 .reset_flash = mv5_reset_flash,
729 .reset_bus = mv5_reset_bus,
732 static const struct mv_hw_ops mv6xxx_ops = {
733 .phy_errata = mv6_phy_errata,
734 .enable_leds = mv6_enable_leds,
735 .read_preamp = mv6_read_preamp,
736 .reset_hc = mv6_reset_hc,
737 .reset_flash = mv6_reset_flash,
738 .reset_bus = mv_reset_pci_bus,
741 static const struct mv_hw_ops mv_soc_ops = {
742 .phy_errata = mv6_phy_errata,
743 .enable_leds = mv_soc_enable_leds,
744 .read_preamp = mv_soc_read_preamp,
745 .reset_hc = mv_soc_reset_hc,
746 .reset_flash = mv_soc_reset_flash,
747 .reset_bus = mv_soc_reset_bus,
751 * Functions
754 static inline void writelfl(unsigned long data, void __iomem *addr)
756 writel(data, addr);
757 (void) readl(addr); /* flush to avoid PCI posted write */
760 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
762 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
765 static inline unsigned int mv_hc_from_port(unsigned int port)
767 return port >> MV_PORT_HC_SHIFT;
770 static inline unsigned int mv_hardport_from_port(unsigned int port)
772 return port & MV_PORT_MASK;
775 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
776 unsigned int port)
778 return mv_hc_base(base, mv_hc_from_port(port));
781 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
783 return mv_hc_base_from_port(base, port) +
784 MV_SATAHC_ARBTR_REG_SZ +
785 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
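/* Worked example: port 5 sits on HC 1, so mv_hc_base_from_port() returns
 * base + 0x20000 + 1 * 0x10000; adding the 0x2000 arbiter window and
 * hard port 1 * 0x2000 gives a port base of base + 0x34000.
 */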
788 static inline void __iomem *mv_host_base(struct ata_host *host)
790 struct mv_host_priv *hpriv = host->private_data;
791 return hpriv->base;
794 static inline void __iomem *mv_ap_base(struct ata_port *ap)
796 return mv_port_base(mv_host_base(ap->host), ap->port_no);
799 static inline int mv_get_hc_count(unsigned long port_flags)
801 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
804 static void mv_irq_clear(struct ata_port *ap)
808 static void mv_set_edma_ptrs(void __iomem *port_mmio,
809 struct mv_host_priv *hpriv,
810 struct mv_port_priv *pp)
812 u32 index;
815 * initialize request queue
817 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
819 WARN_ON(pp->crqb_dma & 0x3ff);
820 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
821 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
822 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
824 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
825 writelfl((pp->crqb_dma & 0xffffffff) | index,
826 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
827 else
828 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
831 * initialize response queue
833 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
835 WARN_ON(pp->crpb_dma & 0xff);
836 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
838 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
839 writelfl((pp->crpb_dma & 0xffffffff) | index,
840 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
841 else
842 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
844 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
845 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
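/* The pointer shifts convert a queue index into a byte offset within the
 * ring: request entries are 32B CRQBs (index << 5) and response entries
 * are 8B CRPBs (index << 3), which is why the in/out pointer registers can
 * also carry the queue BASE_LO bits.
 */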
849 * mv_start_dma - Enable eDMA engine
850 * @base: port base address
851 * @pp: port private data
853 * Verify the local cache of the eDMA state is accurate with a
854 * WARN_ON.
856 * LOCKING:
857 * Inherited from caller.
859 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
860 struct mv_port_priv *pp, u8 protocol)
862 int want_ncq = (protocol == ATA_PROT_NCQ);
864 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
865 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
866 if (want_ncq != using_ncq)
867 __mv_stop_dma(ap);
869 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
870 struct mv_host_priv *hpriv = ap->host->private_data;
871 int hard_port = mv_hardport_from_port(ap->port_no);
872 void __iomem *hc_mmio = mv_hc_base_from_port(
876 mv_host_base(ap->host), hard_port);
878 u32 hc_irq_cause, ipending;
880 /* clear EDMA event indicators, if any */
881 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
883 /* clear EDMA interrupt indicator, if any */
884 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
885 ipending = (DEV_IRQ << hard_port) |
886 (CRPB_DMA_DONE << hard_port);
887 if (hc_irq_cause & ipending) {
888 writelfl(hc_irq_cause & ~ipending,
889 hc_mmio + HC_IRQ_CAUSE_OFS);
892 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
894 /* clear FIS IRQ Cause */
895 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
897 mv_set_edma_ptrs(port_mmio, hpriv, pp);
899 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
900 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
902 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
906 * __mv_stop_dma - Disable eDMA engine
907 * @ap: ATA channel to manipulate
909 * Verify the local cache of the eDMA state is accurate with a
910 * WARN_ON.
912 * LOCKING:
913 * Inherited from caller.
915 static int __mv_stop_dma(struct ata_port *ap)
917 void __iomem *port_mmio = mv_ap_base(ap);
918 struct mv_port_priv *pp = ap->private_data;
919 u32 reg;
920 int i, err = 0;
922 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
923 /* Disable EDMA if active. The disable bit auto clears.
925 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
926 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
927 } else {
928 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
931 /* now properly wait for the eDMA to stop */
932 for (i = 1000; i > 0; i--) {
933 reg = readl(port_mmio + EDMA_CMD_OFS);
934 if (!(reg & EDMA_EN))
935 break;
937 udelay(100);
940 if (reg & EDMA_EN) {
941 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
942 err = -EIO;
945 return err;
948 static int mv_stop_dma(struct ata_port *ap)
950 unsigned long flags;
951 int rc;
953 spin_lock_irqsave(&ap->host->lock, flags);
954 rc = __mv_stop_dma(ap);
955 spin_unlock_irqrestore(&ap->host->lock, flags);
957 return rc;
960 #ifdef ATA_DEBUG
961 static void mv_dump_mem(void __iomem *start, unsigned bytes)
963 int b, w;
964 for (b = 0; b < bytes; ) {
965 DPRINTK("%p: ", start + b);
966 for (w = 0; b < bytes && w < 4; w++) {
967 printk("%08x ", readl(start + b));
968 b += sizeof(u32);
970 printk("\n");
973 #endif
975 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
977 #ifdef ATA_DEBUG
978 int b, w;
979 u32 dw;
980 for (b = 0; b < bytes; ) {
981 DPRINTK("%02x: ", b);
982 for (w = 0; b < bytes && w < 4; w++) {
983 (void) pci_read_config_dword(pdev, b, &dw);
984 printk("%08x ", dw);
985 b += sizeof(u32);
987 printk("\n");
989 #endif
991 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
992 struct pci_dev *pdev)
994 #ifdef ATA_DEBUG
995 void __iomem *hc_base = mv_hc_base(mmio_base,
996 port >> MV_PORT_HC_SHIFT);
997 void __iomem *port_base;
998 int start_port, num_ports, p, start_hc, num_hcs, hc;
1000 if (0 > port) {
1001 start_hc = start_port = 0;
1002 num_ports = 8; /* should be benign for 4 port devs */
1003 num_hcs = 2;
1004 } else {
1005 start_hc = port >> MV_PORT_HC_SHIFT;
1006 start_port = port;
1007 num_ports = num_hcs = 1;
1009 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1010 num_ports > 1 ? num_ports - 1 : start_port);
1012 if (NULL != pdev) {
1013 DPRINTK("PCI config space regs:\n");
1014 mv_dump_pci_cfg(pdev, 0x68);
1016 DPRINTK("PCI regs:\n");
1017 mv_dump_mem(mmio_base+0xc00, 0x3c);
1018 mv_dump_mem(mmio_base+0xd00, 0x34);
1019 mv_dump_mem(mmio_base+0xf00, 0x4);
1020 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1021 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1022 hc_base = mv_hc_base(mmio_base, hc);
1023 DPRINTK("HC regs (HC %i):\n", hc);
1024 mv_dump_mem(hc_base, 0x1c);
1026 for (p = start_port; p < start_port + num_ports; p++) {
1027 port_base = mv_port_base(mmio_base, p);
1028 DPRINTK("EDMA regs (port %i):\n", p);
1029 mv_dump_mem(port_base, 0x54);
1030 DPRINTK("SATA regs (port %i):\n", p);
1031 mv_dump_mem(port_base+0x300, 0x60);
1033 #endif
1036 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1038 unsigned int ofs;
1040 switch (sc_reg_in) {
1041 case SCR_STATUS:
1042 case SCR_CONTROL:
1043 case SCR_ERROR:
1044 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1045 break;
1046 case SCR_ACTIVE:
1047 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1048 break;
1049 default:
1050 ofs = 0xffffffffU;
1051 break;
1053 return ofs;
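/* With the usual libata SCR indices (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2), this yields port offsets 0x300, 0x304 and 0x308,
 * while SCR_ACTIVE is read separately at 0x350.
 */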
1056 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1058 unsigned int ofs = mv_scr_offset(sc_reg_in);
1060 if (ofs != 0xffffffffU) {
1061 *val = readl(mv_ap_base(ap) + ofs);
1062 return 0;
1063 } else
1064 return -EINVAL;
1067 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1069 unsigned int ofs = mv_scr_offset(sc_reg_in);
1071 if (ofs != 0xffffffffU) {
1072 writelfl(val, mv_ap_base(ap) + ofs);
1073 return 0;
1074 } else
1075 return -EINVAL;
1078 static void mv6_dev_config(struct ata_device *adev)
1081 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1082 * See mv_qc_prep() for more info.
1084 if (adev->flags & ATA_DFLAG_NCQ)
1085 if (adev->max_sectors > ATA_MAX_SECTORS)
1086 adev->max_sectors = ATA_MAX_SECTORS;
1089 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1090 void __iomem *port_mmio, int want_ncq)
1092 u32 cfg;
1094 /* set up non-NCQ EDMA configuration */
1095 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1097 if (IS_GEN_I(hpriv))
1098 cfg |= (1 << 8); /* enab config burst size mask */
1100 else if (IS_GEN_II(hpriv))
1101 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1103 else if (IS_GEN_IIE(hpriv)) {
1104 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1105 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1106 cfg |= (1 << 18); /* enab early completion */
1107 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1110 if (want_ncq) {
1111 cfg |= EDMA_CFG_NCQ;
1112 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1113 } else
1114 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1116 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1119 static void mv_port_free_dma_mem(struct ata_port *ap)
1121 struct mv_host_priv *hpriv = ap->host->private_data;
1122 struct mv_port_priv *pp = ap->private_data;
1123 int tag;
1125 if (pp->crqb) {
1126 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1127 pp->crqb = NULL;
1129 if (pp->crpb) {
1130 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1131 pp->crpb = NULL;
1134 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1135 * For later hardware, we have one unique sg_tbl per NCQ tag.
1137 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1138 if (pp->sg_tbl[tag]) {
1139 if (tag == 0 || !IS_GEN_I(hpriv))
1140 dma_pool_free(hpriv->sg_tbl_pool,
1141 pp->sg_tbl[tag],
1142 pp->sg_tbl_dma[tag]);
1143 pp->sg_tbl[tag] = NULL;
1149 * mv_port_start - Port specific init/start routine.
1150 * @ap: ATA channel to manipulate
1152 * Allocate and point to DMA memory, init port private memory,
1153 * zero indices.
1155 * LOCKING:
1156 * Inherited from caller.
1158 static int mv_port_start(struct ata_port *ap)
1160 struct device *dev = ap->host->dev;
1161 struct mv_host_priv *hpriv = ap->host->private_data;
1162 struct mv_port_priv *pp;
1163 void __iomem *port_mmio = mv_ap_base(ap);
1164 unsigned long flags;
1168 int tag;
1171 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1172 if (!pp)
1173 return -ENOMEM;
1174 ap->private_data = pp;
1183 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1184 if (!pp->crqb)
1185 return -ENOMEM;
1186 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1188 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1189 if (!pp->crpb)
1190 goto out_port_free_dma_mem;
1191 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1194 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1195 * For later hardware, we need one unique sg_tbl per NCQ tag.
1197 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1198 if (tag == 0 || !IS_GEN_I(hpriv)) {
1199 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1200 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1201 if (!pp->sg_tbl[tag])
1202 goto out_port_free_dma_mem;
1203 } else {
1204 pp->sg_tbl[tag] = pp->sg_tbl[0];
1205 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1209 spin_lock_irqsave(&ap->host->lock, flags);
1211 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1212 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1214 spin_unlock_irqrestore(&ap->host->lock, flags);
1216 /* Don't turn on EDMA here...do it before DMA commands only. Else
1217 * we'll be unable to send non-data, PIO, etc due to restricted access
1218 * to shadow regs.
1220 return 0;
1222 out_port_free_dma_mem:
1223 mv_port_free_dma_mem(ap);
1224 return -ENOMEM;
1228 * mv_port_stop - Port specific cleanup/stop routine.
1229 * @ap: ATA channel to manipulate
1231 * Stop DMA, cleanup port memory.
1233 * LOCKING:
1234 * This routine uses the host lock to protect the DMA stop.
1236 static void mv_port_stop(struct ata_port *ap)
1238 mv_stop_dma(ap);
1239 mv_port_free_dma_mem(ap);
1243 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1244 * @qc: queued command whose SG list to source from
1246 * Populate the SG list and mark the last entry.
1248 * LOCKING:
1249 * Inherited from caller.
1251 static void mv_fill_sg(struct ata_queued_cmd *qc)
1253 struct mv_port_priv *pp = qc->ap->private_data;
1254 struct scatterlist *sg;
1255 struct mv_sg *mv_sg, *last_sg = NULL;
1256 unsigned int si;
1258 mv_sg = pp->sg_tbl[qc->tag];
1259 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1260 dma_addr_t addr = sg_dma_address(sg);
1261 u32 sg_len = sg_dma_len(sg);
1263 while (sg_len) {
1264 u32 offset = addr & 0xffff;
1265 u32 len = sg_len;
1267 if ((offset + sg_len > 0x10000))
1268 len = 0x10000 - offset;
1270 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1271 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1272 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1274 sg_len -= len;
1275 addr += len;
1277 last_sg = mv_sg;
1278 mv_sg++;
1282 if (likely(last_sg))
1283 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
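/* Example of the 64K split above: a 12KB segment starting at dma address
 * 0x1f000 has offset 0xf000, so the first ePRD covers 0x1000 bytes up to
 * the 64K boundary and the loop emits a second ePRD for the remaining
 * 0x2000 bytes.
 */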
1286 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1288 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1289 (last ? CRQB_CMD_LAST : 0);
1290 *cmdw = cpu_to_le16(tmp);
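/* The packed CRQB command word layout implied above: bits 7:0 carry the
 * register data, bits 10:8 the ATA register address, bits 12:11 the
 * CRQB_CMD_CS control field (0x2), and bit 15 flags the last word.
 */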
1294 * mv_qc_prep - Host specific command preparation.
1295 * @qc: queued command to prepare
1297 * This routine simply redirects to the general purpose routine
1298 * if command is not DMA. Else, it handles prep of the CRQB
1299 * (command request block), does some sanity checking, and calls
1300 * the SG load routine.
1302 * LOCKING:
1303 * Inherited from caller.
1305 static void mv_qc_prep(struct ata_queued_cmd *qc)
1307 struct ata_port *ap = qc->ap;
1308 struct mv_port_priv *pp = ap->private_data;
1309 __le16 *cw;
1310 struct ata_taskfile *tf;
1311 u16 flags = 0;
1312 unsigned in_index;
1314 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1315 (qc->tf.protocol != ATA_PROT_NCQ))
1316 return;
1318 /* Fill in command request block
1320 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1321 flags |= CRQB_FLAG_READ;
1322 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1323 flags |= qc->tag << CRQB_TAG_SHIFT;
1325 /* get current queue index from software */
1326 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1328 pp->crqb[in_index].sg_addr =
1329 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1330 pp->crqb[in_index].sg_addr_hi =
1331 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1332 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1334 cw = &pp->crqb[in_index].ata_cmd[0];
1335 tf = &qc->tf;
1337 /* Sadly, the CRQB cannot accommodate all registers--there are
1338 * only 11 bytes...so we must pick and choose required
1339 * registers based on the command. So, we drop feature and
1340 * hob_feature for [RW] DMA commands, but they are needed for
1341 * NCQ. NCQ will drop hob_nsect.
1343 switch (tf->command) {
1344 case ATA_CMD_READ:
1345 case ATA_CMD_READ_EXT:
1346 case ATA_CMD_WRITE:
1347 case ATA_CMD_WRITE_EXT:
1348 case ATA_CMD_WRITE_FUA_EXT:
1349 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1350 break;
1351 case ATA_CMD_FPDMA_READ:
1352 case ATA_CMD_FPDMA_WRITE:
1353 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1354 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1355 break;
1356 default:
1357 /* The only other commands EDMA supports in non-queued and
1358 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1359 * of which are defined/used by Linux. If we get here, this
1360 * driver needs work.
1362 * FIXME: modify libata to give qc_prep a return value and
1363 * return error here.
1365 BUG_ON(tf->command);
1366 break;
1368 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1369 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1370 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1371 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1372 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1373 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1374 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1375 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1376 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1378 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1379 return;
1380 mv_fill_sg(qc);
1384 * mv_qc_prep_iie - Host specific command preparation.
1385 * @qc: queued command to prepare
1387 * This routine simply redirects to the general purpose routine
1388 * if command is not DMA. Else, it handles prep of the CRQB
1389 * (command request block), does some sanity checking, and calls
1390 * the SG load routine.
1392 * LOCKING:
1393 * Inherited from caller.
1395 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1397 struct ata_port *ap = qc->ap;
1398 struct mv_port_priv *pp = ap->private_data;
1399 struct mv_crqb_iie *crqb;
1400 struct ata_taskfile *tf;
1401 unsigned in_index;
1402 u32 flags = 0;
1404 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1405 (qc->tf.protocol != ATA_PROT_NCQ))
1406 return;
1408 /* Fill in Gen IIE command request block
1410 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1411 flags |= CRQB_FLAG_READ;
1413 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1414 flags |= qc->tag << CRQB_TAG_SHIFT;
1415 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1417 /* get current queue index from software */
1418 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1420 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1421 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1422 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1423 crqb->flags = cpu_to_le32(flags);
1425 tf = &qc->tf;
1426 crqb->ata_cmd[0] = cpu_to_le32(
1427 (tf->command << 16) |
1428 (tf->feature << 24)
1430 crqb->ata_cmd[1] = cpu_to_le32(
1431 (tf->lbal << 0) |
1432 (tf->lbam << 8) |
1433 (tf->lbah << 16) |
1434 (tf->device << 24)
1436 crqb->ata_cmd[2] = cpu_to_le32(
1437 (tf->hob_lbal << 0) |
1438 (tf->hob_lbam << 8) |
1439 (tf->hob_lbah << 16) |
1440 (tf->hob_feature << 24)
1442 crqb->ata_cmd[3] = cpu_to_le32(
1443 (tf->nsect << 0) |
1444 (tf->hob_nsect << 8)
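/* For reference, the four little-endian words above pack the taskfile as:
 * word 0 = command/feature, word 1 = lbal/lbam/lbah/device,
 * word 2 = hob_lbal/hob_lbam/hob_lbah/hob_feature, word 3 = nsect/hob_nsect,
 * each field occupying one byte at the shift shown.
 */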
1447 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1448 return;
1449 mv_fill_sg(qc);
1453 * mv_qc_issue - Initiate a command to the host
1454 * @qc: queued command to start
1456 * This routine simply redirects to the general purpose routine
1457 * if command is not DMA. Else, it sanity checks our local
1458 * caches of the request producer/consumer indices then enables
1459 * DMA and bumps the request producer index.
1461 * LOCKING:
1462 * Inherited from caller.
1464 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1466 struct ata_port *ap = qc->ap;
1467 void __iomem *port_mmio = mv_ap_base(ap);
1468 struct mv_port_priv *pp = ap->private_data;
1469 u32 in_index;
1471 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1472 (qc->tf.protocol != ATA_PROT_NCQ)) {
1473 /* We're about to send a non-EDMA capable command to the
1474 * port. Turn off EDMA so there won't be problems accessing
1475 * shadow block, etc registers.
1477 __mv_stop_dma(ap);
1478 return ata_qc_issue_prot(qc);
1481 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1483 pp->req_idx++;
1485 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1487 /* and write the request in pointer to kick the EDMA to life */
1488 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1489 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1491 return 0;
1495 * mv_err_intr - Handle error interrupts on the port
1496 * @ap: ATA channel to manipulate
1497 * @reset_allowed: bool: 0 == don't trigger from reset here
1499 * In most cases, just clear the interrupt and move on. However,
1500 * some cases require an eDMA reset, which is done right before
1501 * the COMRESET in mv_phy_reset(). The SERR case requires a
1502 * clear of pending errors in the SATA SERROR register. Finally,
1503 * if the port disabled DMA, update our cached copy to match.
1505 * LOCKING:
1506 * Inherited from caller.
1508 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1510 void __iomem *port_mmio = mv_ap_base(ap);
1511 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1512 struct mv_port_priv *pp = ap->private_data;
1513 struct mv_host_priv *hpriv = ap->host->private_data;
1514 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1515 unsigned int action = 0, err_mask = 0;
1516 struct ata_eh_info *ehi = &ap->link.eh_info;
1518 ata_ehi_clear_desc(ehi);
1520 if (!edma_enabled) {
1521 /* just a guess: do we need to do this? should we
1522 * expand this, and do it in all cases?
1524 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1525 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1528 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1530 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1533 * all generations share these EDMA error cause bits
1536 if (edma_err_cause & EDMA_ERR_DEV)
1537 err_mask |= AC_ERR_DEV;
1538 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1539 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1540 EDMA_ERR_INTRL_PAR)) {
1541 err_mask |= AC_ERR_ATA_BUS;
1542 action |= ATA_EH_HARDRESET;
1543 ata_ehi_push_desc(ehi, "parity error");
1545 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1546 ata_ehi_hotplugged(ehi);
1547 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1548 "dev disconnect" : "dev connect");
1549 action |= ATA_EH_HARDRESET;
1552 if (IS_GEN_I(hpriv)) {
1553 eh_freeze_mask = EDMA_EH_FREEZE_5;
1555 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1556 pp = ap->private_data;
1557 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1558 ata_ehi_push_desc(ehi, "EDMA self-disable");
1560 } else {
1561 eh_freeze_mask = EDMA_EH_FREEZE;
1563 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1564 pp = ap->private_data;
1565 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1566 ata_ehi_push_desc(ehi, "EDMA self-disable");
1569 if (edma_err_cause & EDMA_ERR_SERR) {
1570 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1571 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1572 err_mask = AC_ERR_ATA_BUS;
1573 action |= ATA_EH_HARDRESET;
1577 /* Clear EDMA now that SERR cleanup done */
1578 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1580 if (!err_mask) {
1581 err_mask = AC_ERR_OTHER;
1582 action |= ATA_EH_HARDRESET;
1585 ehi->serror |= serr;
1586 ehi->action |= action;
1588 if (qc)
1589 qc->err_mask |= err_mask;
1590 else
1591 ehi->err_mask |= err_mask;
1593 if (edma_err_cause & eh_freeze_mask)
1594 ata_port_freeze(ap);
1595 else
1596 ata_port_abort(ap);
1599 static void mv_intr_pio(struct ata_port *ap)
1601 struct ata_queued_cmd *qc;
1602 u8 ata_status;
1604 /* ignore spurious intr if drive still BUSY */
1605 ata_status = readb(ap->ioaddr.status_addr);
1606 if (unlikely(ata_status & ATA_BUSY))
1607 return;
1609 /* get active ATA command */
1610 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1611 if (unlikely(!qc)) /* no active tag */
1612 return;
1613 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1614 return;
1616 /* and finally, complete the ATA command */
1617 qc->err_mask |= ac_err_mask(ata_status);
1618 ata_qc_complete(qc);
1621 static void mv_intr_edma(struct ata_port *ap)
1623 void __iomem *port_mmio = mv_ap_base(ap);
1624 struct mv_host_priv *hpriv = ap->host->private_data;
1625 struct mv_port_priv *pp = ap->private_data;
1626 struct ata_queued_cmd *qc;
1627 u32 out_index, in_index;
1628 bool work_done = false;
1630 /* get h/w response queue pointer */
1631 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1632 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1634 while (1) {
1635 u16 status;
1636 unsigned int tag;
1638 /* get s/w response queue last-read pointer, and compare */
1639 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1640 if (in_index == out_index)
1641 break;
1643 /* 50xx: get active ATA command */
1644 if (IS_GEN_I(hpriv))
1645 tag = ap->link.active_tag;
1647 /* Gen II/IIE: get active ATA command via tag, to enable
1648 * support for queueing. This works transparently for
1649 * queued and non-queued modes.
1651 else
1652 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1654 qc = ata_qc_from_tag(ap, tag);
1656 /* For non-NCQ mode, the lower 8 bits of status
1657 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1658 * which should be zero if all went well.
1660 status = le16_to_cpu(pp->crpb[out_index].flags);
1661 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1662 mv_err_intr(ap, qc);
1663 return;
1666 /* and finally, complete the ATA command */
1667 if (qc) {
1668 qc->err_mask |=
1669 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1670 ata_qc_complete(qc);
1673 /* advance software response queue pointer, to
1674 * indicate (after the loop completes) to hardware
1675 * that we have consumed a response queue entry.
1677 work_done = true;
1678 pp->resp_idx++;
1681 if (work_done)
1682 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1683 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1684 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
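/* The consumer pointer is written back only after the loop; the write
 * re-supplies the CRPB base-low bits because EDMA_RSP_Q_OUT_PTR_OFS also
 * contains BASE_LO (see the register definitions above).
 */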
1688 * mv_host_intr - Handle all interrupts on the given host controller
1689 * @host: host specific structure
1690 * @relevant: port error bits relevant to this host controller
1691 * @hc: which host controller we're to look at
1693 * Read then write clear the HC interrupt status then walk each
1694 * port connected to the HC and see if it needs servicing. Port
1695 * success ints are reported in the HC interrupt status reg, the
1696 * port error ints are reported in the higher level main
1697 * interrupt status register and thus are passed in via the
1698 * 'relevant' argument.
1700 * LOCKING:
1701 * Inherited from caller.
1703 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1705 struct mv_host_priv *hpriv = host->private_data;
1706 void __iomem *mmio = hpriv->base;
1707 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1708 u32 hc_irq_cause;
1709 int port, port0, last_port;
1711 if (hc == 0)
1712 port0 = 0;
1713 else
1714 port0 = MV_PORTS_PER_HC;
1716 if (HAS_PCI(host))
1717 last_port = port0 + MV_PORTS_PER_HC;
1718 else
1719 last_port = port0 + hpriv->n_ports;
1720 /* we'll need the HC success int register in most cases */
1721 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1722 if (!hc_irq_cause)
1723 return;
1725 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1727 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1728 hc, relevant, hc_irq_cause);
1730 for (port = port0; port < last_port; port++) {
1731 struct ata_port *ap = host->ports[port];
1732 struct mv_port_priv *pp;
1733 int have_err_bits, hard_port, shift;
1735 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1736 continue;
1738 pp = ap->private_data;
1740 shift = port << 1; /* (port * 2) */
1741 if (port >= MV_PORTS_PER_HC) {
1742 shift++; /* skip bit 8 in the HC Main IRQ reg */
1744 have_err_bits = ((PORT0_ERR << shift) & relevant);
1746 if (unlikely(have_err_bits)) {
1747 struct ata_queued_cmd *qc;
1749 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1750 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1751 continue;
1753 mv_err_intr(ap, qc);
1754 continue;
1757 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1759 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1760 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1761 mv_intr_edma(ap);
1762 } else {
1763 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1764 mv_intr_pio(ap);
1767 VPRINTK("EXIT\n");
1770 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1772 struct mv_host_priv *hpriv = host->private_data;
1773 struct ata_port *ap;
1774 struct ata_queued_cmd *qc;
1775 struct ata_eh_info *ehi;
1776 unsigned int i, err_mask, printed = 0;
1777 u32 err_cause;
1779 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1781 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1782 err_cause);
1784 DPRINTK("All regs @ PCI error\n");
1785 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1787 writelfl(0, mmio + hpriv->irq_cause_ofs);
1789 for (i = 0; i < host->n_ports; i++) {
1790 ap = host->ports[i];
1791 if (!ata_link_offline(&ap->link)) {
1792 ehi = &ap->link.eh_info;
1793 ata_ehi_clear_desc(ehi);
1794 if (!printed++)
1795 ata_ehi_push_desc(ehi,
1796 "PCI err cause 0x%08x", err_cause);
1797 err_mask = AC_ERR_HOST_BUS;
1798 ehi->action = ATA_EH_HARDRESET;
1799 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1800 if (qc)
1801 qc->err_mask |= err_mask;
1802 else
1803 ehi->err_mask |= err_mask;
1805 ata_port_freeze(ap);
1811 * mv_interrupt - Main interrupt event handler
1812 * @irq: unused
1813 * @dev_instance: private data; in this case the host structure
1815 * Read the read only register to determine if any host
1816 * controllers have pending interrupts. If so, call lower level
1817 * routine to handle. Also check for PCI errors which are only
1818 * reported here.
1820 * LOCKING:
1821 * This routine holds the host lock while processing pending
1822 * interrupts.
1824 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1826 struct ata_host *host = dev_instance;
1827 struct mv_host_priv *hpriv = host->private_data;
1828 unsigned int hc, handled = 0, n_hcs;
1829 void __iomem *mmio = hpriv->base;
1830 u32 irq_stat, irq_mask;
1832 spin_lock(&host->lock);
1834 irq_stat = readl(hpriv->main_cause_reg_addr);
1835 irq_mask = readl(hpriv->main_mask_reg_addr);
1837 /* check the cases where we either have nothing pending or have read
1838 * a bogus register value which can indicate HW removal or PCI fault
1840 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1841 goto out_unlock;
1843 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1845 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1846 mv_pci_error(host, mmio);
1847 handled = 1;
1848 goto out_unlock; /* skip all other HC irq handling */
1851 for (hc = 0; hc < n_hcs; hc++) {
1852 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1853 if (relevant) {
1854 mv_host_intr(host, relevant, hc);
1855 handled = 1;
1859 out_unlock:
1860 spin_unlock(&host->lock);
1862 return IRQ_RETVAL(handled);
1865 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1867 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1868 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1870 return hc_mmio + ofs;
1873 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1875 unsigned int ofs;
1877 switch (sc_reg_in) {
1878 case SCR_STATUS:
1879 case SCR_ERROR:
1880 case SCR_CONTROL:
1881 ofs = sc_reg_in * sizeof(u32);
1882 break;
1883 default:
1884 ofs = 0xffffffffU;
1885 break;
1887 return ofs;
1890 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1892 struct mv_host_priv *hpriv = ap->host->private_data;
1893 void __iomem *mmio = hpriv->base;
1894 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1895 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1897 if (ofs != 0xffffffffU) {
1898 *val = readl(addr + ofs);
1899 return 0;
1900 } else
1901 return -EINVAL;
1904 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1906 struct mv_host_priv *hpriv = ap->host->private_data;
1907 void __iomem *mmio = hpriv->base;
1908 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1909 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1911 if (ofs != 0xffffffffU) {
1912 writelfl(val, addr + ofs);
1913 return 0;
1914 } else
1915 return -EINVAL;
1918 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1920 struct pci_dev *pdev = to_pci_dev(host->dev);
1921 int early_5080;
1923 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1925 if (!early_5080) {
1926 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1927 tmp |= (1 << 0);
1928 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1931 mv_reset_pci_bus(host, mmio);
1934 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1936 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1939 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1940 void __iomem *mmio)
1942 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1943 u32 tmp;
1945 tmp = readl(phy_mmio + MV5_PHY_MODE);
1947 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1948 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1951 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1953 u32 tmp;
1955 writel(0, mmio + MV_GPIO_PORT_CTL);
1957 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1959 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1960 tmp |= ~(1 << 0);
1961 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1964 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1965 unsigned int port)
1967 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1968 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1969 u32 tmp;
1970 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1972 if (fix_apm_sq) {
1973 tmp = readl(phy_mmio + MV5_LT_MODE);
1974 tmp |= (1 << 19);
1975 writel(tmp, phy_mmio + MV5_LT_MODE);
1977 tmp = readl(phy_mmio + MV5_PHY_CTL);
1978 tmp &= ~0x3;
1979 tmp |= 0x1;
1980 writel(tmp, phy_mmio + MV5_PHY_CTL);
1983 tmp = readl(phy_mmio + MV5_PHY_MODE);
1984 tmp &= ~mask;
1985 tmp |= hpriv->signal[port].pre;
1986 tmp |= hpriv->signal[port].amps;
1987 writel(tmp, phy_mmio + MV5_PHY_MODE);
1991 #undef ZERO
1992 #define ZERO(reg) writel(0, port_mmio + (reg))
1993 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1994 unsigned int port)
1996 void __iomem *port_mmio = mv_port_base(mmio, port);
1998 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2000 mv_channel_reset(hpriv, mmio, port);
2002 ZERO(0x028); /* command */
2003 writel(0x11f, port_mmio + EDMA_CFG_OFS);
2004 ZERO(0x004); /* timer */
2005 ZERO(0x008); /* irq err cause */
2006 ZERO(0x00c); /* irq err mask */
2007 ZERO(0x010); /* rq bah */
2008 ZERO(0x014); /* rq inp */
2009 ZERO(0x018); /* rq outp */
2010 ZERO(0x01c); /* respq bah */
2011 ZERO(0x024); /* respq outp */
2012 ZERO(0x020); /* respq inp */
2013 ZERO(0x02c); /* test control */
2014 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2016 #undef ZERO
2018 #define ZERO(reg) writel(0, hc_mmio + (reg))
2019 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2020 unsigned int hc)
2022 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2023 u32 tmp;
2025 ZERO(0x00c);
2026 ZERO(0x010);
2027 ZERO(0x014);
2028 ZERO(0x018);
2030 tmp = readl(hc_mmio + 0x20);
2031 tmp &= 0x1c1c1c1c;
2032 tmp |= 0x03030303;
2033 writel(tmp, hc_mmio + 0x20);
2035 #undef ZERO
2037 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2038 unsigned int n_hc)
2040 unsigned int hc, port;
2042 for (hc = 0; hc < n_hc; hc++) {
2043 for (port = 0; port < MV_PORTS_PER_HC; port++)
2044 mv5_reset_hc_port(hpriv, mmio,
2045 (hc * MV_PORTS_PER_HC) + port);
2047 mv5_reset_one_hc(hpriv, mmio, hc);
2050 return 0;
2053 #undef ZERO
2054 #define ZERO(reg) writel(0, mmio + (reg))
2055 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2057 struct mv_host_priv *hpriv = host->private_data;
2058 u32 tmp;
2060 tmp = readl(mmio + MV_PCI_MODE);
2061 tmp &= 0xff00ffff;
2062 writel(tmp, mmio + MV_PCI_MODE);
2064 ZERO(MV_PCI_DISC_TIMER);
2065 ZERO(MV_PCI_MSI_TRIGGER);
2066 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2067 ZERO(HC_MAIN_IRQ_MASK_OFS);
2068 ZERO(MV_PCI_SERR_MASK);
2069 ZERO(hpriv->irq_cause_ofs);
2070 ZERO(hpriv->irq_mask_ofs);
2071 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2072 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2073 ZERO(MV_PCI_ERR_ATTRIBUTE);
2074 ZERO(MV_PCI_ERR_COMMAND);
2076 #undef ZERO
2078 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2080 u32 tmp;
2082 mv5_reset_flash(hpriv, mmio);
2084 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2085 tmp &= 0x3;
2086 tmp |= (1 << 5) | (1 << 6);
2087 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2091 * mv6_reset_hc - Perform the 6xxx global soft reset
2092 * @mmio: base address of the HBA
2094 * This routine only applies to 6xxx parts.
2096 * LOCKING:
2097 * Inherited from caller.
2099 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2100 unsigned int n_hc)
2102 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2103 int i, rc = 0;
2104 u32 t;
2106 /* Following procedure defined in PCI "main command and status
2107 * register" table.
2109 t = readl(reg);
2110 writel(t | STOP_PCI_MASTER, reg);
2112 for (i = 0; i < 1000; i++) {
2113 udelay(1);
2114 t = readl(reg);
2115 if (PCI_MASTER_EMPTY & t)
2116 break;
2118 if (!(PCI_MASTER_EMPTY & t)) {
2119 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2120 rc = 1;
2121 goto done;
2124 /* set reset */
2125 i = 5;
2126 do {
2127 writel(t | GLOB_SFT_RST, reg);
2128 t = readl(reg);
2129 udelay(1);
2130 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2132 if (!(GLOB_SFT_RST & t)) {
2133 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2134 rc = 1;
2135 goto done;
2138 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2139 i = 5;
2140 do {
2141 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2142 t = readl(reg);
2143 udelay(1);
2144 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2146 if (GLOB_SFT_RST & t) {
2147 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2148 rc = 1;
2150 done:
2151 return rc;
2154 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2155 void __iomem *mmio)
2157 void __iomem *port_mmio;
2158 u32 tmp;
2160 tmp = readl(mmio + MV_RESET_CFG);
2161 if ((tmp & (1 << 0)) == 0) {
2162 hpriv->signal[idx].amps = 0x7 << 8;
2163 hpriv->signal[idx].pre = 0x1 << 5;
2164 return;
2167 port_mmio = mv_port_base(mmio, idx);
2168 tmp = readl(port_mmio + PHY_MODE2);
2170 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2171 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2174 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2176 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2179 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2180 unsigned int port)
2182 void __iomem *port_mmio = mv_port_base(mmio, port);
2184 u32 hp_flags = hpriv->hp_flags;
2185 int fix_phy_mode2 =
2186 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2187 int fix_phy_mode4 =
2188 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2189 u32 m2, tmp;
2191 if (fix_phy_mode2) {
2192 m2 = readl(port_mmio + PHY_MODE2);
2193 m2 &= ~(1 << 16);
2194 m2 |= (1 << 31);
2195 writel(m2, port_mmio + PHY_MODE2);
2197 udelay(200);
2199 m2 = readl(port_mmio + PHY_MODE2);
2200 m2 &= ~((1 << 16) | (1 << 31));
2201 writel(m2, port_mmio + PHY_MODE2);
2203 udelay(200);
2206 /* who knows what this magic does */
2207 tmp = readl(port_mmio + PHY_MODE3);
2208 tmp &= ~0x7F800000;
2209 tmp |= 0x2A800000;
2210 writel(tmp, port_mmio + PHY_MODE3);
2212 if (fix_phy_mode4) {
2213 u32 m4;
2215 m4 = readl(port_mmio + PHY_MODE4);
2217 if (hp_flags & MV_HP_ERRATA_60X1B2)
2218 tmp = readl(port_mmio + 0x310);
2220 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2222 writel(m4, port_mmio + PHY_MODE4);
2224 if (hp_flags & MV_HP_ERRATA_60X1B2)
2225 writel(tmp, port_mmio + 0x310);
2228 /* Revert values of pre-emphasis and signal amps to the saved ones */
2229 m2 = readl(port_mmio + PHY_MODE2);
2231 m2 &= ~MV_M2_PREAMP_MASK;
2232 m2 |= hpriv->signal[port].amps;
2233 m2 |= hpriv->signal[port].pre;
2234 m2 &= ~(1 << 16);
2236 /* according to mvSata 3.6.1, some IIE values are fixed */
2237 if (IS_GEN_IIE(hpriv)) {
2238 m2 &= ~0xC30FF01F;
2239 m2 |= 0x0000900F;
2242 writel(m2, port_mmio + PHY_MODE2);
2245 /* TODO: use the generic LED interface to configure the SATA Presence */
2246 /* & Activity LEDs on the board */
2247 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2248 void __iomem *mmio)
2250 return;
2253 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2254 void __iomem *mmio)
2256 void __iomem *port_mmio;
2257 u32 tmp;
2259 port_mmio = mv_port_base(mmio, idx);
2260 tmp = readl(port_mmio + PHY_MODE2);
2262 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2263 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2266 #undef ZERO
2267 #define ZERO(reg) writel(0, port_mmio + (reg))
2268 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2269 void __iomem *mmio, unsigned int port)
2271 void __iomem *port_mmio = mv_port_base(mmio, port);
2273 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2275 mv_channel_reset(hpriv, mmio, port);
2277 ZERO(0x028); /* command */
2278 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2279 ZERO(0x004); /* timer */
2280 ZERO(0x008); /* irq err cause */
2281 ZERO(0x00c); /* irq err mask */
2282 ZERO(0x010); /* rq bah */
2283 ZERO(0x014); /* rq inp */
2284 ZERO(0x018); /* rq outp */
2285 ZERO(0x01c); /* respq bah */
2286 ZERO(0x024); /* respq outp */
2287 ZERO(0x020); /* respq inp */
2288 ZERO(0x02c); /* test control */
2289 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2292 #undef ZERO
2294 #define ZERO(reg) writel(0, hc_mmio + (reg))
2295 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2296 void __iomem *mmio)
2298 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2300 ZERO(0x00c);
2301 ZERO(0x010);
2302 ZERO(0x014);
2306 #undef ZERO
2308 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2309 void __iomem *mmio, unsigned int n_hc)
2311 unsigned int port;
2313 for (port = 0; port < hpriv->n_ports; port++)
2314 mv_soc_reset_hc_port(hpriv, mmio, port);
2316 mv_soc_reset_one_hc(hpriv, mmio);
2318 return 0;
2321 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2322 void __iomem *mmio)
2324 return;
2327 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2329 return;
2332 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2333 unsigned int port_no)
2335 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2337 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2339 if (IS_GEN_II(hpriv)) {
2340 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2341 ifctl |= (1 << 7); /* enable gen2i speed */
2342 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2343 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2346 udelay(25); /* allow reset propagation */
2348 /* Spec never mentions clearing the bit. Marvell's driver does
2349 * clear the bit, however.
2351 writelfl(0, port_mmio + EDMA_CMD_OFS);
2353 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2355 if (IS_GEN_I(hpriv))
2356 mdelay(1);
2360 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2361 * @ap: ATA channel to manipulate
2363 * Part of this is taken from __sata_phy_reset and modified to
2364 * not sleep since this routine gets called from interrupt level.
2366 * LOCKING:
2367 * Inherited from caller. This is coded to be safe to call at
2368 * interrupt level, i.e. it does not sleep.
2370 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2371 unsigned long deadline)
2373 struct mv_port_priv *pp = ap->private_data;
2374 struct mv_host_priv *hpriv = ap->host->private_data;
2375 void __iomem *port_mmio = mv_ap_base(ap);
2376 int retry = 5;
2377 u32 sstatus;
2379 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2381 #ifdef DEBUG
2383 u32 sstatus, serror, scontrol;
2385 mv_scr_read(ap, SCR_STATUS, &sstatus);
2386 mv_scr_read(ap, SCR_ERROR, &serror);
2387 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2388 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2389 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2391 #endif
2393 /* Issue COMRESET via SControl */
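/* standard SControl semantics: DET=1 (0x301) asserts COMRESET and the
 * follow-up write with DET=0 (0x300) releases it; IPM=3 keeps partial
 * and slumber power management transitions disabled
 */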
2394 comreset_retry:
2395 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2396 msleep(1);
2398 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2399 msleep(20);
2401 do {
2402 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2403 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2404 break;
2406 msleep(1);
2407 } while (time_before(jiffies, deadline));
2409 /* work around errata */
2410 if (IS_GEN_II(hpriv) &&
2411 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2412 (retry-- > 0))
2413 goto comreset_retry;
2415 #ifdef DEBUG
2417 u32 sstatus, serror, scontrol;
2419 mv_scr_read(ap, SCR_STATUS, &sstatus);
2420 mv_scr_read(ap, SCR_ERROR, &serror);
2421 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2422 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2423 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2425 #endif
2427 if (ata_link_offline(&ap->link)) {
2428 *class = ATA_DEV_NONE;
2429 return;
2432 /* even after SStatus reflects that the device is ready,
2433 * it seems to take a while for the link to be fully
2434 * established (and thus Status no longer 0x80/0x7F),
2435 * so we poll a bit for that here.
2437 retry = 20;
2438 while (1) {
2439 u8 drv_stat = ata_check_status(ap);
2440 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2441 break;
2442 msleep(500);
2443 if (retry-- <= 0)
2444 break;
2445 if (time_after(jiffies, deadline))
2446 break;
2449 /* FIXME: if we passed the deadline, the following
2450 * code probably produces an invalid result
2453 /* finally, read device signature from TF registers */
2454 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2456 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2458 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2460 VPRINTK("EXIT\n");
2463 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2465 struct ata_port *ap = link->ap;
2466 struct mv_port_priv *pp = ap->private_data;
2467 struct ata_eh_context *ehc = &link->eh_context;
2468 int rc;
2470 rc = mv_stop_dma(ap);
2471 if (rc)
2472 ehc->i.action |= ATA_EH_HARDRESET;
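/* force at least one hardreset the first time EH runs on a port */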
2474 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2475 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2476 ehc->i.action |= ATA_EH_HARDRESET;
2479 /* if we're about to do hardreset, nothing more to do */
2480 if (ehc->i.action & ATA_EH_HARDRESET)
2481 return 0;
2483 if (ata_link_online(link))
2484 rc = ata_wait_ready(ap, deadline);
2485 else
2486 rc = -ENODEV;
2488 return rc;
2491 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2492 unsigned long deadline)
2494 struct ata_port *ap = link->ap;
2495 struct mv_host_priv *hpriv = ap->host->private_data;
2496 void __iomem *mmio = hpriv->base;
2498 mv_stop_dma(ap);
2500 mv_channel_reset(hpriv, mmio, ap->port_no);
2502 mv_phy_reset(ap, class, deadline);
2504 return 0;
2507 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2509 struct ata_port *ap = link->ap;
2510 u32 serr;
2512 /* print link status */
2513 sata_print_link_status(link);
2515 /* clear SError */
2516 sata_scr_read(link, SCR_ERROR, &serr);
2517 sata_scr_write_flush(link, SCR_ERROR, serr);
2519 /* bail out if no device is present */
2520 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2521 DPRINTK("EXIT, no device\n");
2522 return;
2525 /* set up device control */
2526 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2529 static void mv_error_handler(struct ata_port *ap)
2531 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2532 mv_hardreset, mv_postreset);
2535 static void mv_eh_freeze(struct ata_port *ap)
2537 struct mv_host_priv *hpriv = ap->host->private_data;
2538 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2539 u32 tmp, mask;
2540 unsigned int shift;
2542 /* FIXME: handle coalescing completion events properly */
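/* each port owns an err/done bit pair in the main mask register; ports
 * behind the second HC sit one bit higher because that HC's bits start
 * above HC0's block, hence the extra shift below
 */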
2544 shift = ap->port_no * 2;
2545 if (hc > 0)
2546 shift++;
2548 mask = 0x3 << shift;
2550 /* disable assertion of portN err, done events */
2551 tmp = readl(hpriv->main_mask_reg_addr);
2552 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2555 static void mv_eh_thaw(struct ata_port *ap)
2557 struct mv_host_priv *hpriv = ap->host->private_data;
2558 void __iomem *mmio = hpriv->base;
2559 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2560 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2561 void __iomem *port_mmio = mv_ap_base(ap);
2562 u32 tmp, mask, hc_irq_cause;
2563 unsigned int shift, hc_port_no = ap->port_no;
2565 /* FIXME: handle coalescing completion events properly */
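/* hc_port_no below is the port's index within its own HC, since the
 * HC_IRQ_CAUSE bits (CRPB-done, device interrupt) are numbered per HC
 */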
2567 shift = ap->port_no * 2;
2568 if (hc > 0) {
2569 shift++;
2570 hc_port_no -= 4;
2573 mask = 0x3 << shift;
2575 /* clear EDMA errors on this port */
2576 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2578 /* clear pending irq events */
2579 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2580 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2581 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2582 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2584 /* enable assertion of portN err, done events */
2585 tmp = readl(hpriv->main_mask_reg_addr);
2586 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2590 * mv_port_init - Perform some early initialization on a single port.
2591 * @port: libata data structure storing shadow register addresses
2592 * @port_mmio: base address of the port
2594 * Initialize shadow register mmio addresses, clear outstanding
2595 * interrupts on the port, and unmask interrupts for the future
2596 * start of the port.
2598 * LOCKING:
2599 * Inherited from caller.
2601 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2603 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2604 unsigned serr_ofs;
2606 /* PIO related setup
2608 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2609 port->error_addr =
2610 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2611 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2612 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2613 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2614 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2615 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2616 port->status_addr =
2617 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2618 /* special case: control/altstatus doesn't have ATA_REG_ address */
2619 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2621 /* unused: */
2622 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2624 /* Clear any currently outstanding port interrupt conditions */
2625 serr_ofs = mv_scr_offset(SCR_ERROR);
2626 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2627 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2629 /* unmask all non-transient EDMA error interrupts */
2630 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2632 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2633 readl(port_mmio + EDMA_CFG_OFS),
2634 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2635 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2638 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2640 struct pci_dev *pdev = to_pci_dev(host->dev);
2641 struct mv_host_priv *hpriv = host->private_data;
2642 u32 hp_flags = hpriv->hp_flags;
2644 switch (board_idx) {
2645 case chip_5080:
2646 hpriv->ops = &mv5xxx_ops;
2647 hp_flags |= MV_HP_GEN_I;
2649 switch (pdev->revision) {
2650 case 0x1:
2651 hp_flags |= MV_HP_ERRATA_50XXB0;
2652 break;
2653 case 0x3:
2654 hp_flags |= MV_HP_ERRATA_50XXB2;
2655 break;
2656 default:
2657 dev_printk(KERN_WARNING, &pdev->dev,
2658 "Applying 50XXB2 workarounds to unknown rev\n");
2659 hp_flags |= MV_HP_ERRATA_50XXB2;
2660 break;
2662 break;
2664 case chip_504x:
2665 case chip_508x:
2666 hpriv->ops = &mv5xxx_ops;
2667 hp_flags |= MV_HP_GEN_I;
2669 switch (pdev->revision) {
2670 case 0x0:
2671 hp_flags |= MV_HP_ERRATA_50XXB0;
2672 break;
2673 case 0x3:
2674 hp_flags |= MV_HP_ERRATA_50XXB2;
2675 break;
2676 default:
2677 dev_printk(KERN_WARNING, &pdev->dev,
2678 "Applying B2 workarounds to unknown rev\n");
2679 hp_flags |= MV_HP_ERRATA_50XXB2;
2680 break;
2682 break;
2684 case chip_604x:
2685 case chip_608x:
2686 hpriv->ops = &mv6xxx_ops;
2687 hp_flags |= MV_HP_GEN_II;
2689 switch (pdev->revision) {
2690 case 0x7:
2691 hp_flags |= MV_HP_ERRATA_60X1B2;
2692 break;
2693 case 0x9:
2694 hp_flags |= MV_HP_ERRATA_60X1C0;
2695 break;
2696 default:
2697 dev_printk(KERN_WARNING, &pdev->dev,
2698 "Applying B2 workarounds to unknown rev\n");
2699 hp_flags |= MV_HP_ERRATA_60X1B2;
2700 break;
2702 break;
2704 case chip_7042:
2705 hp_flags |= MV_HP_PCIE;
2706 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2707 (pdev->device == 0x2300 || pdev->device == 0x2310))
2710 * Highpoint RocketRAID PCIe 23xx series cards:
2712 * Unconfigured drives are treated as "Legacy"
2713 * by the BIOS, and it overwrites sector 8 with
2714 * a "Lgcy" metadata block prior to Linux boot.
2716 * Configured drives (RAID or JBOD) leave sector 8
2717 * alone, but instead overwrite a high numbered
2718 * sector for the RAID metadata. This sector can
2719 * be determined exactly, by truncating the physical
2720 * drive capacity to a nice even GB value.
2722 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2724 * Warn the user, lest they think we're just buggy.
2726 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2727 " BIOS CORRUPTS DATA on all attached drives,"
2728 " regardless of if/how they are configured."
2729 " BEWARE!\n");
2730 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2731 " use sectors 8-9 on \"Legacy\" drives,"
2732 " and avoid the final two gigabytes on"
2733 " all RocketRAID BIOS initialized drives.\n");
2735 case chip_6042:
2736 hpriv->ops = &mv6xxx_ops;
2737 hp_flags |= MV_HP_GEN_IIE;
2739 switch (pdev->revision) {
2740 case 0x0:
2741 hp_flags |= MV_HP_ERRATA_XX42A0;
2742 break;
2743 case 0x1:
2744 hp_flags |= MV_HP_ERRATA_60X1C0;
2745 break;
2746 default:
2747 dev_printk(KERN_WARNING, &pdev->dev,
2748 "Applying 60X1C0 workarounds to unknown rev\n");
2749 hp_flags |= MV_HP_ERRATA_60X1C0;
2750 break;
2752 break;
2753 case chip_soc:
2754 hpriv->ops = &mv_soc_ops;
2755 hp_flags |= MV_HP_ERRATA_60X1C0;
2756 break;
2758 default:
2759 dev_printk(KERN_ERR, host->dev,
2760 "BUG: invalid board index %u\n", board_idx);
2761 return 1;
2764 hpriv->hp_flags = hp_flags;
2765 if (hp_flags & MV_HP_PCIE) {
2766 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2767 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2768 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2769 } else {
2770 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2771 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2772 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2775 return 0;
2779 * mv_init_host - Perform some early initialization of the host.
2780 * @host: ATA host to initialize
2781 * @board_idx: controller index
2783 * If possible, do an early global reset of the host. Then do
2784 * our port init and clear/unmask all/relevant host interrupts.
2786 * LOCKING:
2787 * Inherited from caller.
2789 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2791 int rc = 0, n_hc, port, hc;
2792 struct mv_host_priv *hpriv = host->private_data;
2793 void __iomem *mmio = hpriv->base;
2795 rc = mv_chip_id(host, board_idx);
2796 if (rc)
2797 goto done;
2799 if (HAS_PCI(host)) {
2800 hpriv->main_cause_reg_addr = hpriv->base +
2801 HC_MAIN_IRQ_CAUSE_OFS;
2802 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2803 } else {
2804 hpriv->main_cause_reg_addr = hpriv->base +
2805 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2806 hpriv->main_mask_reg_addr = hpriv->base +
2807 HC_SOC_MAIN_IRQ_MASK_OFS;
2809 /* global interrupt mask */
2810 writel(0, hpriv->main_mask_reg_addr);
2812 n_hc = mv_get_hc_count(host->ports[0]->flags);
2814 for (port = 0; port < host->n_ports; port++)
2815 hpriv->ops->read_preamp(hpriv, port, mmio);
2817 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2818 if (rc)
2819 goto done;
2821 hpriv->ops->reset_flash(hpriv, mmio);
2822 hpriv->ops->reset_bus(host, mmio);
2823 hpriv->ops->enable_leds(hpriv, mmio);
2825 for (port = 0; port < host->n_ports; port++) {
2826 if (IS_GEN_II(hpriv)) {
2827 void __iomem *port_mmio = mv_port_base(mmio, port);
2829 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2830 ifctl |= (1 << 7); /* enable gen2i speed */
2831 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2832 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2835 hpriv->ops->phy_errata(hpriv, mmio, port);
2838 for (port = 0; port < host->n_ports; port++) {
2839 struct ata_port *ap = host->ports[port];
2840 void __iomem *port_mmio = mv_port_base(mmio, port);
2842 mv_port_init(&ap->ioaddr, port_mmio);
2844 #ifdef CONFIG_PCI
2845 if (HAS_PCI(host)) {
2846 unsigned int offset = port_mmio - mmio;
2847 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2848 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2850 #endif
2853 for (hc = 0; hc < n_hc; hc++) {
2854 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2856 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2857 "(before clear)=0x%08x\n", hc,
2858 readl(hc_mmio + HC_CFG_OFS),
2859 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2861 /* Clear any currently outstanding hc interrupt conditions */
2862 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2865 if (HAS_PCI(host)) {
2866 /* Clear any currently outstanding host interrupt conditions */
2867 writelfl(0, mmio + hpriv->irq_cause_ofs);
2869 /* and unmask interrupt generation for host regs */
2870 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2871 if (IS_GEN_I(hpriv))
2872 writelfl(~HC_MAIN_MASKED_IRQS_5,
2873 hpriv->main_mask_reg_addr);
2874 else
2875 writelfl(~HC_MAIN_MASKED_IRQS,
2876 hpriv->main_mask_reg_addr);
2878 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2879 "PCI int cause/mask=0x%08x/0x%08x\n",
2880 readl(hpriv->main_cause_reg_addr),
2881 readl(hpriv->main_mask_reg_addr),
2882 readl(mmio + hpriv->irq_cause_ofs),
2883 readl(mmio + hpriv->irq_mask_ofs));
2884 } else {
2885 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2886 hpriv->main_mask_reg_addr);
2887 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2888 readl(hpriv->main_cause_reg_addr),
2889 readl(hpriv->main_mask_reg_addr));
2891 done:
2892 return rc;
2895 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
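/* all three pools are allocated with dmam_pool_create(), i.e. they are
 * device-managed, so the early -ENOMEM returns below need no explicit
 * cleanup
 */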
2897 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2898 MV_CRQB_Q_SZ, 0);
2899 if (!hpriv->crqb_pool)
2900 return -ENOMEM;
2902 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2903 MV_CRPB_Q_SZ, 0);
2904 if (!hpriv->crpb_pool)
2905 return -ENOMEM;
2907 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2908 MV_SG_TBL_SZ, 0);
2909 if (!hpriv->sg_tbl_pool)
2910 return -ENOMEM;
2912 return 0;
2916 * mv_platform_probe - handle a positive probe of an SoC Marvell
2917 * host
2918 * @pdev: platform device found
2920 * LOCKING:
2921 * Inherited from caller.
2923 static int mv_platform_probe(struct platform_device *pdev)
2925 static int printed_version;
2926 const struct mv_sata_platform_data *mv_platform_data;
2927 const struct ata_port_info *ppi[] =
2928 { &mv_port_info[chip_soc], NULL };
2929 struct ata_host *host;
2930 struct mv_host_priv *hpriv;
2931 struct resource *res;
2932 int n_ports, rc;
2934 if (!printed_version++)
2935 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2938 * Simple resource validation ..
2940 if (unlikely(pdev->num_resources != 2)) {
2941 dev_err(&pdev->dev, "invalid number of resources\n");
2942 return -EINVAL;
2946 * Get the register base first
2948 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2949 if (res == NULL)
2950 return -EINVAL;
2952 /* allocate host */
2953 mv_platform_data = pdev->dev.platform_data;
2954 n_ports = mv_platform_data->n_ports;
2956 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2957 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2959 if (!host || !hpriv)
2960 return -ENOMEM;
2961 host->private_data = hpriv;
2962 hpriv->n_ports = n_ports;
2964 host->iomap = NULL;
2968 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2969 res->end - res->start + 1);
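/* the platform resource points at the SATAHC0 register block, so bias
 * the mapping down by MV_SATAHC0_REG_BASE; the shared port/HC offset
 * helpers assume a base covering the whole register window (this
 * describes the apparent intent of the adjustment below)
 */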
2971 hpriv->base -= MV_SATAHC0_REG_BASE;
2973 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2974 if (rc)
2975 return rc;
2977 /* initialize adapter */
2978 rc = mv_init_host(host, chip_soc);
2979 if (rc)
2980 return rc;
2982 dev_printk(KERN_INFO, &pdev->dev,
2983 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2984 host->n_ports);
2986 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2987 IRQF_SHARED, &mv6_sht);
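/*
 * For reference, a minimal sketch of how a board file might hook up this
 * platform interface.  Only the n_ports field of mv_sata_platform_data is
 * consumed by the probe above, and the resource layout (one MEM region
 * plus one IRQ, i.e. exactly two resources) matches its checks.  The base
 * address, region size and IRQ number below are placeholders, not values
 * taken from any real board.
 *
 *	static struct mv_sata_platform_data example_sata_data = {
 *		.n_ports	= 2,
 *	};
 *
 *	static struct resource example_sata_resources[] = {
 *		{
 *			.start	= 0xf1080000,		// SATAHC register base (placeholder)
 *			.end	= 0xf1080000 + 0x5000 - 1,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 21,			// SATA interrupt line (placeholder)
 *			.end	= 21,
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct platform_device example_sata_device = {
 *		.name		= "sata_mv",	// must match DRV_NAME above
 *		.id		= 0,
 *		.dev		= {
 *			.platform_data	= &example_sata_data,
 *		},
 *		.num_resources	= ARRAY_SIZE(example_sata_resources),
 *		.resource	= example_sata_resources,
 *	};
 *
 *	// from the board init code:
 *	//	platform_device_register(&example_sata_device);
 */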
2992 * mv_platform_remove - unplug a platform interface
2993 * @pdev: platform device
2995 * A platform bus SATA device has been unplugged. Perform the needed
2996 * cleanup. Also called on module unload for any active devices.
2998 static int __devexit mv_platform_remove(struct platform_device *pdev)
3000 struct device *dev = &pdev->dev;
3001 struct ata_host *host = dev_get_drvdata(dev);
3008 ata_host_detach(host);
3013 return 0;
3016 static struct platform_driver mv_platform_driver = {
3017 .probe = mv_platform_probe,
3018 .remove = __devexit_p(mv_platform_remove),
3019 .driver = {
3020 .name = DRV_NAME,
3021 .owner = THIS_MODULE,
3026 #ifdef CONFIG_PCI
3027 static int mv_pci_init_one(struct pci_dev *pdev,
3028 const struct pci_device_id *ent);
3031 static struct pci_driver mv_pci_driver = {
3032 .name = DRV_NAME,
3033 .id_table = mv_pci_tbl,
3034 .probe = mv_pci_init_one,
3035 .remove = ata_pci_remove_one,
3039 * module options
3041 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
3044 /* move to PCI layer or libata core? */
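/* Prefer 64-bit DMA: if the streaming mask is accepted but the consistent
 * mask is not, fall back to a 32-bit consistent mask; on hosts without
 * 64-bit support, use 32-bit masks for both.
 */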
3045 static int pci_go_64(struct pci_dev *pdev)
3047 int rc;
3049 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3050 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3051 if (rc) {
3052 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3053 if (rc) {
3054 dev_printk(KERN_ERR, &pdev->dev,
3055 "64-bit DMA enable failed\n");
3056 return rc;
3059 } else {
3060 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3061 if (rc) {
3062 dev_printk(KERN_ERR, &pdev->dev,
3063 "32-bit DMA enable failed\n");
3064 return rc;
3066 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3067 if (rc) {
3068 dev_printk(KERN_ERR, &pdev->dev,
3069 "32-bit consistent DMA enable failed\n");
3070 return rc;
3074 return rc;
3078 * mv_print_info - Dump key info to kernel log for perusal.
3079 * @host: ATA host to print info about
3081 * FIXME: complete this.
3083 * LOCKING:
3084 * Inherited from caller.
3086 static void mv_print_info(struct ata_host *host)
3088 struct pci_dev *pdev = to_pci_dev(host->dev);
3089 struct mv_host_priv *hpriv = host->private_data;
3090 u8 scc;
3091 const char *scc_s, *gen;
3093 /* Read the PCI class code so we can report whether the chip
3094 * presents itself as a SCSI or RAID class device
3096 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3097 if (scc == 0)
3098 scc_s = "SCSI";
3099 else if (scc == 0x01)
3100 scc_s = "RAID";
3101 else
3102 scc_s = "?";
3104 if (IS_GEN_I(hpriv))
3105 gen = "I";
3106 else if (IS_GEN_II(hpriv))
3107 gen = "II";
3108 else if (IS_GEN_IIE(hpriv))
3109 gen = "IIE";
3110 else
3111 gen = "?";
3113 dev_printk(KERN_INFO, &pdev->dev,
3114 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3115 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3116 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3120 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3121 * @pdev: PCI device found
3122 * @ent: PCI device ID entry for the matched host
3124 * LOCKING:
3125 * Inherited from caller.
3127 static int mv_pci_init_one(struct pci_dev *pdev,
3128 const struct pci_device_id *ent)
3130 static int printed_version;
3131 unsigned int board_idx = (unsigned int)ent->driver_data;
3132 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3133 struct ata_host *host;
3134 struct mv_host_priv *hpriv;
3135 int n_ports, rc;
3137 if (!printed_version++)
3138 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3140 /* allocate host */
3141 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3143 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3144 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3145 if (!host || !hpriv)
3146 return -ENOMEM;
3147 host->private_data = hpriv;
3148 hpriv->n_ports = n_ports;
3150 /* acquire resources */
3151 rc = pcim_enable_device(pdev);
3152 if (rc)
3153 return rc;
3155 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3156 if (rc == -EBUSY)
3157 pcim_pin_device(pdev);
3158 if (rc)
3159 return rc;
3160 host->iomap = pcim_iomap_table(pdev);
3161 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3163 rc = pci_go_64(pdev);
3164 if (rc)
3165 return rc;
3167 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3168 if (rc)
3169 return rc;
3171 /* initialize adapter */
3172 rc = mv_init_host(host, board_idx);
3173 if (rc)
3174 return rc;
3176 /* Enable interrupts */
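/* if MSI was requested but pci_enable_msi() fails, make sure legacy
 * INTx delivery is enabled instead
 */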
3177 if (msi && pci_enable_msi(pdev))
3178 pci_intx(pdev, 1);
3180 mv_dump_pci_cfg(pdev, 0x68);
3181 mv_print_info(host);
3183 pci_set_master(pdev);
3184 pci_try_set_mwi(pdev);
3185 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3186 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3188 #endif
3190 static int mv_platform_probe(struct platform_device *pdev);
3191 static int __devexit mv_platform_remove(struct platform_device *pdev);
3193 static int __init mv_init(void)
3195 int rc = -ENODEV;
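/* register the PCI driver first (under CONFIG_PCI); if the platform
 * driver then fails to register, back out the PCI registration
 */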
3196 #ifdef CONFIG_PCI
3197 rc = pci_register_driver(&mv_pci_driver);
3198 if (rc < 0)
3199 return rc;
3200 #endif
3201 rc = platform_driver_register(&mv_platform_driver);
3203 #ifdef CONFIG_PCI
3204 if (rc < 0)
3205 pci_unregister_driver(&mv_pci_driver);
3206 #endif
3207 return rc;
3210 static void __exit mv_exit(void)
3212 #ifdef CONFIG_PCI
3213 pci_unregister_driver(&mv_pci_driver);
3214 #endif
3215 platform_driver_unregister(&mv_platform_driver);
3218 MODULE_AUTHOR("Brett Russ");
3219 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3220 MODULE_LICENSE("GPL");
3221 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3222 MODULE_VERSION(DRV_VERSION);
3225 MODULE_ALIAS("platform:sata_mv");
3228 #ifdef CONFIG_PCI
3229 module_param(msi, int, 0444);
3230 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3231 #endif
3233 module_init(mv_init);
3234 module_exit(mv_exit);