/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.0"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE		= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
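	/* Worked size check (derived from the comment above: 32-slot queues,
	 * 176 SG entries): 32 * 32B CRQB = 1KB, 32 * 8B CRPB = 256B, and
	 * 176 * 16B ePRD = 2816B, totalling 4096B == MV_PORT_PRIV_DMA_SZ.
	 */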
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
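	/* Illustrative mapping (assuming the 4-ports-per-HC split implied by
	 * the 0-3/4-7 port groupings below): port 6 belongs to
	 * HC (6 >> MV_PORT_HC_SHIFT) == 1 and is hard port (6 % 4) == 2.
	 */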
	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
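/* The three ops tables above differ only where the hardware differs: mv5_ops
 * routes SControl/SStatus access through the Gen-I PHY block (mv5_scr_*),
 * while mv_iie_ops swaps in mv_qc_prep_iie for the Gen-IIE CRQB layout; the
 * remaining hooks are shared.
 */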
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
622 static const struct pci_device_id mv_pci_tbl
[] = {
623 { PCI_VDEVICE(MARVELL
, 0x5040), chip_504x
},
624 { PCI_VDEVICE(MARVELL
, 0x5041), chip_504x
},
625 { PCI_VDEVICE(MARVELL
, 0x5080), chip_5080
},
626 { PCI_VDEVICE(MARVELL
, 0x5081), chip_508x
},
627 /* RocketRAID 1740/174x have different identifiers */
628 { PCI_VDEVICE(TTI
, 0x1740), chip_508x
},
629 { PCI_VDEVICE(TTI
, 0x1742), chip_508x
},
631 { PCI_VDEVICE(MARVELL
, 0x6040), chip_604x
},
632 { PCI_VDEVICE(MARVELL
, 0x6041), chip_604x
},
633 { PCI_VDEVICE(MARVELL
, 0x6042), chip_6042
},
634 { PCI_VDEVICE(MARVELL
, 0x6080), chip_608x
},
635 { PCI_VDEVICE(MARVELL
, 0x6081), chip_608x
},
637 { PCI_VDEVICE(ADAPTEC2
, 0x0241), chip_604x
},
640 { PCI_VDEVICE(ADAPTEC2
, 0x0243), chip_7042
},
642 { PCI_VDEVICE(TTI
, 0x2310), chip_7042
},
644 /* add Marvell 7042 support */
645 { PCI_VDEVICE(MARVELL
, 0x7042), chip_7042
},
647 { } /* terminate list */
650 static struct pci_driver mv_pci_driver
= {
652 .id_table
= mv_pci_tbl
,
653 .probe
= mv_init_one
,
654 .remove
= ata_pci_remove_one
,
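};

/* Note: each mv_pci_tbl entry above carries a chip_* index as driver_data;
 * the probe path (mv_init_one, not shown in this excerpt) presumably uses
 * that index to select the matching mv_port_info[] entry, following the
 * usual libata PCI probing pattern.
 */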
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
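/* Worked example of the address helpers above: for port 5 the HC base is
 * 0x20000 + (5 >> 2) * 0x10000 = 0x30000, and the port registers begin at
 * 0x30000 + 0x2000 (arbiter block) + (5 & 3) * 0x2000 = 0x34000 within BAR 0.
 */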
static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;
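	/* Derived layout sketch (using the MV_*_Q_SZ sizes defined above):
	 * the CRQB ring sits at mem_dma + 0, the CRPB ring at +1KB, and the
	 * ePRD/SG table at +1KB + 256B, so the 1KB/256B/16B alignment rules
	 * hold provided the coherent buffer itself is at least 1KB-aligned.
	 */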
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;
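			/* Clipping example: an SG chunk at bus address
			 * 0x...f800 with sg_len 0x1000 would cross a 64KB
			 * boundary (offset 0xf800), so this ePRD entry is
			 * clipped to len 0x800 and the remainder of the
			 * chunk is described by the following entries.
			 */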
			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			mv_sg++;
			n_sg++;
		}
	}

	return n_sg;
}
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
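/* Layout of each CRQB command word packed above, as implied by the constants:
 * bits 7:0 hold the register value, bits 10:8 the shadow-register address
 * (for the small ATA_REG_* indices used below), bits 12:11 the CRQB_CMD_CS
 * field, and bit 15 (CRQB_CMD_LAST) marks the final word of the command.
 */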
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1752 static void __iomem
*mv5_phy_base(void __iomem
*mmio
, unsigned int port
)
1754 void __iomem
*hc_mmio
= mv_hc_base_from_port(mmio
, port
);
1755 unsigned long ofs
= (mv_hardport_from_port(port
) + 1) * 0x100UL
;
1757 return hc_mmio
+ ofs
;
1760 static unsigned int mv5_scr_offset(unsigned int sc_reg_in
)
1764 switch (sc_reg_in
) {
1768 ofs
= sc_reg_in
* sizeof(u32
);
1777 static int mv5_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
)
1779 void __iomem
*mmio
= ap
->host
->iomap
[MV_PRIMARY_BAR
];
1780 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1781 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1783 if (ofs
!= 0xffffffffU
) {
1784 *val
= readl(addr
+ ofs
);
1790 static int mv5_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
)
1792 void __iomem
*mmio
= ap
->host
->iomap
[MV_PRIMARY_BAR
];
1793 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1794 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1796 if (ofs
!= 0xffffffffU
) {
1797 writelfl(val
, addr
+ ofs
);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	tmp = readl(hc_mmio + 0x20);
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m4, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		writel(m2, port_mmio + PHY_MODE2);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		if (time_after(jiffies, deadline))
			break;
	} while (1);

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
}
static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_port *ap, unsigned int *class,
                        unsigned long deadline)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

        mv_stop_dma(ap);

        mv_channel_reset(hpriv, mmio, ap->port_no);

        mv_phy_reset(ap, class, deadline);

        return 0;
}
static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
        u32 serr;

        /* print link status */
        sata_print_link_status(ap);

        /* clear SError */
        sata_scr_read(ap, SCR_ERROR, &serr);
        sata_scr_write_flush(ap, SCR_ERROR, serr);

        /* bail out if no device is present */
        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
                DPRINTK("EXIT, no device\n");
                return;
        }

        /* set up device control */
        iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
        ata_do_eh(ap, mv_prereset, ata_std_softreset,
                  mv_hardreset, mv_postreset);
}
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
        mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
        u32 tmp, mask;
        unsigned int shift;

        /* FIXME: handle coalescing completion events properly */

        shift = ap->port_no * 2;
        if (hc > 0)
                shift++;

        mask = 0x3 << shift;

        /* disable assertion of portN err, done events */
        tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
        writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
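/*
 * Layout note for the shift/mask arithmetic above (and in mv_eh_thaw()
 * below): each port owns two adjacent event bits (error and done) in the
 * main IRQ cause/mask register, starting at bit (port_no * 2), and ports
 * behind the second host controller are shifted up by one more bit,
 * presumably to step over HC0's coalescing-done bit.  Worked example:
 * port 5 lives on HC1, so shift = 5 * 2 + 1 = 11 and mask = 0x3 << 11
 * selects that port's pair of event bits.
 */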
static void mv_eh_thaw(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 tmp, mask, hc_irq_cause;
        unsigned int shift, hc_port_no = ap->port_no;

        /* FIXME: handle coalescing completion events properly */

        shift = ap->port_no * 2;
        if (hc > 0) {
                shift++;
                hc_port_no -= 4;
        }

        mask = 0x3 << shift;

        /* clear EDMA errors on this port */
        writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* clear pending irq events */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        hc_irq_cause &= ~(1 << hc_port_no);       /* clear CRPB-done */
        hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
        writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

        /* enable assertion of portN err, done events */
        tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
        writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
        void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
        unsigned serr_ofs;

        /* PIO related setup */
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
        port->error_addr =
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
        port->status_addr =
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
        /* special case: control/altstatus doesn't have ATA_REG_ address */
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

        /* unused: */
        port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

        /* Clear any currently outstanding port interrupt conditions */
        serr_ofs = mv_scr_offset(SCR_ERROR);
        writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* unmask all EDMA error interrupts */
        writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                readl(port_mmio + EDMA_CFG_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
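/*
 * The shadow register block presents the ATA taskfile as consecutive
 * 32-bit slots, which is why every address above is shd_base plus
 * sizeof(u32) times an ATA_REG_* index.  For example, ATA_REG_LBAL == 3,
 * so the LBA-low shadow register sits at port_mmio + SHD_BLK_OFS + 0x0c.
 */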
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u32 hp_flags = hpriv->hp_flags;

        switch (board_idx) {
        case chip_5080:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_GEN_I;

                switch (pdev->revision) {
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying 50XXB2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_504x:
        case chip_508x:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_GEN_I;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_604x:
        case chip_608x:
                hpriv->ops = &mv6xxx_ops;
                hp_flags |= MV_HP_GEN_II;

                switch (pdev->revision) {
                case 0x7:
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                case 0x9:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                }
                break;

        case chip_7042:
        case chip_6042:
                hpriv->ops = &mv6xxx_ops;
                hp_flags |= MV_HP_GEN_IIE;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_XX42A0;
                        break;
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying 60X1C0 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                }
                break;

        default:
                printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
                return 1;
        }

        hpriv->hp_flags = hp_flags;

        return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
        int rc = 0, n_hc, port, hc;
        struct pci_dev *pdev = to_pci_dev(host->dev);
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
        struct mv_host_priv *hpriv = host->private_data;

        /* global interrupt mask */
        writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

        rc = mv_chip_id(host, board_idx);
        if (rc)
                goto done;

        n_hc = mv_get_hc_count(host->ports[0]->flags);

        for (port = 0; port < host->n_ports; port++)
                hpriv->ops->read_preamp(hpriv, port, mmio);

        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
        if (rc)
                goto done;

        hpriv->ops->reset_flash(hpriv, mmio);
        hpriv->ops->reset_bus(pdev, mmio);
        hpriv->ops->enable_leds(hpriv, mmio);

        for (port = 0; port < host->n_ports; port++) {
                if (IS_GEN_II(hpriv)) {
                        void __iomem *port_mmio = mv_port_base(mmio, port);

                        u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                        ifctl |= (1 << 7);              /* enable gen2i speed */
                        ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                        writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
                }

                hpriv->ops->phy_errata(hpriv, mmio, port);
        }

        for (port = 0; port < host->n_ports; port++) {
                void __iomem *port_mmio = mv_port_base(mmio, port);
                mv_port_init(&host->ports[port]->ioaddr, port_mmio);
        }

        for (hc = 0; hc < n_hc; hc++) {
                void __iomem *hc_mmio = mv_hc_base(mmio, hc);

                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
                        "(before clear)=0x%08x\n", hc,
                        readl(hc_mmio + HC_CFG_OFS),
                        readl(hc_mmio + HC_IRQ_CAUSE_OFS));

                /* Clear any currently outstanding hc interrupt conditions */
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        /* Clear any currently outstanding host interrupt conditions */
        writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

        /* and unmask interrupt generation for host regs */
        writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

        if (IS_GEN_I(hpriv))
                writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
        else
                writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                "PCI int cause/mask=0x%08x/0x%08x\n",
                readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
                readl(mmio + PCI_IRQ_CAUSE_OFS),
                readl(mmio + PCI_IRQ_MASK_OFS));

done:
        return rc;
}
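/*
 * Ordering note: mv_init_host() masks the main interrupt cause sources
 * first, then identifies the chip, resets the host controllers, applies
 * the PHY errata and per-port setup, and only afterwards clears and
 * unmasks the HC, PCI and main interrupt registers, so stale events
 * should not be delivered before the ports are fully initialized.
 */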
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u8 scc;
        const char *scc_s, *gen;

        /* Use the PCI class code to report whether the chip presents
         * itself as a SCSI-class or RAID-class device
         */
        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
        if (scc == 0)
                scc_s = "SCSI";
        else if (scc == 0x01)
                scc_s = "RAID";
        else
                scc_s = "?";

        if (IS_GEN_I(hpriv))
                gen = "I";
        else if (IS_GEN_II(hpriv))
                gen = "II";
        else if (IS_GEN_IIE(hpriv))
                gen = "IIE";
        else
                gen = "?";

        dev_printk(KERN_INFO, &pdev->dev,
                   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
                   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
                   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
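/*
 * Illustrative example of the resulting log line for a Gen-II part using
 * legacy interrupts (device name and values depend on the system):
 *
 *   sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */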
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version = 0;
        unsigned int board_idx = (unsigned int)ent->driver_data;
        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        int n_ports, rc;

        if (!printed_version++)
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

        /* allocate host */
        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!host || !hpriv)
                return -ENOMEM;
        host->private_data = hpriv;

        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);

        rc = pci_go_64(pdev);
        if (rc)
                return rc;

        /* initialize adapter */
        rc = mv_init_host(host, board_idx);
        if (rc)
                return rc;

        /* Enable interrupts */
        if (msi && pci_enable_msi(pdev))
                pci_intx(pdev, 1);

        mv_dump_pci_cfg(pdev, 0x68);
        mv_print_info(host);

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
                                 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
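/*
 * All resources above are acquired through the managed (devm_ and pcim_)
 * interfaces, so the error paths need no matching teardown; the devres
 * core releases them when probing fails or the device is unbound.
 */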
static int __init mv_init(void)
{
        return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
        pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);