/*
 * Marvell 88SE64xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"
#include "mv_64xx.h"
#include "mv_chips.h"
static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	/* TODO check & save device type */
	reg = mr32(MVS_GBL_PORT_TYPE);
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}
static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_PCS);
	if (mvi->chip->n_phy <= 4)
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
	else
		tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, tmp);
}
static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;

	mvs_phy_hacks(mvi);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* TEST - for phy decoding error, adjust voltage levels */
		mw32(MVS_P0_VSR_ADDR + 0, 0x8);
		mw32(MVS_P0_VSR_DATA + 0, 0x2F0);

		mw32(MVS_P0_VSR_ADDR + 8, 0x8);
		mw32(MVS_P0_VSR_DATA + 8, 0x2F0);

		mw32(MVS_P0_VSR_ADDR + 16, 0x8);
		mw32(MVS_P0_VSR_DATA + 16, 0x2F0);

		mw32(MVS_P0_VSR_ADDR + 24, 0x8);
		mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
	} else {
		/* disable auto port detection */
		mw32(MVS_GBL_PORT_TYPE, 0);
		for (i = 0; i < mvi->chip->n_phy; i++) {
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
			mvs_write_port_vsr_data(mvi, i, 0x90000000);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
			mvs_write_port_vsr_data(mvi, i, 0x50f2);
			mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
			mvs_write_port_vsr_data(mvi, i, 0x0e);
		}
	}
}
static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 reg, tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < 4)
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
		else
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
	} else
		reg = mr32(MVS_PHY_CTL);

	tmp = reg;
	if (phy_id < 4)
		tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
	else
		tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		if (phy_id < 4) {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
		} else {
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
			mdelay(10);
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
		}
	} else {
		mw32(MVS_PHY_CTL, tmp);
		mdelay(10);
		mw32(MVS_PHY_CTL, reg);
	}
}
static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;

	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	tmp = mvs_read_phy_ctl(mvi, phy_id);
	if (hard)
		tmp |= PHY_RST_HARD;
	else
		tmp |= PHY_RST;
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	if (hard) {
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
		} while (tmp & PHY_RST_HARD);
	}
}
static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	int i;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_OFF;
			tmp |= PCTL_PHY_DSBL;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}
	}

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(MVS_GBL_CTL, 0);
	tmp = mr32(MVS_GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(MVS_GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(MVS_GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(MVS_GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	return 0;
}
static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;
		/* phys 0-3 live in PCR_PHY_CTL, phys 4-7 in PCR_PHY_CTL2 */
		if (phy_id < 4)
			offs = PCR_PHY_CTL;
		else {
			offs = PCR_PHY_CTL2;
			phy_id -= 4;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
		mw32(MVS_PHY_CTL, tmp);
	}
}
static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		u32 offs;
		/* phys 0-3 live in PCR_PHY_CTL, phys 4-7 in PCR_PHY_CTL2 */
		if (phy_id < 4)
			offs = PCR_PHY_CTL;
		else {
			offs = PCR_PHY_CTL2;
			phy_id -= 4;
		}
		pci_read_config_dword(mvi->pdev, offs, &tmp);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		pci_write_config_dword(mvi->pdev, offs, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
		mw32(MVS_PHY_CTL, tmp);
	}
}
static int __devinit mvs_64xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	if (mvi->pdev && mvi->pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		mvs_show_pcie_usage(mvi);
		tmp = mvs_64xx_chip_reset(mvi);
		if (tmp)
			return tmp;
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* write to device control _AND_ device status register */
		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
		tmp &= ~PRD_REQ_MASK;
		tmp |= PRD_REQ_SIZE;
		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */

	mvs_64xx_phy_hacks(mvi);

	/* enable auto port detection */
	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
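	/*
	 * Note: the double ">> 16" above is the usual idiom for taking the
	 * high 32 bits of a dma_addr_t without shifting by 32, which would
	 * be undefined behaviour when dma_addr_t is only 32 bits wide.
	 */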
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* set phy local SAS address */
		/* should set little endian SAS address to 64xx chip */
		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
				cpu_to_be64(mvi->phy[i].dev_sas_addr));

		mvs_64xx_enable_xmt(mvi, i);

		mvs_64xx_phy_reset(mvi, i, 1);
		msleep(500);
		mvs_64xx_detect_porttype(mvi, i);
	}
	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
			PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}
	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/*
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	mw32(MVS_PCS, tmp);
	/* interrupt coalescing may cause missing HW interrupt in some case,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(MVS_INT_COAL, tmp);
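	/*
	 * (The coalescing count field holds at most 0x1ff, so our 0x200
	 * slot count would wrap to 0x200 & 0x1ff == 0; coalescing is
	 * therefore simply left disabled here.)
	 */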
	tmp = 0x100;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE);

	mw32(MVS_INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
static int mvs_64xx_ioremap(struct mvs_info *mvi)
{
	if (!mvs_ioremap(mvi, 4, 2))
		return 0;
	return -1;
}
static void mvs_64xx_iounmap(struct mvs_info *mvi)
{
	mvs_iounmap(mvi->regs);
	mvs_iounmap(mvi->regs_ex);
}
static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp | INT_EN);
}
static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	mw32(MVS_GBL_CTL, tmp & ~INT_EN);
}
static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs;
	u32 stat;

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		stat = mr32(MVS_GBL_INT_STAT);

		if (stat == 0 || stat == 0xffffffff)
			return 0;
	} else
		stat = 1;
	return stat;
}
static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;

	/* clear CMD_CMPLT ASAP */
	mw32_f(MVS_INT_STAT, CINT_DONE);
#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);
#endif
	mvs_int_full(mvi);
#ifndef MVS_USE_TASKLET
	spin_unlock(&mvi->lock);
#endif
	return IRQ_HANDLED;
}
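/*
 * mvs_64xx_command_active() below sets the slot's bit in the 0x40 and 0x00
 * command windows and then spins until the hardware has cleared the bit in
 * each window again; only then does it return.
 */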
static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;

	mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
	mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
	do {
		tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
	} while (tmp & 1 << (slot_idx % 32));
}
static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
				u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (type == PORT_TYPE_SATA) {
		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
		mw32(MVS_INT_STAT_SRS_0, tmp);
	}
	mw32(MVS_INT_STAT, CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}
static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(MVS_PCS);
		mw32(MVS_PCS, tmp & ~offs);
	} else {
		tmp = mr32(MVS_CTL);
		mw32(MVS_CTL, tmp & ~offs);
	}

	tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
	if (tmp)
		mw32(MVS_INT_STAT_SRS_0, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}
static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(MVS_PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(MVS_CTL);
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			*tfs = i;

			if (i < 16)
				mw32(MVS_PCS, tmp | offs);
			else
				mw32(MVS_CTL, tmp | offs);
			tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
			if (tmp)
				mw32(MVS_INT_STAT_SRS_0, tmp);
			return i;
		}
	}
	return MVS_ID_NOT_MAPPED;
}
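/*
 * Fill the chip's PRD (physical region descriptor) table from a scatterlist:
 * one {DMA address, length} pair per sg entry, stored little-endian as the
 * 64xx DMA engine expects.
 */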
void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;

	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}
}
static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 phy_st;

	mvs_write_port_cfg_addr(mvi, i,
			PHYR_PHY_STAT);
	phy_st = mvs_read_port_cfg_data(mvi, i);
	if (phy_st & PHY_OOB_DTCTD)
		return 1;
	return 0;
}
static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
				struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

	phy->minimum_linkrate =
		(phy->phy_status &
			PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
	phy->maximum_linkrate =
		(phy->phy_status &
			PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
	phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
	phy->att_dev_sas_addr =
		(u64) mvs_read_port_cfg_data(mvi, i) << 32;
	mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
	phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
	phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
}
static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];

	/* workaround for HW phy decoding error on 1.5g disk drive */
	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
	tmp = mvs_read_port_vsr_data(mvi, i);
	if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
	     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
		SAS_LINK_RATE_1_5_GBPS)
		tmp &= ~PHY_MODE6_LATECLK;
	else
		tmp |= PHY_MODE6_LATECLK;
	mvs_write_port_vsr_data(mvi, i, tmp);
}
void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
			struct sas_phy_linkrates *rates)
{
	u32 lrmin = 0, lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmin = (rates->minimum_linkrate << 8);
	lrmax = (rates->maximum_linkrate << 12);

	if (lrmin) {
		tmp &= ~(0xf << 8);
		tmp |= lrmin;
	}
	if (lrmax) {
		tmp &= ~(0xf << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_64xx_phy_reset(mvi, phy_id, 1);
}
static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;

	tmp = mr32(MVS_PCS);
	mw32(MVS_PCS, tmp & 0xFFFF);
	tmp = mr32(MVS_CTL);
	mw32(MVS_CTL, tmp & 0xFFFF);
}
u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;

	return ior32(SPI_DATA_REG_64XX);
}
void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex;

	iow32(SPI_DATA_REG_64XX, data);
}
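/*
 * Pack a flash command word for the 64xx SPI controller.  Judging from the
 * shifts and masks below: the opcode goes in bits 31:24, the transfer length
 * starts at bit 19, bit 23 marks a read, bit 22 marks that an 18-bit address
 * (bits 17:0) is supplied, and MV_MAX_U32 means "no address".
 */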
int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
			u32      *dwCmd,
			u8       cmd,
			u8       read,
			u8       length,
			u32      addr
			)
{
	u32 dwTmp;

	dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
	if (read)
		dwTmp |= 1U << 23;

	if (addr != MV_MAX_U32) {
		dwTmp |= 1U << 22;
		dwTmp |= (addr & 0x0003FFFF);
	}

	*dwCmd = dwTmp;
	return 0;
}
int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex;
	int retry;

	for (retry = 0; retry < 1; retry++) {
		iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
		iow32(SPI_CMD_REG_64XX, cmd);
		iow32(SPI_CTRL_REG_64XX,
			SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
	}

	return 0;
}
int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex;
	u32 i, dwTmp;

	for (i = 0; i < timeout; i++) {
		dwTmp = ior32(SPI_CTRL_REG_64XX);
		if (!(dwTmp & SPI_CTRL_SPISTART))
			return 0;
		msleep(10);
	}

	return -1;
}
#ifndef DISABLE_HOTPLUG_DMA_FIX
void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;

	buf_prd += from;
	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
		buf_prd->addr = cpu_to_le64(buf_dma);
		buf_prd->len = cpu_to_le32(buf_len);
		++buf_prd;
	}
}
#endif
const struct mvs_dispatch mvs_64xx_dispatch = {
	"mv64xx",
	mvs_64xx_init,
	NULL,
	mvs_64xx_ioremap,
	mvs_64xx_iounmap,
	mvs_64xx_isr,
	mvs_64xx_isr_status,
	mvs_64xx_interrupt_enable,
	mvs_64xx_interrupt_disable,
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_get_sas_addr,
	mvs_64xx_command_active,
	mvs_64xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_64xx_assign_reg_set,
	mvs_64xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_64xx_make_prd,
	mvs_64xx_detect_porttype,
	mvs_64xx_oob_done,
	mvs_64xx_fix_phy_info,
	mvs_64xx_phy_work_around,
	mvs_64xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_64xx_phy_disable,
	mvs_64xx_phy_enable,
	mvs_64xx_phy_reset,
	mvs_64xx_stp_reset,
	mvs_64xx_clear_active_cmds,
	mvs_64xx_spi_read_data,
	mvs_64xx_spi_write_data,
	mvs_64xx_spi_buildcmd,
	mvs_64xx_spi_issuecmd,
	mvs_64xx_spi_waitdataready,
#ifndef DISABLE_HOTPLUG_DMA_FIX
	mvs_64xx_fix_dma,
#endif
};
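/*
 * Usage sketch (illustrative only): the chip-independent mvsas core reaches
 * the 64xx-specific routines exclusively through this dispatch table, along
 * the lines of
 *
 *	mvi->dispatch->phy_reset(mvi, phy_id, 1);
 *
 * The member and pointer names in this sketch are assumptions; the real
 * struct mvs_dispatch definition lives in mv_sas.h.
 */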