[SCSI] mvsas: Add support for Non specific NCQ error interrupt
[linux-2.6.git] / drivers / scsi / mvsas / mv_94xx.c
blob9d60c7c19b32977c3968c539b2ec5f0bf5d72087
/*
 * Marvell 88SE94xx hardware specific
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
26 #include "mv_sas.h"
27 #include "mv_94xx.h"
28 #include "mv_chips.h"
30 static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
32 u32 reg;
33 struct mvs_phy *phy = &mvi->phy[i];
34 u32 phy_status;
36 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
37 reg = mvs_read_port_vsr_data(mvi, i);
38 phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
39 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
40 switch (phy_status) {
41 case 0x10:
42 phy->phy_type |= PORT_TYPE_SAS;
43 break;
44 case 0x1d:
45 default:
46 phy->phy_type |= PORT_TYPE_SATA;
47 break;
51 static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
53 void __iomem *regs = mvi->regs;
54 u32 tmp;
56 tmp = mr32(MVS_PCS);
57 tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
58 mw32(MVS_PCS, tmp);
61 static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
63 u32 tmp;
65 tmp = mvs_read_port_irq_stat(mvi, phy_id);
66 tmp &= ~PHYEV_RDY_CH;
67 mvs_write_port_irq_stat(mvi, phy_id, tmp);
68 if (hard) {
69 tmp = mvs_read_phy_ctl(mvi, phy_id);
70 tmp |= PHY_RST_HARD;
71 mvs_write_phy_ctl(mvi, phy_id, tmp);
72 do {
73 tmp = mvs_read_phy_ctl(mvi, phy_id);
74 } while (tmp & PHY_RST_HARD);
75 } else {
76 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
77 tmp = mvs_read_port_vsr_data(mvi, phy_id);
78 tmp |= PHY_RST;
79 mvs_write_port_vsr_data(mvi, phy_id, tmp);
83 static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
85 u32 tmp;
86 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
87 tmp = mvs_read_port_vsr_data(mvi, phy_id);
88 mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
91 static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
93 mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
94 mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
95 mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
96 mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
97 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
98 mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
101 static int __devinit mvs_94xx_init(struct mvs_info *mvi)
103 void __iomem *regs = mvi->regs;
104 int i;
105 u32 tmp, cctl;
107 mvs_show_pcie_usage(mvi);
108 if (mvi->flags & MVF_FLAG_SOC) {
109 tmp = mr32(MVS_PHY_CTL);
110 tmp &= ~PCTL_PWR_OFF;
111 tmp |= PCTL_PHY_DSBL;
112 mw32(MVS_PHY_CTL, tmp);
115 /* Init Chip */
116 /* make sure RST is set; HBA_RST /should/ have done that for us */
117 cctl = mr32(MVS_CTL) & 0xFFFF;
118 if (cctl & CCTL_RST)
119 cctl &= ~CCTL_RST;
120 else
121 mw32_f(MVS_CTL, cctl | CCTL_RST);
123 if (mvi->flags & MVF_FLAG_SOC) {
124 tmp = mr32(MVS_PHY_CTL);
125 tmp &= ~PCTL_PWR_OFF;
126 tmp |= PCTL_COM_ON;
127 tmp &= ~PCTL_PHY_DSBL;
128 tmp |= PCTL_LINK_RST;
129 mw32(MVS_PHY_CTL, tmp);
130 msleep(100);
131 tmp &= ~PCTL_LINK_RST;
132 mw32(MVS_PHY_CTL, tmp);
133 msleep(100);
136 /* reset control */
137 mw32(MVS_PCS, 0); /* MVS_PCS */
138 mw32(MVS_STP_REG_SET_0, 0);
139 mw32(MVS_STP_REG_SET_1, 0);
141 /* init phys */
142 mvs_phy_hacks(mvi);
144 /* disable Multiplexing, enable phy implemented */
145 mw32(MVS_PORTS_IMP, 0xFF);
148 mw32(MVS_PA_VSR_ADDR, 0x00000104);
149 mw32(MVS_PA_VSR_PORT, 0x00018080);
150 mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
151 mw32(MVS_PA_VSR_PORT, 0x0084ffff);
153 /* set LED blink when IO*/
154 mw32(MVS_PA_VSR_ADDR, 0x00000030);
155 tmp = mr32(MVS_PA_VSR_PORT);
156 tmp &= 0xFFFF00FF;
157 tmp |= 0x00003300;
158 mw32(MVS_PA_VSR_PORT, tmp);
160 mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
161 mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
163 mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
164 mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
166 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
167 mw32(MVS_TX_LO, mvi->tx_dma);
168 mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
170 mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
171 mw32(MVS_RX_LO, mvi->rx_dma);
172 mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
174 for (i = 0; i < mvi->chip->n_phy; i++) {
175 mvs_94xx_phy_disable(mvi, i);
176 /* set phy local SAS address */
177 mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
178 (mvi->phy[i].dev_sas_addr));
180 mvs_94xx_enable_xmt(mvi, i);
181 mvs_94xx_phy_enable(mvi, i);
183 mvs_94xx_phy_reset(mvi, i, 1);
184 msleep(500);
185 mvs_94xx_detect_porttype(mvi, i);
188 if (mvi->flags & MVF_FLAG_SOC) {
189 /* set select registers */
190 writel(0x0E008000, regs + 0x000);
191 writel(0x59000008, regs + 0x004);
192 writel(0x20, regs + 0x008);
193 writel(0x20, regs + 0x00c);
194 writel(0x20, regs + 0x010);
195 writel(0x20, regs + 0x014);
196 writel(0x20, regs + 0x018);
197 writel(0x20, regs + 0x01c);
199 for (i = 0; i < mvi->chip->n_phy; i++) {
200 /* clear phy int status */
201 tmp = mvs_read_port_irq_stat(mvi, i);
202 tmp &= ~PHYEV_SIG_FIS;
203 mvs_write_port_irq_stat(mvi, i, tmp);
205 /* set phy int mask */
206 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
207 PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
208 mvs_write_port_irq_mask(mvi, i, tmp);
210 msleep(100);
211 mvs_update_phyinfo(mvi, i, 1);
214 /* FIXME: update wide port bitmaps */
216 /* little endian for open address and command table, etc. */
218 * it seems that ( from the spec ) turning on big-endian won't
219 * do us any good on big-endian machines, need further confirmation
221 cctl = mr32(MVS_CTL);
222 cctl |= CCTL_ENDIAN_CMD;
223 cctl |= CCTL_ENDIAN_DATA;
224 cctl &= ~CCTL_ENDIAN_OPEN;
225 cctl |= CCTL_ENDIAN_RSP;
226 mw32_f(MVS_CTL, cctl);
228 /* reset CMD queue */
229 tmp = mr32(MVS_PCS);
230 tmp |= PCS_CMD_RST;
231 mw32(MVS_PCS, tmp);
232 /* interrupt coalescing may cause missing HW interrput in some case,
233 * and the max count is 0x1ff, while our max slot is 0x200,
234 * it will make count 0.
236 tmp = 0;
237 mw32(MVS_INT_COAL, tmp);
239 tmp = 0x100;
240 mw32(MVS_INT_COAL_TMOUT, tmp);
242 /* ladies and gentlemen, start your engines */
243 mw32(MVS_TX_CFG, 0);
244 mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
245 mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
246 /* enable CMD/CMPL_Q/RESP mode */
247 mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
248 PCS_CMD_EN | PCS_CMD_STOP_ERR);
250 /* enable completion queue interrupt */
251 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
252 CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
253 tmp |= CINT_PHY_MASK;
254 mw32(MVS_INT_MASK, tmp);
256 /* Enable SRS interrupt */
257 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
259 return 0;
262 static int mvs_94xx_ioremap(struct mvs_info *mvi)
264 if (!mvs_ioremap(mvi, 2, -1)) {
265 mvi->regs_ex = mvi->regs + 0x10200;
266 mvi->regs += 0x20000;
267 if (mvi->id == 1)
268 mvi->regs += 0x4000;
269 return 0;
271 return -1;
274 static void mvs_94xx_iounmap(struct mvs_info *mvi)
276 if (mvi->regs) {
277 mvi->regs -= 0x20000;
278 if (mvi->id == 1)
279 mvi->regs -= 0x4000;
280 mvs_iounmap(mvi->regs);
284 static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
286 void __iomem *regs = mvi->regs_ex;
287 u32 tmp;
289 tmp = mr32(MVS_GBL_CTL);
290 tmp |= (IRQ_SAS_A | IRQ_SAS_B);
291 mw32(MVS_GBL_INT_STAT, tmp);
292 writel(tmp, regs + 0x0C);
293 writel(tmp, regs + 0x10);
294 writel(tmp, regs + 0x14);
295 writel(tmp, regs + 0x18);
296 mw32(MVS_GBL_CTL, tmp);
299 static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
301 void __iomem *regs = mvi->regs_ex;
302 u32 tmp;
304 tmp = mr32(MVS_GBL_CTL);
306 tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
307 mw32(MVS_GBL_INT_STAT, tmp);
308 writel(tmp, regs + 0x0C);
309 writel(tmp, regs + 0x10);
310 writel(tmp, regs + 0x14);
311 writel(tmp, regs + 0x18);
312 mw32(MVS_GBL_CTL, tmp);
315 static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
317 void __iomem *regs = mvi->regs_ex;
318 u32 stat = 0;
319 if (!(mvi->flags & MVF_FLAG_SOC)) {
320 stat = mr32(MVS_GBL_INT_STAT);
322 if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
323 return 0;
325 return stat;
328 static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
330 void __iomem *regs = mvi->regs;
332 if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
333 ((stat & IRQ_SAS_B) && mvi->id == 1)) {
334 mw32_f(MVS_INT_STAT, CINT_DONE);
335 #ifndef MVS_USE_TASKLET
336 spin_lock(&mvi->lock);
337 #endif
338 mvs_int_full(mvi);
339 #ifndef MVS_USE_TASKLET
340 spin_unlock(&mvi->lock);
341 #endif
343 return IRQ_HANDLED;
346 static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
348 u32 tmp;
349 mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
350 do {
351 tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
352 } while (tmp & 1 << (slot_idx % 32));
355 static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
356 u32 tfs)
358 void __iomem *regs = mvi->regs;
359 u32 tmp;
361 if (type == PORT_TYPE_SATA) {
362 tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
363 mw32(MVS_INT_STAT_SRS_0, tmp);
365 mw32(MVS_INT_STAT, CINT_CI_STOP);
366 tmp = mr32(MVS_PCS) | 0xFF00;
367 mw32(MVS_PCS, tmp);
370 static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
372 void __iomem *regs = mvi->regs;
373 u32 err_0, err_1;
374 u8 i;
375 struct mvs_device *device;
377 err_0 = mr32(MVS_NON_NCQ_ERR_0);
378 err_1 = mr32(MVS_NON_NCQ_ERR_1);
380 mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
381 err_0, err_1);
382 for (i = 0; i < 32; i++) {
383 if (err_0 & bit(i)) {
384 device = mvs_find_dev_by_reg_set(mvi, i);
385 if (device)
386 mvs_release_task(mvi, device->sas_device);
388 if (err_1 & bit(i)) {
389 device = mvs_find_dev_by_reg_set(mvi, i+32);
390 if (device)
391 mvs_release_task(mvi, device->sas_device);
395 mw32(MVS_NON_NCQ_ERR_0, err_0);
396 mw32(MVS_NON_NCQ_ERR_1, err_1);
399 static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
401 void __iomem *regs = mvi->regs;
402 u32 tmp;
403 u8 reg_set = *tfs;
405 if (*tfs == MVS_ID_NOT_MAPPED)
406 return;
408 mvi->sata_reg_set &= ~bit(reg_set);
409 if (reg_set < 32) {
410 w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
411 tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
412 if (tmp)
413 mw32(MVS_INT_STAT_SRS_0, tmp);
414 } else {
415 w_reg_set_enable(reg_set, mvi->sata_reg_set);
416 tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
417 if (tmp)
418 mw32(MVS_INT_STAT_SRS_1, tmp);
421 *tfs = MVS_ID_NOT_MAPPED;
423 return;
426 static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
428 int i;
429 void __iomem *regs = mvi->regs;
431 if (*tfs != MVS_ID_NOT_MAPPED)
432 return 0;
434 i = mv_ffc64(mvi->sata_reg_set);
435 if (i > 32) {
436 mvi->sata_reg_set |= bit(i);
437 w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
438 *tfs = i;
439 return 0;
440 } else if (i >= 0) {
441 mvi->sata_reg_set |= bit(i);
442 w_reg_set_enable(i, (u32)mvi->sata_reg_set);
443 *tfs = i;
444 return 0;
446 return MVS_ID_NOT_MAPPED;
449 static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
451 int i;
452 struct scatterlist *sg;
453 struct mvs_prd *buf_prd = prd;
454 for_each_sg(scatter, sg, nr, i) {
455 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
456 buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
457 buf_prd++;
461 static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
463 u32 phy_st;
464 phy_st = mvs_read_phy_ctl(mvi, i);
465 if (phy_st & PHY_READY_MASK) /* phy ready */
466 return 1;
467 return 0;
470 static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
471 struct sas_identify_frame *id)
473 int i;
474 u32 id_frame[7];
476 for (i = 0; i < 7; i++) {
477 mvs_write_port_cfg_addr(mvi, port_id,
478 CONFIG_ID_FRAME0 + i * 4);
479 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
481 memcpy(id, id_frame, 28);
484 static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
485 struct sas_identify_frame *id)
487 int i;
488 u32 id_frame[7];
490 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
491 for (i = 0; i < 7; i++) {
492 mvs_write_port_cfg_addr(mvi, port_id,
493 CONFIG_ATT_ID_FRAME0 + i * 4);
494 id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
495 mv_dprintk("94xx phy %d atta frame %d %x.\n",
496 port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
498 /* mvs_hexdump(28, (u8 *)id_frame, 0); */
499 memcpy(id, id_frame, 28);
502 static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
504 u32 att_dev_info = 0;
506 att_dev_info |= id->dev_type;
507 if (id->stp_iport)
508 att_dev_info |= PORT_DEV_STP_INIT;
509 if (id->smp_iport)
510 att_dev_info |= PORT_DEV_SMP_INIT;
511 if (id->ssp_iport)
512 att_dev_info |= PORT_DEV_SSP_INIT;
513 if (id->stp_tport)
514 att_dev_info |= PORT_DEV_STP_TRGT;
515 if (id->smp_tport)
516 att_dev_info |= PORT_DEV_SMP_TRGT;
517 if (id->ssp_tport)
518 att_dev_info |= PORT_DEV_SSP_TRGT;
520 att_dev_info |= (u32)id->phy_id<<24;
521 return att_dev_info;
524 static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
526 return mvs_94xx_make_dev_info(id);
529 static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
530 struct sas_identify_frame *id)
532 struct mvs_phy *phy = &mvi->phy[i];
533 struct asd_sas_phy *sas_phy = &phy->sas_phy;
534 mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
535 sas_phy->linkrate =
536 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
537 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
538 sas_phy->linkrate += 0x8;
539 mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
540 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
541 phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
542 mvs_94xx_get_dev_identify_frame(mvi, i, id);
543 phy->dev_info = mvs_94xx_make_dev_info(id);
545 if (phy->phy_type & PORT_TYPE_SAS) {
546 mvs_94xx_get_att_identify_frame(mvi, i, id);
547 phy->att_dev_info = mvs_94xx_make_att_info(id);
548 phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
549 } else {
550 phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
555 void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
556 struct sas_phy_linkrates *rates)
558 /* TODO */
561 static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
563 u32 tmp;
564 void __iomem *regs = mvi->regs;
565 tmp = mr32(MVS_STP_REG_SET_0);
566 mw32(MVS_STP_REG_SET_0, 0);
567 mw32(MVS_STP_REG_SET_0, tmp);
568 tmp = mr32(MVS_STP_REG_SET_1);
569 mw32(MVS_STP_REG_SET_1, 0);
570 mw32(MVS_STP_REG_SET_1, tmp);
574 u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
576 void __iomem *regs = mvi->regs_ex - 0x10200;
577 return mr32(SPI_RD_DATA_REG_94XX);
580 void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
582 void __iomem *regs = mvi->regs_ex - 0x10200;
583 mw32(SPI_RD_DATA_REG_94XX, data);
587 int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
588 u32 *dwCmd,
589 u8 cmd,
590 u8 read,
591 u8 length,
592 u32 addr
595 void __iomem *regs = mvi->regs_ex - 0x10200;
596 u32 dwTmp;
598 dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
599 if (read)
600 dwTmp |= SPI_CTRL_READ_94XX;
602 if (addr != MV_MAX_U32) {
603 mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
604 dwTmp |= SPI_ADDR_VLD_94XX;
607 *dwCmd = dwTmp;
608 return 0;
612 int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
614 void __iomem *regs = mvi->regs_ex - 0x10200;
615 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
617 return 0;
620 int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
622 void __iomem *regs = mvi->regs_ex - 0x10200;
623 u32 i, dwTmp;
625 for (i = 0; i < timeout; i++) {
626 dwTmp = mr32(SPI_CTRL_REG_94XX);
627 if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
628 return 0;
629 msleep(10);
632 return -1;
635 #ifndef DISABLE_HOTPLUG_DMA_FIX
636 void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
638 int i;
639 struct mvs_prd *buf_prd = prd;
640 buf_prd += from;
641 for (i = 0; i < MAX_SG_ENTRY - from; i++) {
642 buf_prd->addr = cpu_to_le64(buf_dma);
643 buf_prd->im_len.len = cpu_to_le32(buf_len);
644 ++buf_prd;
647 #endif
650 * FIXME JEJB: temporary nop clear_srs_irq to make 94xx still work
651 * with 64xx fixes
653 static void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set,
654 u8 clear_all)
658 const struct mvs_dispatch mvs_94xx_dispatch = {
659 "mv94xx",
660 mvs_94xx_init,
661 NULL,
662 mvs_94xx_ioremap,
663 mvs_94xx_iounmap,
664 mvs_94xx_isr,
665 mvs_94xx_isr_status,
666 mvs_94xx_interrupt_enable,
667 mvs_94xx_interrupt_disable,
668 mvs_read_phy_ctl,
669 mvs_write_phy_ctl,
670 mvs_read_port_cfg_data,
671 mvs_write_port_cfg_data,
672 mvs_write_port_cfg_addr,
673 mvs_read_port_vsr_data,
674 mvs_write_port_vsr_data,
675 mvs_write_port_vsr_addr,
676 mvs_read_port_irq_stat,
677 mvs_write_port_irq_stat,
678 mvs_read_port_irq_mask,
679 mvs_write_port_irq_mask,
680 mvs_get_sas_addr,
681 mvs_94xx_command_active,
682 mvs_94xx_clear_srs_irq,
683 mvs_94xx_issue_stop,
684 mvs_start_delivery,
685 mvs_rx_update,
686 mvs_int_full,
687 mvs_94xx_assign_reg_set,
688 mvs_94xx_free_reg_set,
689 mvs_get_prd_size,
690 mvs_get_prd_count,
691 mvs_94xx_make_prd,
692 mvs_94xx_detect_porttype,
693 mvs_94xx_oob_done,
694 mvs_94xx_fix_phy_info,
695 NULL,
696 mvs_94xx_phy_set_link_rate,
697 mvs_hw_max_link_rate,
698 mvs_94xx_phy_disable,
699 mvs_94xx_phy_enable,
700 mvs_94xx_phy_reset,
701 NULL,
702 mvs_94xx_clear_active_cmds,
703 mvs_94xx_spi_read_data,
704 mvs_94xx_spi_write_data,
705 mvs_94xx_spi_buildcmd,
706 mvs_94xx_spi_issuecmd,
707 mvs_94xx_spi_waitdataready,
708 #ifndef DISABLE_HOTPLUG_DMA_FIX
709 mvs_94xx_fix_dma,
710 #endif
711 mvs_94xx_non_spec_ncq_error,