GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/scsi/mvsas/mv_sas.c
/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"

static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}

void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = &mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = &mvi->tags;
	set_bit(tag, bitmap);
}

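/* Allocate the lowest free slot tag from the mvi->tags bitmap; a full
 * queue is reported to the caller as -SAS_QUEUE_FULL. */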
inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = &mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}

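/* Debug helper: dump 'size' bytes in 16-byte rows, printing 'baseaddr'
 * plus offset beside a hex column and a printable-ASCII column. */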
void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;
	u32 offset;

	offset = 0;
	while (size) {
		printk(KERN_DEBUG"%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk(KERN_DEBUG"%02X ", (u32)data[i]);
			else
				printk(KERN_DEBUG"   ");
		}
		printk(KERN_DEBUG": ");
		for (i = 0; i < run; i++)
			printk(KERN_DEBUG"%c",
				isalnum(data[i]) ? data[i] : '.');
		printk(KERN_DEBUG"\n");
		data = &data[16];
		offset += run;
	}
	printk(KERN_DEBUG"\n");
}

#if (_MV_DUMP > 1)
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
	u32 offset;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	offset = slot->cmd_size + MVS_OAF_SZ +
	    MVS_CHIP_DISP->prd_size() * slot->n_elem;
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
}
#endif

static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if (_MV_DUMP > 1)
	u32 sz, w_ptr;
	u64 addr;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* Delivery Queue */
	sz = MVS_CHIP_SLOT_SZ;
	w_ptr = slot->tx;
	addr = mvi->tx_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Size=%04d, WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, (unsigned long long)mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/* Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, mvi->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, (unsigned long long)slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
	/* mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/* 1. command table area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/* 2. open address frame area */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/* 3. status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/* 4. PRD table */
	dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
	mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}

static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/* Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, mvi->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}

void mvs_get_sas_addr(void *buf, u32 buflen)
{
	/*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
}

struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy =  container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}

int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;
			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}

static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has already been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}

static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}
	msleep(200);
	return rc;
}

void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
				u32 off_lo, u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}

static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;
	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_ha = mvi->sas;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}
	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	mvi->sas->notify_port_event(sas_phy,
				   PORTE_BYTES_DMAED);
}

int mvs_slave_alloc(struct scsi_device *scsi_dev)
{
	struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
	if (dev_is_sata(dev)) {
		/* We don't need to rescan targets
		 * if the REPORT_LUNS request fails.
		 */
		if (scsi_dev->lun > 0)
			return -ENXIO;
		scsi_dev->tagged_supported = 1;
	}

	return sas_slave_alloc(scsi_dev);
}

int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (dev_is_sata(dev)) {
		/* may set PIO mode */
#if MV_DISABLE_NCQ
		struct ata_port *ap = dev->sata_dev.ap;
		struct ata_device *adev = ap->link.device;
		adev->flags |= ATA_DFLAG_NCQ_OFF;
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
#endif
	}
	return 0;
}

void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i);
	}
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	scsi_flush_work(shost);
	return 1;
}

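/* Build an SMP request in the slot's MVS_SLOT_BUF_SZ DMA buffer.  The
 * buffer is carved into four regions (command table, open address frame,
 * PRD table, status buffer) and the command header is pointed at each
 * region in turn. */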
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}

static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
			qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}

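/* Prepare a SATA/STP command: reserve a SATA register set for the device,
 * queue the delivery-queue entry, lay out the slot buffer (command FIS
 * plus optional ATAPI CDB, open address frame, PRD table, status buffer)
 * and fill in the command header, folding the NCQ tag into the FIS when
 * the task uses FPDMA. */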
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("not enough register sets for dev %d.\n",
			mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		(sas_port->phy_mask << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#else
	flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#endif
	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	hdr->flags = cpu_to_le32(flags);

	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
	    sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
#endif
	return 0;
}

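/* Prepare an SSP command or task-management frame.  The slot buffer
 * layout mirrors the ATA path; region 1 carries the SSP frame header
 * followed by the command or TASK IU. */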
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}

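/* Common delivery path for all protocols: validate the port, map the
 * scatterlist, allocate a slot tag, hand off to the per-protocol prep
 * routine and finally ring the delivery queue.  'num' chained tasks may
 * be queued in one call; on error the tag and DMA mappings are unwound. */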
#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct mvs_task_exec_info tei;
	struct sas_task *t = task;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	u32 n = num, pass = 0;
	unsigned long flags = 0, flags_libsas = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SATA_DEV)
			t->task_done(t);
		return 0;
	}

	spin_lock_irqsave(&mvi->lock, flags);
	do {
		dev = t->dev;
		mvi_dev = dev->lldd_dev;
		if (DEV_IS_GONE(mvi_dev)) {
			if (mvi_dev)
				mv_dprintk("device %d not ready.\n",
					mvi_dev->device_id);
			else
				mv_dprintk("device %016llx not ready.\n",
					SAS_ADDR(dev->sas_addr));

			rc = SAS_PHY_DOWN;
			goto out_done;
		}

		if (dev->port->id >= mvi->chip->n_phy)
			tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
		else
			tei.port = &mvi->port[dev->port->id];

		if (tei.port && !tei.port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				struct task_status_struct *ts = &t->task_status;

				mv_dprintk("port %d has no attached device.\n",
					dev->port->id);
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				spin_unlock_irqrestore(dev->sata_dev.ap->lock,
						       flags_libsas);
				spin_unlock_irqrestore(&mvi->lock, flags);
				t->task_done(t);
				spin_lock_irqsave(&mvi->lock, flags);
				spin_lock_irqsave(dev->sata_dev.ap->lock,
						  flags_libsas);
				if (n > 1)
					t = list_entry(t->list.next,
						       struct sas_task, list);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				if (n > 1)
					t = list_entry(t->list.next,
						       struct sas_task, list);
				continue;
			}
		}

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(mvi->dev,
						    t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		rc = mvs_tag_alloc(mvi, &tag);
		if (rc)
			goto err_out;

		slot = &mvi->slot_info[tag];

		t->lldd_task = NULL;
		slot->n_elem = n_elem;
		slot->slot_tag = tag;
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);

		tei.task = t;
		tei.hdr = &mvi->slot[tag];
		tei.tag = tag;
		tei.n_elem = n_elem;
		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = mvs_task_prep_smp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SSP:
			rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = mvs_task_prep_ata(mvi, &tei);
			break;
		default:
			dev_printk(KERN_ERR, mvi->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			mv_dprintk("rc is %x\n", rc);
			goto err_out_tag;
		}
		slot->task = t;
		slot->port = tei.port;
		t->lldd_task = slot;
		list_add_tail(&slot->entry, &tei.port->list);
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);

		mvs_hba_memory_dump(mvi, tag, t->task_proto);
		mvi_dev->running_req++;
		++pass;
		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
		if (n > 1)
			t = list_entry(t->list.next, struct sas_task, list);
		if (likely(pass))
			MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
				(MVS_CHIP_SLOT_SZ - 1));

	} while (--n);
	rc = 0;
	goto out_done;

err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:

	dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, t->scatter, n_elem,
				     t->data_dir);
out_done:
	spin_unlock_irqrestore(&mvi->lock, flags);
	return rc;
}

int mvs_queue_command(struct sas_task *task, const int num,
			gfp_t gfp_flags)
{
	return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
}

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}

static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}

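/* Rewrite the PHYR_WIDE_PORT register of every phy belonging to this
 * port: phys that are members of the wide port get the shared phy map,
 * the remaining phys get 0. */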
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no, 0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}

static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);

	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

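/* Refresh a phy's attachment state after an OOB/identify event.  For a
 * SATA phy the D2H signature FIS is fetched (or, if it has not arrived
 * yet, the signature-FIS interrupt is unmasked and the phy reset); for a
 * SAS phy the identify frame determines the attached device type and
 * target protocol. */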
void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;
				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				MVS_CHIP_DISP->phy_reset(mvi, i, 0);
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("port %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("port %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}

static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL; int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;
	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (sas_port->id >= mvi->chip->n_phy)
		port = &mvi->port[sas_port->id - mvi->chip->n_phy];
	else
		port = &mvi->port[sas_port->id];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}

static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, NULL);
}

void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}

struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;
	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == NO_DEVICE) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("at most %d devices are supported, ignoring.\n",
			MVS_MAX_DEVICES);

	return NULL;
}

void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;
	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = NO_DEVICE;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}

int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx "
				"at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}

int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}

void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	if (mvi_dev) {
		mv_dprintk("found dev[%d:%x] is gone.\n",
			mvi_dev->device_id, mvi_dev->dev_type);
		mvs_release_task(mvi, dev);
		mvs_free_reg_set(mvi, mvi_dev);
		mvs_free_dev(mvi_dev);
	} else {
		mv_dprintk("found dev has gone.\n");
	}
	dev->lldd_dev = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}

void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}

static struct sas_task *mvs_alloc_task(void)
{
	struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);

	if (task) {
		INIT_LIST_HEAD(&task->list);
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
		init_timer(&task->timer);
		init_completion(&task->completion);
	}
	return task;
}

static void mvs_free_task(struct sas_task *task)
{
	if (task) {
		BUG_ON(!list_empty(&task->list));
		kfree(task);
	}
}

static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->timer))
		return;
	complete(&task->completion);
}

static void mvs_tmf_timedout(unsigned long data)
{
	struct sas_task *task = (struct sas_task *)data;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->completion);
}

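/* Execute a task-management function internally: build a one-off
 * sas_task around the TMF, send it through mvs_task_exec() and wait for
 * its completion under an MVS_TASK_TIMEOUT-second timer, retrying up to
 * three times. */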
#define MVS_TASK_TIMEOUT 20
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = mvs_alloc_task();
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->timer.data = (unsigned long) task;
		task->timer.function = mvs_tmf_timedout;
		task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->timer);

		res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->timer);
			mv_printk("executing internal task failed:%d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->completion);
		res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			mv_dprintk(" task to dev %016llx response: 0x%x "
				   "status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			mvs_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	if (task != NULL)
		mvs_free_task(task);
	return res;
}

static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
				u8 *lun, struct mvs_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	DECLARE_COMPLETION_ONSTACK(completion);
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	strncpy((u8 *)&ssp_task.LUN, lun, 8);

	return mvs_exec_internal_tmf_task(dev, &ssp_task,
				sizeof(ssp_task), tmf);
}


/* The standard mandates a link reset for ATA (type 0) and a hard reset
   for SSP (type 1), and only for RECOVERY. */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_find_local_phy(dev);
	int reset_type = (dev->dev_type == SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	msleep(2000);
	return rc;
}

/* mandatory SAM-3 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		num = mvs_find_dev_phyno(dev, phyno);
		spin_lock_irqsave(&mvi->lock, flags);
		for (i = 0; i < num; i++)
			mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If failed, fall through to the I_T_Nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}

int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	/* housekeeper */
	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

/* optional SAM-3 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in the LUN; release it then. */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed; reset the phy. */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_COMPLETE;
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

/* mandatory SAM-3; the task/slot info still needs to be freed */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("%s:%d TMF_RESP_FUNC_FAILED\n", __func__, __LINE__);
		rc = TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	mvi_dev->dev_status = MVS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, clear the task and callback forwards.*/
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		/* TODO: free the register set */
		if (SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			struct task_status_struct *tstat;
			u32 slot_idx = (u32)(slot - mvi->slot_info);
			tstat = &task->task_status;
			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=x%x\n",
				   mvi, task, slot, slot_idx);
			tstat->stat = SAS_ABORTED_TASK;
			if (mvi_dev && mvi_dev->running_req)
				mvi_dev->running_req--;
			if (sas_protocol_ata(task->task_proto))
				mvs_free_reg_set(mvi, mvi_dev);
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			return -1;
		}
	} else {
		/* SMP */
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err)) {
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}

static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			 u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_STAT_CHECK_CONDITION;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		stat = SAS_ABORTED_TASK;
		break;
	case SAS_PROTOCOL_SMP:
		stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		if (err_dw0 == 0x80400002)
			mv_printk("find reserved error, why?\n");

		task->ata_task.use_ncq = 0;
		mvs_sata_done(mvi, task, slot_idx, err_dw0);
	}
		break;
	default:
		break;
	}

	return stat;
}

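/* Completion path for one RX descriptor: translate the hardware status
 * into a libsas task_status (copying back SMP responses and the SATA D2H
 * FIS as needed), free the slot and invoke the task's ->task_done()
 * callback. */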
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;
	struct domain_device *dev;
	u32 aborted;

	void *to;
	enum exec_status sts;

	if (mvi->exp_req)
		mvi->exp_req--;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -1;

	tstat = &task->task_status;
	dev = task->dev;
	mvi_dev = dev->lldd_dev;

	mvs_hba_cq_dump(mvi);

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race condition */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev && mvi_dev->running_req)
			mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}

	if (unlikely(!mvi_dev || flags)) {
		if (!mvi_dev)
			mv_dprintk("port has no device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		tstat->resp = SAS_TASK_COMPLETE;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_STAT_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
						sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SMP: {
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_STAT_GOOD;
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}
	if (!slot->port->port_attached) {
		mv_dprintk("port %d has been removed.\n",
			slot->port->sas_port.id);
		tstat->stat = SAS_PHY_DOWN;
	}


out:
	if (mvi_dev && mvi_dev->running_req) {
		mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	sts = tstat->stat;

	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);
	else
		mv_dprintk("no task_done callback.\n");
	spin_lock(&mvi->lock);

	return sts;
}

void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* clean cmpl queue in case request is already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;
		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}

void mvs_release_task(struct mvs_info *mvi,
		      struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;
	/* housekeeper */
	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}

static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}

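/* Deferred hot-plug worker, scheduled by mvs_handle_event() with a two
 * second delay: re-read the phy state and notify libsas of either loss
 * of signal or a newly attached device. */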
static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {
		u32 phy_no = (unsigned long) mwq->data;
		struct sas_ha_struct *sas_ha = mvi->sas;
		struct mvs_phy *phy = &mvi->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;

		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;
			struct sas_identify_frame *id;
			id = (struct sas_identify_frame *)phy->frame_rcvd;
			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_ha->notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}

static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	struct mvs_wq *mwq;
	int ret = 0;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}

static void mvs_sig_time_out(unsigned long tphy)
{
	struct mvs_phy *phy = (struct mvs_phy *)tphy;
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature time out, reset phy %d\n",
				phy_no+mvi->id*mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
		}
	}
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}

void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct sas_ha_struct *sas_ha = mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * 'events' carries the port events; we still need to check the
	 * per-port interrupt status.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("port %d STP decoding error.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (!ready)
				mv_dprintk("phy%d Unplug Notice\n",
					phy_no +
					mvi->id * mvi->chip->n_phy);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, 0);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.data = (unsigned long)phy;
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 10*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mvs_sig_remove_timer(phy);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, 2);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no);
			/* whether driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("port %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		/* exception for Samsung disk drive*/
		mdelay(1000);
		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	}
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
}

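/* Drain the RX completion ring.  mvi->rx[0] mirrors the hardware's
 * producer index (see the comment below), so the ring entries proper
 * start at mvi->rx[1]. */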
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}