[SCSI] lpfc: Fixes in mbox_timeout_handler
drivers/scsi/lpfc/lpfc_sli.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Enterprise Fibre Channel Host Bus Adapters.                     *
 * Refer to the README file included with this package for         *
 * driver version and adapter support.                             *
 * Copyright (C) 2004 Emulex Corporation.                          *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of the GNU General Public License     *
 * as published by the Free Software Foundation; either version 2  *
 * of the License, or (at your option) any later version.          *
 *                                                                 *
 * This program is distributed in the hope that it will be useful, *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
 * GNU General Public License for more details, a copy of which    *
 * can be found in the file COPYING included with this package.    *
 *******************************************************************/

/*
 * $Id: lpfc_sli.c 1.232 2005/04/13 11:59:16EDT sf_support Exp $
 */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
/*
 * Define macro to log: Mailbox command x%x cannot issue Data
 * This allows multiple uses of lpfc_msgBlk0311
 * w/o perturbing log msg utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
			lpfc_printf_log(phba, \
				KERN_INFO, \
				LOG_MBOX | LOG_SLI, \
				"%d:0311 Mailbox command x%x cannot issue " \
				"Data: x%x x%x x%x\n", \
				phba->brd_no, \
				mb->mbxCommand, \
				phba->hba_state, \
				psli->sli_flag, \
				flag);
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
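
/*
 * Issue a CONFIG_RING mailbox command, in polled mode, for each SLI
 * ring so the HBA knows each ring's size and profile before IOCB
 * processing begins.  Any failure puts the HBA into the error state.
 */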
static int
lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *pmbox = &pmb->mb;
	int i, rc;

	for (i = 0; i < psli->num_rings; i++) {
		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0446 Adapter failed to init, "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					i);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ENXIO;
		}
	}
	return 0;
}
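
/*
 * Track an active command IOCB on the ring's txcmplq until its
 * response arrives.  If the ring keeps a fast_lookup table, the iotag
 * also indexes the IOCB directly so the response handler can avoid a
 * linear search of the queue.
 */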
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
			struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
{
	uint16_t iotag;

	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if (unlikely(pring->ringno == LPFC_ELS_RING))
		mod_timer(&phba->els_tmofunc,
			  jiffies + HZ * (phba->fc_ratov << 1));

	if (pring->fast_lookup) {
		/* Setup fast lookup based on iotag for completion */
		iotag = piocb->iocb.ulpIoTag;
		if (iotag && (iotag < pring->fast_iotag))
			*(pring->fast_lookup + iotag) = piocb;
		else {

			/* Cmd ring <ringno> put: iotag <iotag> greater than
			   configured max <fast_iotag> wd0 <icmd> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"%d:0316 Cmd ring %d put: iotag x%x "
					"greater than configured max x%x "
					"wd0 x%x\n",
					phba->brd_no,
					pring->ringno, iotag,
					pring->fast_iotag,
					*(((uint32_t *)(&piocb->iocb)) + 7));
		}
	}
	return (0);
}
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	struct list_head *dlp;
	struct lpfc_iocbq *cmd_iocb;

	dlp = &pring->txq;
	cmd_iocb = NULL;
	list_remove_head((&pring->txq), cmd_iocb,
			 struct lpfc_iocbq,
			 list);
	if (cmd_iocb) {
		/* If the first ptr is not equal to the list header,
		 * dequeue the IOCBQ_t and return it.
		 */
		pring->txq_cnt--;
	}
	return (cmd_iocb);
}
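
/*
 * Return a pointer to the next free command slot in the ring, or NULL
 * if the ring is full.  The local copy of the port's cmdGetInx is
 * refreshed from SLIM only when the ring appears full; a get index
 * beyond the ring size is treated as a fatal adapter error.
 */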
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;
	IOCB_t *iocb = NULL;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					phba->brd_no, pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;
			if (phba->work_wait)
				wake_up(phba->work_wait);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);

	return iocb;
}
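
/*
 * Allocate the next iotag for a command IOCB.  Without a fast_lookup
 * table this is a simple wrapping counter; with one, the counter scans
 * for a free table slot and returns 0 when every iotag is in use.
 */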
static uint32_t
lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	uint32_t search_start;

	if (pring->fast_lookup == NULL) {
		pring->iotag_ctr++;
		if (pring->iotag_ctr >= pring->iotag_max)
			pring->iotag_ctr = 1;
		return pring->iotag_ctr;
	}

	search_start = pring->iotag_ctr;

	do {
		pring->iotag_ctr++;
		if (pring->iotag_ctr >= pring->fast_iotag)
			pring->iotag_ctr = 1;

		if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
			return pring->iotag_ctr;

	} while (pring->iotag_ctr != search_start);

	/*
	 * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
	 */
	lpfc_printf_log(phba,
		KERN_ERR,
		LOG_SLI,
		"%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
		phba->brd_no,
		pring->ringno,
		pring->fast_iotag);
	return (0);
}
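
/*
 * Copy one command IOCB into the ring slot obtained from
 * lpfc_sli_next_iocb_slot() and tell the HBA a new command is
 * available by updating the ring's cmdidx in SLIM.
 */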
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Allocate and set up an iotag
	 */
	nextiocb->iocb.ulpIoTag =
		lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else {
		list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list);
	}

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writeb(pring->cmdidx, phba->MBslimaddr
	       + (SLIMOFF + (pring->ringno * 2)) * 4);
}
static void
lpfc_sli_update_full_ring(struct lpfc_hba * phba,
			  struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
static void
lpfc_sli_update_ring(struct lpfc_hba * phba,
		     struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	wmb();
	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */
}
static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    (phba->hba_state > LPFC_LINK_DOWN) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];

	/* If the ring is active, flag it */
	if (phba->sli.ring[ringno].cmdringaddr) {
		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
			/*
			 * Force update of the local copy of cmdGetInx
			 */
			phba->sli.ring[ringno].local_getidx
				= le32_to_cpu(pgp->cmdGetInx);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
			spin_unlock_irq(phba->host->host_lock);
		}
	}
}
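
/*
 * Validate a mailbox command number.  Known commands are returned
 * unchanged; anything else maps to MBX_SHUTDOWN so the completion
 * handler can treat it as fatal.
 */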
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_READ_LA:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_SLIM:
	case MBX_UNREG_D_ID:
	case MBX_CONFIG_FARP:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_LA64:
	case MBX_FLASH_WR_ULA:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return (ret);
}
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	wait_queue_head_t *pdone_q;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	return;
}
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_dmabuf *mp;

	mp = (struct lpfc_dmabuf *) (pmb->context1);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	mempool_free( pmb, phba->mbox_mem_pool);
	return;
}
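
/*
 * Process a mailbox completion: validate ownership, retire the active
 * mailbox command through its mbox_cmpl callback, then either launch
 * the next queued mailbox command or re-enable IOCB processing on all
 * rings.  Called from the worker thread in SLI2 mode only.
 */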
int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
	MAILBOX_t *mbox;
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_sli *psli;
	int i, rc;
	uint32_t process_next;

	psli = &phba->sli;
	/* We should only get here if we are in SLI2 mode */
	if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
		return (1);
	}

	phba->sli.slistat.mbox_event++;

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	if ((pmb = phba->sli.mbox_active)) {
		pmbox = &pmb->mb;
		mbox = &phba->slim2p->mbx;

		/* First check out the status word */
		lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));

		/* Sanity check to ensure the host owns the mailbox */
		if (pmbox->mbxOwner != OWN_HOST) {
			/* Lets try for a while */
			for (i = 0; i < 10240; i++) {
				/* First copy command data */
				lpfc_sli_pcimem_bcopy(mbox, pmbox,
						      sizeof (uint32_t));
				if (pmbox->mbxOwner == OWN_HOST)
					goto mbout;
			}
			/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"%d:0304 Stray Mailbox Interrupt "
					"mbxCommand x%x mbxStatus x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus);

			spin_lock_irq(phba->host->host_lock);
			phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irq(phba->host->host_lock);
			return (1);
		}

	      mbout:
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {

			/* Unknown mailbox command compl */
			lpfc_printf_log(phba,
				KERN_ERR,
				LOG_MBOX | LOG_SLI,
				"%d:0323 Unknown Mailbox command %x Cmpl\n",
				phba->brd_no,
				pmbox->mbxCommand);
			phba->hba_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			return (0);
		}

		phba->sli.mbox_active = NULL;
		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba,
					KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"%d:0305 Mbox cmd cmpl error - "
					"RETRYing Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					phba->hba_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(phba->host->host_lock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					return (0);
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_MBOX | LOG_SLI,
				"%d:0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl) {
			lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
			pmb->mbox_cmpl(phba,pmb);
		}
	}

	do {
		process_next = 0;	/* by default don't loop */
		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* Process next mailbox command if there is one */
		if ((pmb = lpfc_mbox_get(phba))) {
			spin_unlock_irq(phba->host->host_lock);
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				pmb->mb.mbxStatus = MBX_NOT_FINISHED;
				pmb->mbox_cmpl(phba,pmb);
				process_next = 1;
				continue;	/* loop back */
			}
		} else {
			spin_unlock_irq(phba->host->host_lock);
			/* Turn on IOCB processing */
			for (i = 0; i < phba->sli.num_rings; i++) {
				lpfc_sli_turn_on_ring(phba, i);
			}

			/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
			while (!list_empty(&phba->freebufList)) {
				struct lpfc_dmabuf *mp;

				mp = NULL;
				list_remove_head((&phba->freebufList),
						 mp,
						 struct lpfc_dmabuf,
						 list);
				if (mp) {
					lpfc_mbuf_free(phba, mp->virt,
						       mp->phys);
					kfree(mp);
				}
			}
		}

	} while (process_next);

	return (0);
}
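
/*
 * Route an unsolicited IOCB to the ring's registered receive handler.
 * The R_CTL / TYPE values from word 5 of the IOCB select the handler
 * unless a profile is configured, in which case mask 0's handler
 * always receives the event.
 */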
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t * irsp;
	WORD5 * w5p;
	uint32_t Rctl, Type;
	uint32_t match, i;

	match = 0;
	irsp = &(saveq->iocb);
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *) & (saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
				match = 1;
				break;
			}
		}
	}
	if (match == 0) {
		/* Unexpected Rctl / Type received */
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				phba->brd_no,
				pring->ringno,
				Rctl,
				Type);
	}
	return(1);
}
static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring,
				 struct lpfc_iocbq * prspiocb)
{
	IOCB_t *icmd = NULL;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *cmd_iocb;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint16_t iotag;

	irsp = &prspiocb->iocb;
	iotag = irsp->ulpIoTag;
	cmd_iocb = NULL;

	/* Search through txcmpl from the beginning */
	list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
		icmd = &iocb->iocb;
		if (iotag == icmd->ulpIoTag) {
			/* Found a match. */
			cmd_iocb = iocb;
			list_del(&iocb->list);
			pring->txcmplq_cnt--;
			break;
		}
	}

	return (cmd_iocb);
}
static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
				  struct lpfc_sli_ring * pring,
				  struct lpfc_iocbq * prspiocb)
{
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	if (unlikely(pring->fast_lookup == NULL))
		return NULL;

	/* Use fast lookup based on iotag for completion */
	irsp = &prspiocb->iocb;
	iotag = irsp->ulpIoTag;
	if (iotag < pring->fast_iotag) {
		cmd_iocb = *(pring->fast_lookup + iotag);
		*(pring->fast_lookup + iotag) = NULL;
		if (cmd_iocb) {
			list_del(&cmd_iocb->list);
			pring->txcmplq_cnt--;
			return cmd_iocb;
		} else {
			/*
			 * This is clearly an error.  A ring that uses iotags
			 * should never have an interrupt for a completion that
			 * is not on the ring.  Return NULL and log an error.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0327 Rsp ring %d error - command "
					"completion for iotag x%x not found\n",
					phba->brd_no, pring->ringno, iotag);
			return NULL;
		}
	}

	/*
	 * Rsp ring <ringno> get: iotag <iotag> greater than
	 * configured max <fast_iotag> wd0 <irsp>.  This is an
	 * error.  Just return NULL.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0317 Rsp ring %d get: iotag x%x greater than "
			"configured max x%x wd0 x%x\n",
			phba->brd_no, pring->ringno, iotag, pring->fast_iotag,
			*(((uint32_t *) irsp) + 7));
	return NULL;
}
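
/*
 * Complete a solicited IOCB: find the originating command on the
 * txcmplq by iotag and invoke its completion routine with the host
 * lock dropped.  ELS-ring completions may legitimately be missing
 * their command IOCB after lpfc_els_abort().
 */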
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq * cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(phba->host->host_lock, iflag);
	cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			}
			else {
				if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
					rc = 0;

				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			}
		} else {
			list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list);
		}
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0322 Ring %d handler: unexpected "
				"completion IoTag x%x Data: x%x x%x x%x x%x\n",
				phba->brd_no,
				pring->ringno,
				saveq->iocb.ulpIoTag,
				saveq->iocb.ulpStatus,
				saveq->iocb.un.ulpWord[4],
				saveq->iocb.ulpCommand,
				saveq->iocb.ulpContext);
		}
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
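
/*
 * Fast-path response handling: walk the response ring from rspidx to
 * the port's put index, complete solicited commands through the iotag
 * lookup table, and restart any transmit queue that was held off on a
 * full ring.
 */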
/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no, pring->ringno, portRspPut,
				portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;

		/* All error attention handlers are posted to worker thread */
		phba->work_ha |= HA_ERATT;
		phba->work_hs = HS_FFER3;
		if (phba->work_wait)
			wake_up(phba->work_wait);

		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return 1;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		irsp = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0], irsp->un.ulpWord[1],
					irsp->un.ulpWord[2], irsp->un.ulpWord[3],
					irsp->un.ulpWord[4], irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
				       "Skipping completion\n", __FUNCTION__,
				       irsp->ulpCommand);
				break;
			}

			rspiocbq.iocb.un.ulpWord[4] = irsp->un.ulpWord[4];
			rspiocbq.iocb.ulpStatus = irsp->ulpStatus;
			rspiocbq.iocb.ulpContext = irsp->ulpContext;
			rspiocbq.iocb.ulpIoTag = irsp->ulpIoTag;
			cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba,
								     pring,
								     &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				spin_unlock_irqrestore(
					phba->host->host_lock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(phba->host->host_lock,
						  iflag);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0321 Unknown IOCB command "
					"Data: x%x, x%x x%x x%x x%x\n",
					phba->brd_no, type, irsp->ulpCommand,
					irsp->ulpStatus, irsp->ulpIoTag,
					irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		to_slim = phba->MBslimaddr +
			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	struct lpfc_iocbq *next_iocb;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *saveq;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	unsigned long iflag;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no,
				pring->ringno, portRspPut, portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(phba->host->host_lock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return 1;
	}

	rmb();
	lpfc_iocb_list = &phba->lpfc_iocb_list;
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq,
				 list);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __FUNCTION__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
					      + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}

		pring->iocb_continueq_cnt++;
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.
			 */
			free_saveq = 1;
			saveq = list_get_first(&pring->iocb_continueq,
					       struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;

			pring->stats.iocb_rsp++;

			if (irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"%d:0328 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no,
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
			}

			/*
			 * Fetch the IOCB command type and call the correct
			 * completion routine.  Solicited and Unsolicited
			 * IOCBs on the ELS ring get freed back to the
			 * lpfc_iocb_list by the discovery kernel thread.
			 */
			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
			if (type == LPFC_SOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
							       saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
								 saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_ABORT_IOCB) {
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_txcmpl_ring_search_slow(pring,
								saveq)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
						       phba->host->host_lock,
						       iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							     cmdiocbp, saveq);
						spin_lock_irqsave(
							phba->host->host_lock,
							iflag);
					} else {
						list_add_tail(&cmdiocbp->list,
							      lpfc_iocb_list);
					}
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x x%x x%x x%x\n",
						phba->brd_no,
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
				}
			}

			if (free_saveq) {
				if (!list_empty(&saveq->list)) {
					list_for_each_entry_safe(rspiocbp,
								 next_iocb,
								 &saveq->list,
								 list) {
						list_add_tail(&rspiocbp->list,
							      lpfc_iocb_list);
					}
				}

				list_add_tail(&saveq->list, lpfc_iocb_list);
			}
		}

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
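
/*
 * Fail every IOCB queued on the ring's txq and txcmplq with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED, invoking each completion
 * routine outside the host lock.  Used when the ring is being torn
 * down, for example across a board reset.
 */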
int
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL, *cmd = NULL;
	int errcnt;
	uint16_t iotag;

	errcnt = 0;

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		list_del_init(&iocb->list);
		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else {
			list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
		}
	}
	pring->txq_cnt = 0;
	INIT_LIST_HEAD(&(pring->txq));

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/*
		 * Immediate abort of IOCB, clear fast_lookup entry,
		 * if any, dequeue and call compl
		 */
		iotag = cmd->ulpIoTag;
		if (iotag && pring->fast_lookup &&
		    (iotag < pring->fast_iotag))
			pring->fast_lookup[iotag] = NULL;

		list_del_init(&iocb->list);
		pring->txcmplq_cnt--;

		if (iocb->iocb_cmpl) {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else {
			list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
		}
	}

	INIT_LIST_HEAD(&pring->txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(phba->host->host_lock);

	return errcnt;
}
/******************************************************************************
 * lpfc_sli_send_reset
 *
 * Note: After returning from this function, the HBA cannot be accessed for
 * 1 ms. Since we do not wish to delay in interrupt context, it is the
 * responsibility of the caller to perform the mdelay(1) and flush via readl().
 ******************************************************************************/
static int
lpfc_sli_send_reset(struct lpfc_hba * phba, uint16_t skip_post)
{
	MAILBOX_t *swpmb;
	volatile uint32_t word0;
	void __iomem *to_slim;
	unsigned long flags = 0;

	spin_lock_irqsave(phba->host->host_lock, flags);

	/* A board reset must use REAL SLIM. */
	phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;

	word0 = 0;
	swpmb = (MAILBOX_t *) & word0;
	swpmb->mbxCommand = MBX_RESTART;
	swpmb->mbxHc = 1;

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (skip_post) {
		word0 = 1;	/* This is really setting up word1 */
	} else {
		word0 = 0;	/* This is really setting up word1 */
	}
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &phba->pci_cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (phba->pci_cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	writel(HC_INITFF, phba->HCregaddr);

	phba->hba_state = LPFC_INIT_START;
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	return 0;
}
static int
lpfc_sli_brdreset(struct lpfc_hba * phba, uint16_t skip_post)
{
	struct lpfc_sli_ring *pring;
	int i;
	struct lpfc_dmabuf *mp, *next_mp;
	unsigned long flags = 0;

	lpfc_sli_send_reset(phba, skip_post);
	mdelay(1);

	spin_lock_irqsave(phba->host->host_lock, flags);
	/* Risk the write on flush case ie no delay after the readl */
	readl(phba->HCregaddr); /* flush */
	/* Now toggle INITFF bit set by lpfc_sli_send_reset */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, phba->pci_cfg_value);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->fc_myDID = 0;
	phba->fc_prevDID = Mask_DID;

	/* Reset HBA */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_SLI,
			"%d:0325 Reset HBA Data: x%x x%x x%x\n",
			phba->brd_no,
			phba->hba_state,
			phba->sli.sli_flag,
			skip_post);

	/* Initialize relevant SLI info */
	for (i = 0; i < phba->sli.num_rings; i++) {
		pring = &phba->sli.ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	if (skip_post) {
		mdelay(100);
	} else {
		mdelay(2000);
	}

	spin_lock_irqsave(phba->host->host_lock, flags);
	/* Cleanup preposted buffers on the ELS ring */
	pring = &phba->sli.ring[LPFC_ELS_RING];
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		list_del(&mp->list);
		pring->postbufq_cnt--;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	for (i = 0; i < phba->sli.num_rings; i++)
		lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[i]);

	return 0;
}
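
/*
 * Poll the host status register until the HBA reports both FF and
 * mailbox ready, backing off from 10 ms to 2.5 s between reads and
 * resetting the board once (at iteration 15) before giving up after
 * 20 attempts.
 */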
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 100ms for 5 retries, then every 500ms for 5, then
		 * every 2.5 sec for 5, then reset board and every 2.5 sec for
		 * 4.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0436 Adapter failed to init, "
					"timeout, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0437 Adapter failed to init, "
					"chipset, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 5) {
			msleep(10);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			lpfc_sli_brdreset(phba, 0);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0438 Adapter failed to init, chipset, "
				"status reg x%x\n",
				phba->brd_no,
				status);
		phba->hba_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
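
/*
 * Bring the HBA to an operational state: reset and re-init the
 * chipset (at most twice), issue CONFIG_PORT to enter SLI2 mode, map
 * the rings, and run the post-CONFIG_PORT setup.
 */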
int
lpfc_sli_hba_setup(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	while (resetcount < 2 && !done) {
		phba->hba_state = 0;
		lpfc_sli_brdreset(phba, 0);
		msleep(2500);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->hba_state = 0;
			continue;
		} else if (rc) {
			break;
		}

		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc == MBX_SUCCESS)
			done = 1;
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				phba->brd_no, pmb->mb.mbxCommand,
				pmb->mb.mbxStatus, 0);
			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
		}
	}
	if (!done)
		goto lpfc_sli_hba_setup_error;

	rc = lpfc_sli_ring_map(phba, pmb);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	phba->sli.sli_flag |= LPFC_PROCESS_LA;

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	goto lpfc_sli_hba_setup_exit;
lpfc_sli_hba_setup_error:
	phba->hba_state = LPFC_HBA_ERROR;
lpfc_sli_hba_setup_exit:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
static void
lpfc_mbox_abort(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmbox;
	MAILBOX_t *mb;

	if (phba->sli.mbox_active) {
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;
		pmbox = phba->sli.mbox_active;
		mb = &pmbox->mb;
		phba->sli.mbox_active = NULL;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			(pmbox->mbox_cmpl) (phba, pmbox);
		}
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}

	/* Abort all the non active mailbox commands. */
	spin_lock_irq(phba->host->host_lock);
	pmbox = lpfc_mbox_get(phba);
	while (pmbox) {
		mb = &pmbox->mb;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			spin_unlock_irq(phba->host->host_lock);
			(pmbox->mbox_cmpl) (phba, pmbox);
			spin_lock_irq(phba->host->host_lock);
		}
		pmbox = lpfc_mbox_get(phba);
	}
	spin_unlock_irq(phba->host->host_lock);
	return;
}
/*! lpfc_mbox_timeout
 *
 * \pre
 * \post
 * \param hba Pointer to per struct lpfc_hba structure
 * \param l1  Pointer to the driver's mailbox queue.
 * \return
 *   void
 *
 * \b Description:
 *
 * This routine handles mailbox timeout events at timer interrupt context.
 */
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(phba->host->host_lock, iflag);
	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
		phba->work_hba_events |= WORKER_MBOX_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox;
	MAILBOX_t *mb;

	spin_lock_irq(phba->host->host_lock);
	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}

	phba->work_hba_events &= ~WORKER_MBOX_TMO;

	pmbox = phba->sli.mbox_active;
	mb = &pmbox->mb;

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba,
		KERN_ERR,
		LOG_MBOX | LOG_SLI,
		"%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
		phba->brd_no,
		mb->mbxCommand,
		phba->hba_state,
		phba->sli.sli_flag,
		phba->sli.mbox_active);

	phba->sli.mbox_active = NULL;
	if (pmbox->mbox_cmpl) {
		mb->mbxStatus = MBX_NOT_FINISHED;
		spin_unlock_irq(phba->host->host_lock);
		(pmbox->mbox_cmpl) (phba, pmbox);
		spin_lock_irq(phba->host->host_lock);
	}
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

	spin_unlock_irq(phba->host->host_lock);
	lpfc_mbox_abort(phba);
	return;
}
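
/*
 * Issue a mailbox command in either MBX_NOWAIT or MBX_POLL mode.  In
 * SLI2 mode the command is staged in host SLIM; otherwise it is
 * written directly to HBA SLIM.  A second command arriving while one
 * is active is queued and MBX_BUSY is returned, except that polling
 * while another command is active is an error (MBX_NOT_FINISHED).
 */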
int
lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long drvr_flag = 0;
	volatile uint32_t word0, ldata;
	void __iomem *to_slim;

	psli = &phba->sli;

	spin_lock_irqsave(phba->host->host_lock, drvr_flag);

	mb = &pmbox->mb;
	status = MBX_SUCCESS;

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
			return (MBX_NOT_FINISHED);
		}

		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
			return (MBX_NOT_FINISHED);
		}

		/* Handle STOP IOCB processing flag. This is only meaningful
		 * if we are not polling for mbox completion.
		 */
		if (flag & MBX_STOP_IOCB) {
			flag &= ~MBX_STOP_IOCB;
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
					    LPFC_STOP_IOCB_MBX;
				}
			}
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba,
			KERN_INFO,
			LOG_MBOX | LOG_SLI,
			"%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			mb->mbxCommand,
			phba->hba_state,
			psli->sli_flag,
			flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(phba->host->host_lock,
				       drvr_flag);

		return (MBX_BUSY);
	}

	/* Handle STOP IOCB processing flag. This is only meaningful
	 * if we are not polling for mbox completion.
	 */
	if (flag & MBX_STOP_IOCB) {
		flag &= ~MBX_STOP_IOCB;
		if (flag == MBX_NOWAIT) {
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
					    LPFC_STOP_IOCB_MBX;
				}
			}
		}
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
			return (MBX_NOT_FINISHED);
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba,
		KERN_INFO,
		LOG_MBOX | LOG_SLI,
		"%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
		phba->brd_no,
		mb->mbxCommand,
		phba->hba_state,
		psli->sli_flag,
		flag);

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
		/* First copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
	} else {
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
					      MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((volatile uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI2_ACTIVE;
		}
	}

	wmb();
	/* interrupt board to doit right away */
	writel(CA_MBATT, phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	switch (flag) {
	case MBX_NOWAIT:
		/* Don't wait for it to finish, just return */
		psli->mbox_active = pmbox;
		break;

	case MBX_POLL:
		i = 0;
		psli->mbox_active = NULL;
		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* First read mbox status word */
			word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			word0 = readl(phba->MBslimaddr);
		}

		/* Read the HBA Host Attention Register */
		ha_copy = readl(phba->HAregaddr);

		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP)
		       || !(ha_copy & HA_MBATT)) {
			if (i++ >= 100) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(phba->host->host_lock,
						       drvr_flag);
				return (MBX_NOT_FINISHED);
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);

			/* Can be in interrupt context, do not sleep */
			/* (or might be called with interrupts disabled) */
			mdelay(i);

			spin_lock_irqsave(phba->host->host_lock, drvr_flag);

			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
				/* First copy command data */
				word0 = *((volatile uint32_t *)
					  &phba->slim2p->mbx);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					volatile uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI2_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			ha_copy = readl(phba->HAregaddr);
		}

		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
					      MAILBOX_CMD_SIZE);
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
			    pmbox->context2) {
				lpfc_memcpy_from_slim((void *)pmbox->context2,
					phba->MBslimaddr + DMP_RSP_OFFSET,
					mb->un.varDmp.word_cnt);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
		break;
	}

	spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
	return (status);
}
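
/*
 * lpfc_sli_ringtx_put and lpfc_sli_next_iocb manage the driver-side
 * transmit queue used when the ring itself has no free command slots.
 */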
static int
lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
	return (0);
}
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq ** piocb)
{
	struct lpfc_iocbq * nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding mbox command.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
		goto iocb_busy;

	if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
		case CMD_CLOSE_XRI_CN:
		case CMD_ABORT_XRI_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
		goto iocb_busy;

	/*
	 * Check to see if this is a high priority command.
	 * If so bypass tx queue processing.
	 */
	if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
		     (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
		lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
		piocb = NULL;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
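
/*
 * Set up the static ring topology: ring 0 for FCP (with the extra
 * IOCB entries assigned to it), ring 1 for IP, and ring 2 for ELS/CT
 * with four unsolicited receive masks.
 */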
2133 int
2134 lpfc_sli_setup(struct lpfc_hba *phba)
2135 {
2136 int i, totiocb = 0;
2137 struct lpfc_sli *psli = &phba->sli;
2138 struct lpfc_sli_ring *pring;
2139
2140 psli->num_rings = MAX_CONFIGURED_RINGS;
2141 psli->sli_flag = 0;
2142 psli->fcp_ring = LPFC_FCP_RING;
2143 psli->next_ring = LPFC_FCP_NEXT_RING;
2144 psli->ip_ring = LPFC_IP_RING;
2145
2146 for (i = 0; i < psli->num_rings; i++) {
2147 pring = &psli->ring[i];
2148 switch (i) {
2149 case LPFC_FCP_RING: /* ring 0 - FCP */
2150 /* numCiocb and numRiocb are used in config_port */
2151 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
2152 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
2153 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2154 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2155 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2156 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2157 pring->iotag_ctr = 0;
2158 pring->iotag_max =
2159 (phba->cfg_hba_queue_depth * 2);
2160 pring->fast_iotag = pring->iotag_max;
2161 pring->num_mask = 0;
2162 break;
2163 case LPFC_IP_RING: /* ring 1 - IP */
2164 /* numCiocb and numRiocb are used in config_port */
2165 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2166 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2167 pring->num_mask = 0;
2168 break;
2169 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
2170 /* numCiocb and numRiocb are used in config_port */
2171 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2172 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2173 pring->fast_iotag = 0;
2174 pring->iotag_ctr = 0;
2175 pring->iotag_max = 4096;
2176 pring->num_mask = 4;
2177 pring->prt[0].profile = 0; /* Mask 0 */
2178 pring->prt[0].rctl = FC_ELS_REQ;
2179 pring->prt[0].type = FC_ELS_DATA;
2180 pring->prt[0].lpfc_sli_rcv_unsol_event =
2181 lpfc_els_unsol_event;
2182 pring->prt[1].profile = 0; /* Mask 1 */
2183 pring->prt[1].rctl = FC_ELS_RSP;
2184 pring->prt[1].type = FC_ELS_DATA;
2185 pring->prt[1].lpfc_sli_rcv_unsol_event =
2186 lpfc_els_unsol_event;
2187 pring->prt[2].profile = 0; /* Mask 2 */
2188 /* NameServer Inquiry */
2189 pring->prt[2].rctl = FC_UNSOL_CTL;
2190 /* NameServer */
2191 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
2192 pring->prt[2].lpfc_sli_rcv_unsol_event =
2193 lpfc_ct_unsol_event;
2194 pring->prt[3].profile = 0; /* Mask 3 */
2195 /* NameServer response */
2196 pring->prt[3].rctl = FC_SOL_CTL;
2197 /* NameServer */
2198 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
2199 pring->prt[3].lpfc_sli_rcv_unsol_event =
2200 lpfc_ct_unsol_event;
2201 break;
2202 }
2203 totiocb += (pring->numCiocb + pring->numRiocb);
2204 }
2205 if (totiocb > MAX_SLI2_IOCB) {
2206 /* Too many cmd / rsp ring entries in SLI2 SLIM */
2207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2208 "%d:0462 Too many cmd / rsp ring entries in "
2209 "SLI2 SLIM Data: x%x x%x\n",
2210 phba->brd_no, totiocb, MAX_SLI2_IOCB);
2211 }
2212
2213 return 0;
2214 }
2215
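/*
 * Note (editor's addition, not in the original source): lpfc_sli_queue_setup()
 * below initializes the mailbox and per-ring list heads and allocates the
 * optional fast_lookup iotag table. Mind the inverted-looking return
 * convention: 0 means the kmalloc() failed, 1 means success.
 */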
2216 int
2217 lpfc_sli_queue_setup(struct lpfc_hba * phba)
2218 {
2219 struct lpfc_sli *psli;
2220 struct lpfc_sli_ring *pring;
2221 int i, cnt;
2222
2223 psli = &phba->sli;
2224 spin_lock_irq(phba->host->host_lock);
2225 INIT_LIST_HEAD(&psli->mboxq);
2226 /* Initialize list headers for txq and txcmplq as double linked lists */
2227 for (i = 0; i < psli->num_rings; i++) {
2228 pring = &psli->ring[i];
2229 pring->ringno = i;
2230 pring->next_cmdidx = 0;
2231 pring->local_getidx = 0;
2232 pring->cmdidx = 0;
2233 INIT_LIST_HEAD(&pring->txq);
2234 INIT_LIST_HEAD(&pring->txcmplq);
2235 INIT_LIST_HEAD(&pring->iocb_continueq);
2236 INIT_LIST_HEAD(&pring->postbufq);
2237 cnt = pring->fast_iotag;
2238 spin_unlock_irq(phba->host->host_lock);
2239 if (cnt) {
2240 pring->fast_lookup =
2241 kmalloc(cnt * sizeof (struct lpfc_iocbq *),
2242 GFP_KERNEL);
2243 if (pring->fast_lookup == NULL) {
2244 return (0);
2245 }
2246 memset((char *)pring->fast_lookup, 0,
2247 cnt * sizeof (struct lpfc_iocbq *));
2248 }
2249 spin_lock_irq(phba->host->host_lock);
2250 }
2251 spin_unlock_irq(phba->host->host_lock);
2252 return (1);
2253 }
2254
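/*
 * Note (editor's addition, not in the original source): lpfc_sli_hba_down()
 * below is the teardown path. Every iocb still sitting on a txq is completed
 * with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, active and queued mailbox commands
 * are failed with MBX_NOT_FINISHED, and the board is reset unless it is
 * already in LPFC_HBA_ERROR. Completion handlers are invoked with host_lock
 * dropped, hence the unlock/lock pairs around each callback.
 */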
2255 int
2256 lpfc_sli_hba_down(struct lpfc_hba * phba)
2257 {
2258 struct lpfc_sli *psli;
2259 struct lpfc_sli_ring *pring;
2260 LPFC_MBOXQ_t *pmb;
2261 struct lpfc_iocbq *iocb, *next_iocb;
2262 IOCB_t *icmd = NULL;
2263 int i;
2264 unsigned long flags = 0;
2265
2266 psli = &phba->sli;
2267 lpfc_hba_down_prep(phba);
2268
2269 spin_lock_irqsave(phba->host->host_lock, flags);
2270
2271 for (i = 0; i < psli->num_rings; i++) {
2272 pring = &psli->ring[i];
2273 pring->flag |= LPFC_DEFERRED_RING_EVENT;
2274
2275 /*
2276 * Error everything on the txq since these iocbs have not been
2277 * given to the FW yet.
2278 */
2279 pring->txq_cnt = 0;
2280
2281 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2282 list_del_init(&iocb->list);
2283 if (iocb->iocb_cmpl) {
2284 icmd = &iocb->iocb;
2285 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2286 icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2287 spin_unlock_irqrestore(phba->host->host_lock,
2288 flags);
2289 (iocb->iocb_cmpl) (phba, iocb, iocb);
2290 spin_lock_irqsave(phba->host->host_lock, flags);
2291 } else {
2292 list_add_tail(&iocb->list,
2293 &phba->lpfc_iocb_list);
2294 }
2295 }
2296
2297 INIT_LIST_HEAD(&(pring->txq));
2298
2299 if (pring->fast_lookup) {
2300 kfree(pring->fast_lookup);
2301 pring->fast_lookup = NULL;
2302 }
2303
2304 }
2305
2306 spin_unlock_irqrestore(phba->host->host_lock, flags);
2307
2308 /* Return any active mbox cmds */
2309 del_timer_sync(&psli->mbox_tmo);
2310 spin_lock_irqsave(phba->host->host_lock, flags);
2311 phba->work_hba_events &= ~WORKER_MBOX_TMO;
2312 if (psli->mbox_active) {
2313 pmb = psli->mbox_active;
2314 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2315 if (pmb->mbox_cmpl) {
2316 spin_unlock_irqrestore(phba->host->host_lock, flags);
2317 pmb->mbox_cmpl(phba,pmb);
2318 spin_lock_irqsave(phba->host->host_lock, flags);
2319 }
2320 }
2321 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2322 psli->mbox_active = NULL;
2323
2324 /* Return any pending mbox cmds */
2325 while ((pmb = lpfc_mbox_get(phba)) != NULL) {
2326 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2327 if (pmb->mbox_cmpl) {
2328 spin_unlock_irqrestore(phba->host->host_lock, flags);
2329 pmb->mbox_cmpl(phba,pmb);
2330 spin_lock_irqsave(phba->host->host_lock, flags);
2331 }
2332 }
2333
2334 INIT_LIST_HEAD(&psli->mboxq);
2335
2336 spin_unlock_irqrestore(phba->host->host_lock, flags);
2337
2338 /*
2339 * Provided the hba is not in an error state, reset it. It is not
2340 * capable of IO anymore.
2341 */
2342 if (phba->hba_state != LPFC_HBA_ERROR) {
2343 phba->hba_state = LPFC_INIT_START;
2344 lpfc_sli_brdreset(phba, 1);
2345 }
2346
2347 return 1;
2348 }
2349
2350 void
2351 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2352 {
2353 uint32_t *src = srcp;
2354 uint32_t *dest = destp;
2355 uint32_t ldata;
2356 int i;
2357
2358 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
2359 ldata = *src;
2360 ldata = le32_to_cpu(ldata);
2361 *dest = ldata;
2362 src++;
2363 dest++;
2364 }
2365 }
2366
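/*
 * Note (editor's addition, not in the original source): the copy above moves
 * cnt bytes one 32-bit word at a time, converting each word from
 * little-endian (the adapter's layout) to host order, so it is a byte-wise
 * no-op on x86 but swaps on big-endian hosts. Because the index advances by
 * sizeof(uint32_t), a cnt that is not word-aligned is effectively rounded up
 * to a whole word. Typical use, taken verbatim from earlier in this file:
 */
#if 0
	/* copy mailbox results out of SLI2 SLIM into the driver's mailbox */
	lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, MAILBOX_CMD_SIZE);
#endif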
2367 int
2368 lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2369 struct lpfc_dmabuf * mp)
2370 {
2371 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2372 later */
2373 list_add_tail(&mp->list, &pring->postbufq);
2374
2375 pring->postbufq_cnt++;
2376 return 0;
2377 }
2378
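/*
 * Note (editor's addition, not in the original source):
 * lpfc_sli_ringpostbuf_get() below is the lookup half of the pair: given the
 * DMA address reported by the adapter, it walks postbufq for the matching
 * lpfc_dmabuf, unlinks it, and returns it so the driver can recover the
 * buffer's virtual address.
 */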
2379
2380 struct lpfc_dmabuf *
2381 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2382 dma_addr_t phys)
2383 {
2384 struct lpfc_dmabuf *mp, *next_mp;
2385 struct list_head *slp = &pring->postbufq;
2386
2387 /* Search postbufq, from the beginning, looking for a match on phys */
2388 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2389 if (mp->phys == phys) {
2390 list_del_init(&mp->list);
2391 pring->postbufq_cnt--;
2392 return mp;
2393 }
2394 }
2395
2396 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2397 "%d:0410 Cannot find virtual addr for mapped buf on "
2398 "ring %d Data x%llx x%p x%p x%x\n",
2399 phba->brd_no, pring->ringno, (unsigned long long)phys,
2400 slp->next, slp->prev, pring->postbufq_cnt);
2401 return NULL;
2402 }
2403
2404 static void
2405 lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2406 struct lpfc_iocbq * rspiocb)
2407 {
2408 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2409 /* Free the resources associated with the ELS_REQUEST64 IOCB the driver
2410 * just aborted.
2411 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl
2412 */
2413 if (cmdiocb->context2) {
2414 buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
2415
2416 /* Free the response IOCB before completing the abort
2417 command. */
2418 buf_ptr = NULL;
2419 list_remove_head((&buf_ptr1->list), buf_ptr,
2420 struct lpfc_dmabuf, list);
2421 if (buf_ptr) {
2422 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2423 kfree(buf_ptr);
2424 }
2425 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2426 kfree(buf_ptr1);
2427 }
2428
2429 if (cmdiocb->context3) {
2430 buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
2431 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2432 kfree(buf_ptr);
2433 }
2434
2435 list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
2436 return;
2437 }
2438
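/*
 * Note (editor's addition, not in the original source):
 * lpfc_sli_issue_abort_iotag32() below builds an ABORT_MXRI64_CN iocb keyed
 * by the 32-bit iotag of the command being aborted. It returns 1 when the
 * abort was issued and 0 when it could not be (no free iocbq, an unsupported
 * command type, or the issue itself failed). For ELS requests it adopts the
 * victim's context2/context3 buffers so they outlive the original command,
 * per the comment inside.
 */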
2439 int
2440 lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2441 struct lpfc_sli_ring * pring,
2442 struct lpfc_iocbq * cmdiocb)
2443 {
2444 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
2445 struct lpfc_iocbq *abtsiocbp = NULL;
2446 IOCB_t *icmd = NULL;
2447 IOCB_t *iabt = NULL;
2448
2449 /* issue ABTS for this IOCB based on iotag */
2450 list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list);
2451 if (abtsiocbp == NULL)
2452 return 0;
2453 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2454
2455 iabt = &abtsiocbp->iocb;
2456 icmd = &cmdiocb->iocb;
2457 switch (icmd->ulpCommand) {
2458 case CMD_ELS_REQUEST64_CR:
2459 /* Even though we abort the ELS command, the firmware may access
2460 * the BPL or other resources before it processes our
2461 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
2462 * resources till the actual abort request completes.
2463 */
2464 abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
2465 abtsiocbp->context2 = cmdiocb->context2;
2466 abtsiocbp->context3 = cmdiocb->context3;
2467 cmdiocb->context2 = NULL;
2468 cmdiocb->context3 = NULL;
2469 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
2470 break;
2471 default:
2472 list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
2473 return 0;
2474 }
2475
2476 iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
2477 iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
2478
2479 iabt->ulpLe = 1;
2480 iabt->ulpClass = CLASS3;
2481 iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
2482
2483 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
2484 list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
2485 return 0;
2486 }
2487
2488 return 1;
2489 }
2490
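/*
 * Note (editor's addition, not in the original source):
 * lpfc_sli_validate_iocb_cmd() below is a match predicate with inverted
 * polarity: it returns 0 when the command falls inside the requested scope
 * (LUN, target, exchange context, or whole host) and 1 when it does not, so
 * callers treat 0 as "count/abort this one".
 */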
2491 static int
2492 lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id,
2493 uint64_t lun_id, struct lpfc_iocbq *iocb,
2494 uint32_t ctx, lpfc_ctx_cmd ctx_cmd)
2495 {
2496 int rc = 1;
2497
2498 if (lpfc_cmd == NULL)
2499 return rc;
2500
2501 switch (ctx_cmd) {
2502 case LPFC_CTX_LUN:
2503 if ((lpfc_cmd->pCmd->device->id == tgt_id) &&
2504 (lpfc_cmd->pCmd->device->lun == lun_id))
2505 rc = 0;
2506 break;
2507 case LPFC_CTX_TGT:
2508 if (lpfc_cmd->pCmd->device->id == tgt_id)
2509 rc = 0;
2510 break;
2511 case LPFC_CTX_CTX:
2512 if (iocb->iocb.ulpContext == ctx)
2513 rc = 0;
break; /* without this, the fall-through to LPFC_CTX_HOST clears rc for every iocb */
2514 case LPFC_CTX_HOST:
2515 rc = 0;
2516 break;
2517 default:
2518 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
2519 __FUNCTION__, ctx_cmd);
2520 break;
2521 }
2522
2523 return rc;
2524 }
2525
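/*
 * Illustrative sketch (editor's addition, compiled out): counting the FCP
 * commands still outstanding on one LUN, e.g. while waiting for a reset to
 * drain. All identifiers come from this file; the surrounding polling loop
 * is an assumption.
 */
#if 0
	while (lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				 tgt_id, lun_id, LPFC_CTX_LUN))
		msleep(10);
#endif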
2526 int
2527 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2528 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2529 {
2530 struct lpfc_iocbq *iocb, *next_iocb;
2531 IOCB_t *cmd = NULL;
2532 struct lpfc_scsi_buf *lpfc_cmd;
2533 int sum = 0, ret_val = 0;
2534
2535 /* Next check the txcmplq */
2536 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2537 cmd = &iocb->iocb;
2538
2539 /* Must be an FCP command */
2540 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2541 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2542 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2543 continue;
2544 }
2545
2546 /* context1 MUST be a struct lpfc_scsi_buf */
2547 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2548 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
2549 NULL, 0, ctx_cmd);
2550 if (ret_val != 0)
2551 continue;
2552 sum++;
2553 }
2554 return sum;
2555 }
2556
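/*
 * Illustrative sketch (editor's addition, compiled out): aborting every FCP
 * command the host has outstanding on the ring. With LPFC_CTX_HOST the
 * tgt_id/lun_id/ctx arguments are ignored by the filter above, so zeros are
 * fine; a nonzero return is the number of commands that could not be
 * aborted.
 */
#if 0
	errcnt = lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
				     0, 0, 0, LPFC_CTX_HOST);
#endif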
2557 int
2558 lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2559 uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
2560 lpfc_ctx_cmd abort_cmd)
2561 {
2562 struct lpfc_iocbq *iocb, *next_iocb;
2563 struct lpfc_iocbq *abtsiocb = NULL;
2564 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
2565 IOCB_t *cmd = NULL;
2566 struct lpfc_scsi_buf *lpfc_cmd;
2567 int errcnt = 0, ret_val = 0;
2568
2569 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2570 cmd = &iocb->iocb;
2571
2572 /* Must be an FCP command */
2573 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2574 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2575 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2576 continue;
2577 }
2578
2579 /* context1 MUST be a struct lpfc_scsi_buf */
2580 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2581 ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
2582 iocb, ctx, abort_cmd);
2583 if (ret_val != 0)
2584 continue;
2585
2586 /* issue ABTS for this IOCB based on iotag */
2587 list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq,
2588 list);
2589 if (abtsiocb == NULL) {
2590 errcnt++;
2591 continue;
2592 }
2593 memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));
2594
2595 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
2596 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
2597 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
2598 abtsiocb->iocb.ulpLe = 1;
2599 abtsiocb->iocb.ulpClass = cmd->ulpClass;
2600
2601 if (phba->hba_state >= LPFC_LINK_UP)
2602 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
2603 else
2604 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
2605
2606 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
2607 if (ret_val == IOCB_ERROR) {
2608 list_add_tail(&abtsiocb->list, lpfc_iocb_list);
2609 errcnt++;
2610 continue;
2611 }
2612 }
2613
2614 return errcnt;
2615 }
2616
2617 void
2618 lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
2619 struct lpfc_iocbq * queue1,
2620 struct lpfc_iocbq * queue2)
2621 {
2622 if (queue1->context2 && queue2)
2623 memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
2624
2625 /* The waiter is looking for LPFC_IO_HIPRI bit to be set
2626 as a signal to wake up */
2627 queue1->iocb_flag |= LPFC_IO_HIPRI;
2628 return;
2629 }
2630
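/*
 * Note (editor's addition, not in the original source): the completion
 * handler above and lpfc_sli_issue_iocb_wait_high_priority() below form a
 * polled handshake. The handler copies the response iocb into context2
 * (when the waiter supplied one) and sets LPFC_IO_HIPRI; the waiter, which
 * cannot block on a waitqueue here, spins in msleep() intervals watching
 * for that flag.
 */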
2631 int
2632 lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
2633 struct lpfc_sli_ring * pring,
2634 struct lpfc_iocbq * piocb,
2635 uint32_t flag,
2636 struct lpfc_iocbq * prspiocbq,
2637 uint32_t timeout)
2638 {
2639 int j, delay_time, retval = IOCB_ERROR;
2640
2641 /* The caller must leave context1 empty. */
2642 if (piocb->context_un.hipri_wait_queue != 0) {
2643 return IOCB_ERROR;
2644 }
2645
2646 /*
2647 * If the caller has provided a response iocbq buffer, context2 must
2648 * be NULL or it's an error.
2649 */
2650 if (prspiocbq && piocb->context2) {
2651 return IOCB_ERROR;
2652 }
2653
2654 piocb->context2 = prspiocbq;
2655
2656 /* Setup callback routine and issue the command. */
2657 piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
2658 retval = lpfc_sli_issue_iocb(phba, pring, piocb,
2659 flag | SLI_IOCB_HIGH_PRIORITY);
2660 if (retval != IOCB_SUCCESS) {
2661 piocb->context2 = NULL;
2662 return IOCB_ERROR;
2663 }
2664
2665 /*
2666 * This high-priority iocb was sent out-of-band. Poll for its
2667 * completion rather than wait for a signal. Note that the host_lock
2668 * is held by the midlayer and must be released here to allow the
2669 * interrupt handlers to complete the IO and signal this routine via
2670 * the iocb_flag.
2671 * Also, the delay_time is computed to be one second longer than
2672 * the scsi command timeout to give the FW time to abort on
2673 * timeout rather than the driver just giving up. Typically,
2674 * the midlayer does not specify a time for this command so the
2675 * driver is free to enforce its own timeout.
2676 */
2677
2678 delay_time = ((timeout + 1) * 1000) >> 6;
2679 retval = IOCB_ERROR;
2680 spin_unlock_irq(phba->host->host_lock);
2681 for (j = 0; j < 64; j++) {
2682 msleep(delay_time);
2683 if (piocb->iocb_flag & LPFC_IO_HIPRI) {
2684 piocb->iocb_flag &= ~LPFC_IO_HIPRI;
2685 retval = IOCB_SUCCESS;
2686 break;
2687 }
2688 }
2689
2690 spin_lock_irq(phba->host->host_lock);
2691 piocb->context2 = NULL;
2692 return retval;
2693 }
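/*
 * Note (editor's addition, not in the original source): a worked example of
 * the delay_time arithmetic above. The loop makes 64 passes, so each pass
 * sleeps ((timeout + 1) * 1000) / 64 milliseconds; for a typical 30 second
 * SCSI timeout that is (31 * 1000) >> 6 = 484 ms per pass, or about 31
 * seconds in total, one second longer than the command timeout, as the
 * comment in the function intends.
 */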
2694 int
2695 lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
2696 uint32_t timeout)
2697 {
2698 DECLARE_WAIT_QUEUE_HEAD(done_q);
2699 DECLARE_WAITQUEUE(wq_entry, current);
2700 uint32_t timeleft = 0;
2701 int retval;
2702
2703 /* The caller must leave context1 empty. */
2704 if (pmboxq->context1 != 0) {
2705 return (MBX_NOT_FINISHED);
2706 }
2707
2708 /* setup wake call as mailbox completion callback */
2709 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
2710 /* setup context field to pass wait_queue pointer to wake function */
2711 pmboxq->context1 = &done_q;
2712
2713 /* start to sleep before we wait, to avoid races */
2714 set_current_state(TASK_INTERRUPTIBLE);
2715 add_wait_queue(&done_q, &wq_entry);
2716
2717 /* now issue the command */
2718 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
2719
2720 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
2721 timeleft = schedule_timeout(timeout * HZ);
2722 pmboxq->context1 = NULL;
2723 /* if schedule_timeout returns 0, we timed out and were not
2724 woken up */
2725 if (timeleft == 0) {
2726 retval = MBX_TIMEOUT;
2727 } else {
2728 retval = MBX_SUCCESS;
2729 }
2730 }
2731
2732
2733 set_current_state(TASK_RUNNING);
2734 remove_wait_queue(&done_q, &wq_entry);
2735 return retval;
2736 }
2737
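/*
 * Illustrative sketch (editor's addition, compiled out): a synchronous
 * mailbox call. On MBX_TIMEOUT the command may still complete later in
 * interrupt context, so the caller should not immediately reuse or free
 * pmboxq; treat that ownership rule as an assumption drawn from the code
 * above, which NULLs context1 but leaves the command outstanding.
 */
#if 0
	if (lpfc_sli_issue_mbox_wait(phba, pmboxq, timeout) != MBX_SUCCESS)
		/* MBX_TIMEOUT or MBX_NOT_FINISHED: command did not complete */
		return -EIO;
#endif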
2738 irqreturn_t
2739 lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
2740 {
2741 struct lpfc_hba *phba;
2742 uint32_t ha_copy;
2743 uint32_t work_ha_copy;
2744 unsigned long status;
2745 int i;
2746 uint32_t control;
2747
2748 /*
2749 * Get the driver's phba structure from the dev_id and
2750 * assume the HBA is not interrupting.
2751 */
2752 phba = (struct lpfc_hba *) dev_id;
2753
2754 if (unlikely(!phba))
2755 return IRQ_NONE;
2756
2757 phba->sli.slistat.sli_intr++;
2758
2759 /*
2760 * Call the HBA to see if it is interrupting. If not, don't claim
2761 * the interrupt
2762 */
2763
2764 /* Ignore all interrupts during initialization. */
2765 if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
2766 return IRQ_NONE;
2767
2768 /*
2769 * Read host attention register to determine interrupt source
2770 * Clear Attention Sources, except Error Attention (to
2771 * preserve status) and Link Attention
2772 */
2773 spin_lock(phba->host->host_lock);
2774 ha_copy = readl(phba->HAregaddr);
2775 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
2776 readl(phba->HAregaddr); /* flush */
2777 spin_unlock(phba->host->host_lock);
2778
2779 if (unlikely(!ha_copy))
2780 return IRQ_NONE;
2781
2782 work_ha_copy = ha_copy & phba->work_ha_mask;
2783
2784 if (unlikely(work_ha_copy)) {
2785 if (work_ha_copy & HA_LATT) {
2786 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
2787 /*
2788 * Turn off Link Attention interrupts
2789 * until CLEAR_LA done
2790 */
2791 spin_lock(phba->host->host_lock);
2792 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
2793 control = readl(phba->HCregaddr);
2794 control &= ~HC_LAINT_ENA;
2795 writel(control, phba->HCregaddr);
2796 readl(phba->HCregaddr); /* flush */
2797 spin_unlock(phba->host->host_lock);
2798 }
2799 else
2800 work_ha_copy &= ~HA_LATT;
2801 }
2802
2803 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
2804 for (i = 0; i < phba->sli.num_rings; i++) {
2805 if (work_ha_copy & (HA_RXATT << (4*i))) {
2806 /*
2807 * Turn off Slow Rings interrupts
2808 */
2809 spin_lock(phba->host->host_lock);
2810 control = readl(phba->HCregaddr);
2811 control &= ~(HC_R0INT_ENA << i);
2812 writel(control, phba->HCregaddr);
2813 readl(phba->HCregaddr); /* flush */
2814 spin_unlock(phba->host->host_lock);
2815 }
2816 }
2817 }
2818
2819 if (work_ha_copy & HA_ERATT) {
2820 phba->hba_state = LPFC_HBA_ERROR;
2821 /*
2822 * There was a link/board error. Read the
2823 * status register to retrieve the error event
2824 * and process it.
2825 */
2826 phba->sli.slistat.err_attn_event++;
2827 /* Save status info */
2828 phba->work_hs = readl(phba->HSregaddr);
2829 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
2830 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
2831
2832 /* Clear Chip error bit */
2833 writel(HA_ERATT, phba->HAregaddr);
2834 readl(phba->HAregaddr); /* flush */
2835
2836 /*
2837 * Resetting the HBA is the only reliable way
2838 * to shut down the interrupt when there is
2839 * an ERROR.
2840 */
2841 lpfc_sli_send_reset(phba, phba->hba_state);
2842 }
2843
2844 spin_lock(phba->host->host_lock);
2845 phba->work_ha |= work_ha_copy;
2846 if (phba->work_wait)
2847 wake_up(phba->work_wait);
2848 spin_unlock(phba->host->host_lock);
2849 }
2850
2851 ha_copy &= ~(phba->work_ha_mask);
2852
2853 /*
2854 * Process all events on FCP ring. Take the optimized path for
2855 * FCP IO. Any other IO is slow path and is handled by
2856 * the worker thread.
2857 */
2858 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
2859 status >>= (4*LPFC_FCP_RING);
2860 if (status & HA_RXATT)
2861 lpfc_sli_handle_fast_ring_event(phba,
2862 &phba->sli.ring[LPFC_FCP_RING],
2863 status);
2864 return IRQ_HANDLED;
2865
2866 } /* lpfc_intr_handler */