/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_hba *);
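/*
 * Called by the FC transport to terminate outstanding I/O to a remote
 * port: if the node has a SCSI ID assigned, abort all FCP IOCBs queued
 * to that target.
 */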
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			" to terminate I/O Data x%x\n",
			rport->port_id);
		return;
	}

	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);
	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	spin_unlock_irq(phba->host->host_lock);

	return;
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist * ndlp;
	uint8_t *name;
	int warn_on = 0;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			" for rport in dev_loss_tmo_callbk x%x\n",
			rport->port_id);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	name = (uint8_t *)&ndlp->nlp_portname;
	phba = ndlp->nlp_phba;

	spin_lock_irq(phba->host->host_lock);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
	}
	if (phba->fc_flag & FC_UNLOADING)
		warn_on = 0;

	spin_unlock_irq(phba->host->host_lock);

	if (warn_on) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0203 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
				"%d:0204 Devloss timeout on "
				"WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				"NPort x%x Data: x%x x%x x%x\n",
				phba->brd_no,
				*name, *(name+1), *(name+2), *(name+3),
				*(name+4), *(name+5), *(name+6), *(name+7),
				ndlp->nlp_DID, ndlp->nlp_flag,
				ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(phba->fc_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
	else {
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		lpfc_nlp_put(ndlp);
		put_device(&rport->dev);
	}

	return;
}
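/*
 * Drain the HBA's work list, dispatching each queued event (ELS retry,
 * online/offline transitions, warm start, kill) to its handler. Events
 * that carry a completion are completed so the poster can continue.
 */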
static void
lpfc_work_list_done(struct lpfc_hba * phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(phba->host->host_lock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(phba->host->host_lock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0;
			break;
		case LPFC_EVT_ONLINE:
			if (phba->hba_state < LPFC_LINK_DOWN)
				*(int *)(evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->hba_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
}
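/*
 * One pass of the worker thread: handle attention bits captured in
 * work_ha, run any pending timeout handlers, service slow rings, and
 * finish by draining the deferred work list.
 */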
static void
lpfc_work_done(struct lpfc_hba * phba)
{
	struct lpfc_sli_ring *pring;
	int i;
	uint32_t ha_copy;
	uint32_t control;
	uint32_t work_hba_events;

	spin_lock_irq(phba->host->host_lock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	work_hba_events = phba->work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	if (ha_copy & HA_ERATT)
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	if (work_hba_events & WORKER_DISC_TMO)
		lpfc_disc_timeout_handler(phba);

	if (work_hba_events & WORKER_ELS_TMO)
		lpfc_els_timeout_handler(phba);

	if (work_hba_events & WORKER_MBOX_TMO)
		lpfc_mbox_timeout_handler(phba);

	if (work_hba_events & WORKER_FDMI_TMO)
		lpfc_fdmi_tmo_handler(phba);

	spin_lock_irq(phba->host->host_lock);
	phba->work_hba_events &= ~work_hba_events;
	spin_unlock_irq(phba->host->host_lock);

	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
		pring = &phba->sli.ring[i];
		if ((ha_copy & HA_RXATT)
		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
			if (pring->flag & LPFC_STOP_IOCB_MASK) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
			} else {
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(ha_copy &
								 HA_RXMASK));
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			}
			/*
			 * Turn on Ring interrupts
			 */
			spin_lock_irq(phba->host->host_lock);
			control = readl(phba->HCregaddr);
			control |= (HC_R0INT_ENA << i);
			writel(control, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	lpfc_work_list_done(phba);
}
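/*
 * Wait condition for the worker thread: returns 1 when there is work
 * pending (attention bits, HBA events, queued work list entries) or the
 * thread has been asked to stop.
 */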
static int
check_work_wait_done(struct lpfc_hba *phba) {

	spin_lock_irq(phba->host->host_lock);
	if (phba->work_ha ||
	    phba->work_hba_events ||
	    (!list_empty(&phba->work_list)) ||
	    kthread_should_stop()) {
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	} else {
		spin_unlock_irq(phba->host->host_lock);
		return 0;
	}
}
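/*
 * Worker thread entry point. Sleeps until check_work_wait_done()
 * reports pending work, then runs lpfc_work_done() until asked to stop.
 */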
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

	set_user_nice(current, -20);
	phba->work_wait = &work_waitq;

	while (1) {

		rc = wait_event_interruptible(work_waitq,
					      check_work_wait_done(phba));
		BUG_ON(rc);

		if (kthread_should_stop())
			break;

		lpfc_work_done(phba);

	}
	phba->work_wait = NULL;
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(phba->host->host_lock);

	return 1;
}
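/*
 * Handle a link-down transition: post the FC transport event, unregister
 * firmware default RPIs, flush RSCN and ELS activity, and push every node
 * through DEVICE_RECOVERY so discovery can restart when the link returns.
 */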
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_sli       *psli;
	struct lpfc_nodelist  *ndlp, *next_ndlp;
	LPFC_MBOXQ_t          *mb;
	int                   rc;

	psli = &phba->sli;
	/* sysfs or selective reset may call this routine to clean up */
	if (phba->hba_state >= LPFC_LINK_DOWN) {
		if (phba->hba_state == LPFC_LINK_DOWN)
			return 0;
	}
	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_DOWN;
	spin_unlock_irq(phba->host->host_lock);

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKDOWN, 0);

	/* Clean up any firmware default rpi's */
	if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
		lpfc_unreg_did(phba, 0xffffffff, mb);
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(phba);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(phba);

	/*
	 * Issue a LINK DOWN event to all nodes.
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
		/* free any ndlp's on unused list */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			lpfc_drop_node(phba, ndlp);
		else		/* otherwise, force node recovery. */
			rc = lpfc_disc_state_machine(phba, ndlp, NULL,
						     NLP_EVT_DEVICE_RECOVERY);
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->fc_flag & FC_PT2PT) {
		phba->fc_myDID = 0;
		if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (lpfc_sli_issue_mbox
			    (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(phba->host->host_lock);
	}
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_LBIT;
	spin_unlock_irq(phba->host->host_lock);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(phba);

	/* Must process IOCBs on all rings to handle ABORTed I/Os */
	return 0;
}
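/*
 * Handle a link-up transition: post the FC transport event, reset the
 * discovery flags/counters, and prepare the node list for rediscovery.
 */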
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	fc_host_post_event(phba->host, fc_get_event_number(),
			   FCH_EVT_LINKUP, 0);

	spin_lock_irq(phba->host->host_lock);
	phba->hba_state = LPFC_LINK_UP;
	phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			   FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	phba->fc_flag |= FC_NDISC_ACTIVE;
	phba->fc_ns_retry = 0;
	spin_unlock_irq(phba->host->host_lock);


	if (phba->fc_flag & FC_LBIT) {
		list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
			if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) {
				if (ndlp->nlp_type & NLP_FABRIC) {
					/*
					 * On Linkup its safe to clean up the
					 * ndlp from Fabric connections.
					 */
					lpfc_nlp_set_state(phba, ndlp,
							NLP_STE_UNUSED_NODE);
				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
					/*
					 * Fail outstanding IO now since
					 * device is marked for PLOGI.
					 */
					lpfc_unreg_rpi(phba, ndlp);
				}
			}
		}
	}

	/* free any ndlp's on unused list */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
				 nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			lpfc_drop_node(phba, ndlp);
	}

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	uint32_t control;

	psli = &phba->sli;
	mb = &pmb->mb;
	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0320 CLEAR_LA mbxStatus error x%x hba "
				"state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (phba->fc_flag & FC_ABORT_DISCOVERY)
		goto out;

	phba->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (phba->fc_npr_cnt) {
		lpfc_els_disc_plogi(phba);
	}

	if (!phba->num_disc_nodes) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
	}

	phba->hba_state = LPFC_HBA_READY;

out:
	/* Device Discovery completes */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0225 Device Discovery completes\n",
			phba->brd_no);

	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_ABORT_DISCOVERY;
	if (phba->fc_flag & FC_ESTABLISH_LINK) {
		phba->fc_flag &= ~FC_ESTABLISH_LINK;
	}
	spin_unlock_irq(phba->host->host_lock);

	del_timer_sync(&phba->fc_estabtmo);

	lpfc_can_disctmo(phba);

	/* turn on Link Attention interrupts */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc;

	if (pmb->mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    phba->fc_flag & FC_PUBLIC_LOOP &&
	    !(phba->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  hba_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(phba);
		return;
	}

	/* Start discovery by sending a FLOGI. hba_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	phba->hba_state = LPFC_FLOGI;
	lpfc_set_disctmo(phba);
	lpfc_initial_flogi(phba);
	return;

out:
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"%d:0306 CONFIG_LINK mbxStatus error x%x "
			"HBA state x%x\n",
			phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);

	lpfc_linkdown(phba);

	phba->hba_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
			"%d:0200 CONFIG_LINK bad hba state x%x\n",
			phba->brd_no, phba->hba_state);

	lpfc_clear_la(phba, pmb);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
	rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
	if (rc == MBX_NOT_FINISHED) {
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;


	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"%d:0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x\n",
				phba->brd_no, mb->mbxStatus, phba->hba_state);

		lpfc_linkdown(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto out;
	}

	memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn);
	memcpy((uint8_t *) & phba->fc_nodename,
	       (uint8_t *) & phba->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy((uint8_t *) & phba->fc_portname,
	       (uint8_t *) & phba->fc_sparam.portName,
	       sizeof (struct lpfc_name));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	if (phba->hba_state != LPFC_CLEAR_LA) {
		lpfc_clear_la(phba, pmb);
		pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
		    == MBX_NOT_FINISHED) {
			mempool_free(pmb, phba->mbox_mem_pool);
			lpfc_disc_flush_list(phba);
			psli->ring[(psli->extra_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->fcp_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			psli->ring[(psli->next_ring)].flag &=
			    ~LPFC_STOP_IOCB_EVENT;
			phba->hba_state = LPFC_HBA_READY;
		}
	} else {
		mempool_free(pmb, phba->mbox_mem_pool);
	}
	return;
}
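/*
 * Process a completed READ_LA that reports link up: record link speed and
 * topology, pick up the loop map in loop topologies, then issue READ_SPARAM
 * and CONFIG_LINK mailbox commands to continue link bring-up.
 */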
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	int i;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
	struct lpfc_dmabuf *mp;
	int rc;

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	spin_lock_irq(phba->host->host_lock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		/* Get Loop Map information */

		if (la->il)
			phba->fc_flag |= FC_LBIT;

		phba->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"%d:1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							phba->brd_no,
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		phba->fc_myDID = phba->fc_pref_DID;
		phba->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_linkup(phba);
	if (sparam_mbox) {
		lpfc_read_sparam(phba, sparam_mbox);
		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
		rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			mempool_free(sparam_mbox, phba->mbox_mem_pool);
			if (cfglink_mbox)
				mempool_free(cfglink_mbox,
					     phba->mbox_mem_pool);
			return;
		}
	}

	if (cfglink_mbox) {
		phba->hba_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED)
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
	}
}
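/*
 * Process a READ_LA that reports link down: run the linkdown handling and
 * re-enable link attention interrupts so the next transition is seen.
 */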
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;

	lpfc_linkdown(phba);

	/* turn on Link Attention interrupts - no CLEAR_LA needed */
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);
}
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_LINK_EVENT,
				"%d:1307 READ_LA mbox error x%x state x%x\n",
				phba->brd_no,
				mb->mbxStatus, phba->hba_state);
		lpfc_mbx_issue_link_down(phba);
		phba->hba_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(phba->host->host_lock);
	if (la->pb)
		phba->fc_flag |= FC_BYPASSED_MODE;
	else
		phba->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(phba->host->host_lock);

	if (((phba->fc_eventTag + 1) < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP) {
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
		}
	}

	phba->fc_eventTag = la->eventTag;

	if (la->attType == AT_LINK_UP) {
		phba->fc_stat.LinkUp++;
		if (phba->fc_flag & FC_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"%d:1306 Link Up Event in loop back mode "
				"x%x received Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1303 Link Up Event x%x received "
				"Data: x%x x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				la->granted_AL_PA, la->UlnkSpeed,
				phba->alpa_map[0]);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else {
		phba->fc_stat.LinkDown++;
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"%d:1305 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				phba->brd_no, la->eventTag, phba->fc_eventTag,
				phba->hba_state, phba->fc_flag);
		lpfc_mbx_issue_link_down(phba);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	/* Good status, call state machine */
	lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	lpfc_nlp_put(ndlp);

	return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *ndlp_fdmi;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_nlp_put(ndlp);

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	lpfc_nlp_put(ndlp);	/* Drop the reference from the mbox */

	if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
		/* This NPort has been assigned an NPort_ID by the fabric as a
		 * result of the completed fabric login.  Issue a State Change
		 * Registration (SCR) ELS request to the fabric controller
		 * (SCR_DID) so that this NPort gets RSCN events from the
		 * fabric.
		 */
		lpfc_issue_els_scr(phba, SCR_DID, 0);

		ndlp = lpfc_findnode_did(phba, NameServer_DID);
		if (!ndlp) {
			/* Allocate a new node instance. If the pool is empty,
			 * start the discovery process and skip the Nameserver
			 * login process.  This is attempted again later on.
			 * Otherwise, issue a Port Login (PLOGI) to NameServer.
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
			if (!ndlp) {
				lpfc_disc_start(phba);
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
				mempool_free(pmb, phba->mbox_mem_pool);
				return;
			} else {
				lpfc_nlp_init(phba, ndlp, NameServer_DID);
				ndlp->nlp_type |= NLP_FABRIC;
			}
		}
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
		lpfc_issue_els_plogi(phba, NameServer_DID, 0);
		if (phba->cfg_fdmi_on) {
			ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
						  GFP_KERNEL);
			if (ndlp_fdmi) {
				lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
				ndlp_fdmi->nlp_type |= NLP_FABRIC;
				ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
				lpfc_issue_els_plogi(phba, FDMI_DID, 0);
			}
		}
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mb->mbxStatus) {
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_drop_node(phba, ndlp);

		/* RegLogin failed, so just use loop map to make discovery
		   list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		return;
	}

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	if (phba->hba_state < LPFC_HBA_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
		lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID);
	}

	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
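/*
 * Register (or re-register) a node with the FC transport as a remote port
 * and record the SCSI target id the transport assigns.
 */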
static void
lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port.  But fc_remote_port_add zeros the space to which
	 * rport->dd_data points.  So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    *(struct lpfc_rport_data **) ndlp->rport->dd_data) {
		lpfc_nlp_put(ndlp);
	}
	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;


	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}

	return;
}
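/*
 * Remove a node's remote port from the FC transport, dropping our node
 * reference first when no SCSI target id was ever assigned.
 */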
static void
lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_rport_data *rdata = rport->dd_data;

	if (rport->scsi_target_id == -1) {
		ndlp->rport = NULL;
		rdata->pnode = NULL;
		lpfc_nlp_put(ndlp);
		put_device(&rport->dev);
	}

	fc_remote_port_delete(rport);

	return;
}
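/* Adjust the per-state node counters; count may be negative to decrement. */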
static void
lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count)
{
	spin_lock_irq(phba->host->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		phba->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		phba->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		phba->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		phba->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		phba->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		phba->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		phba->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		phba->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(phba->host->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		phba->nport_event_cnt++;
		lpfc_unregister_remote_port(phba, ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(phba, ndlp);
	}

	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(phba->host->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(phba->host->host_lock);
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < ARRAY_SIZE(states) && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
void
lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state)
{
	int old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0904 NPort state transition x%06x, %s -> %s\n",
			phba->brd_no,
			ndlp->nlp_DID,
			lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			lpfc_nlp_state_name(name2, sizeof(name2), state));
	if (old_state == NLP_STE_NPR_NODE &&
	    (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(phba->host->host_lock);
		list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes);
		spin_unlock_irq(phba->host->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(phba, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(phba, state, 1);
	lpfc_nlp_state_cleanup(phba, ndlp, old_state, state);
}
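/* Take a node off the fc_nodes list and fix up counters and transport state. */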
void
lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
	spin_lock_irq(phba->host->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0);
}
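/* Remove a node from the fc_nodes list and drop the list's reference to it. */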
void
lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(phba, ndlp->nlp_state, -1);
	spin_lock_irq(phba->host->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(phba->host->host_lock);
	lpfc_nlp_put(ndlp);
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba * phba)
{
	uint32_t tmo;

	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be greater than the
		 * ELS/CT timeout. FC spec states we need 3 * ratov for
		 * CT requests.
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba * phba)
{
	/* Turn off discovery timer if it's running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue.
 * Return true if iocb matches the specified nport.
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we
				 * are looking for
				 */
				if ((lpfc_check_sli_ndlp
				     (phba, pring, iocb, ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(phba->host->host_lock);

		}
	}

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del(&iocb->list);

		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}

	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox
				(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		return 1;
	}
	return 0;
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mb;
	LPFC_MBOXQ_t *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0900 Cleanup node for NPort x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
			ndlp->nlp_state, ndlp->nlp_rpi);

	lpfc_dequeue_node(phba, ndlp);

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	lpfc_els_abort(phba, ndlp);
	spin_lock_irq(phba->host->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(phba->host->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	if (!list_empty(&ndlp->els_retry_evt.evt_listp))
		list_del_init(&ndlp->els_retry_evt.evt_listp);

	lpfc_unreg_rpi(phba, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;

	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
		lpfc_cancel_retry_delay_tmo(phba, ndlp);
	}

	lpfc_cleanup_node(phba, ndlp);

	if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
		put_device(&ndlp->rport->dev);
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
{
	D_ID mydid;
	D_ID ndlpdid;
	D_ID matchdid;

	if (did == Bcast_DID)
		return 0;

	if (ndlp->nlp_DID == 0) {
		return 0;
	}

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = phba->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
/* Search for a nodelist entry */
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(phba, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
					"%d:0929 FIND node DID "
					" Data: x%p x%x x%x x%x\n",
					phba->brd_no,
					ndlp, ndlp->nlp_DID,
					ndlp->nlp_flag, data1);
			spin_unlock_irq(phba->host->host_lock);
			return ndlp;
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0932 FIND node did x%x NOT FOUND.\n",
			phba->brd_no, did);
	return NULL;
}
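/*
 * Find or allocate a node for the given DID and mark it for discovery.
 * Returns NULL when the DID should not be discovered (e.g. it is not in
 * the current RSCN payload, or a PLOGI/ADISC is already in flight).
 */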
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
{
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(phba, did);
	if (!ndlp) {
		if ((phba->fc_flag & FC_RSCN_MODE) &&
		    ((lpfc_rscn_payload_check(phba, did) == 0)))
			return NULL;
		ndlp = (struct lpfc_nodelist *)
			mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(phba, ndlp, did);
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		return ndlp;
	}
	if (phba->fc_flag & FC_RSCN_MODE) {
		if (lpfc_rscn_payload_check(phba, did)) {
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			if (ndlp->nlp_flag & NLP_DELAY_TMO)
				lpfc_cancel_retry_delay_tmo(phba, ndlp);
		} else
			ndlp = NULL;
	} else {
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
			return NULL;
		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_hba * phba)
{
	int j;
	uint32_t alpa, index;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->fc_topology != TOPOLOGY_LOOP) {
		return;
	}

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];

			if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
				continue;
			}
			lpfc_setup_disc_node(phba, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (phba->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((phba->fc_myDID & 0xff) == alpa) {
				continue;
			}

			lpfc_setup_disc_node(phba, alpa);
		}
	}
	return;
}
/* Start Link up / RSCN discovery on NPR list */
void
lpfc_disc_start(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;
	int rc;

	psli = &phba->sli;

	if (phba->hba_state <= LPFC_LINK_DOWN) {
		return;
	}
	if (phba->hba_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (phba->hba_state < LPFC_HBA_READY) {
		phba->hba_state = LPFC_DISC_AUTH;
	}
	lpfc_set_disctmo(phba);

	if (phba->fc_prevDID == phba->fc_myDID) {
		did_changed = 0;
	} else {
		did_changed = 1;
	}
	phba->fc_prevDID = phba->fc_myDID;
	phba->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0202 Start Discovery hba state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	/* If our did changed, we MUST do PLOGI */
	list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
		    did_changed) {
			spin_lock_irq(phba->host->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(phba->host->host_lock);
		}
	}

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(phba);

	if (num_sent)
		return;

	if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
		/* If we get here, there is nothing to ADISC */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				lpfc_disc_flush_list(phba);
				psli->ring[(psli->extra_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->fcp_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				psli->ring[(psli->next_ring)].flag &=
					~LPFC_STOP_IOCB_EVENT;
				phba->hba_state = LPFC_HBA_READY;
			}
		}
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(phba);

		if (num_sent)
			return;

		if (phba->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((phba->fc_rscn_id_cnt == 0) &&
			    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(phba->host->host_lock);
				phba->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(phba->host->host_lock);
			} else
				lpfc_els_handle_rscn(phba);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(phba->host->host_lock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del(&iocb->list);

		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}
}
void
lpfc_disc_flush_list(struct lpfc_hba * phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;

	if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
				lpfc_nlp_put(ndlp);
			}
		}
	}
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(phba->host->host_lock, flags);
	if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
		phba->work_hba_events |= WORKER_DISC_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);
	return;
}
static void
lpfc_disc_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
	int rc, clrlaerr = 0;

	if (unlikely(!phba))
		return;

	if (!(phba->fc_flag & FC_DISC_TMO))
		return;

	psli = &phba->sli;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	switch (phba->hba_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
		/* FAN timeout */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_DISCOVERY,
				"%d:0221 FAN timeout\n",
				phba->brd_no);

		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(phba, ndlp);
			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(phba, ndlp);
			}
		}
		phba->hba_state = LPFC_FLOGI;
		lpfc_set_disctmo(phba);
		lpfc_initial_flogi(phba);
		break;

	case LPFC_FLOGI:
	/* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0222 Initial FLOGI timeout\n",
				phba->brd_no);

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);

		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0223 Timeout while waiting for NameServer "
				"login\n", phba->brd_no);

		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(phba, NameServer_DID);
		if (ndlp)
			lpfc_nlp_put(ndlp);
		/* Start discovery */
		lpfc_disc_start(phba);
		break;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"%d:0224 NameServer Query timeout "
				"Data: x%x x%x\n",
				phba->brd_no,
				phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

		ndlp = lpfc_findnode_did(phba, NameServer_DID);
		if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
			if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
				/* Try it one more time */
				rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
				if (rc == 0)
					break;
			}
			phba->fc_ns_retry = 0;
		}

		/* Nothing to authenticate, so CLEAR_LA right now */
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0226 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
			break;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0206 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0227 Node Authentication timeout\n",
				phba->brd_no);
		lpfc_disc_flush_list(phba);
		clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!clearlambox) {
			clrlaerr = 1;
			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
					"%d:0207 Device Discovery "
					"completion error\n",
					phba->brd_no);
			phba->hba_state = LPFC_HBA_ERROR;
			break;
		}
		phba->hba_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, clearlambox);
		clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		rc = lpfc_sli_issue_mbox(phba, clearlambox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(clearlambox, phba->mbox_mem_pool);
			clrlaerr = 1;
		}
		break;

	case LPFC_CLEAR_LA:
	/* CLEAR LA timeout */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_DISCOVERY,
				"%d:0228 CLEAR LA timeout\n",
				phba->brd_no);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		if (phba->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_DISCOVERY,
					"%d:0231 RSCN timeout Data: x%x x%x\n",
					phba->brd_no,
					phba->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(phba);

			lpfc_els_flush_rscn(phba);
			lpfc_disc_flush_list(phba);
		}
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(phba);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		phba->hba_state = LPFC_HBA_READY;
	}

	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_sli *psli;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;

	psli = &phba->sli;
	mb = &pmb->mb;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	mp = (struct lpfc_dmabuf *) (pmb->context1);

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);

	/* Start issuing Fabric-Device Management Interface (FDMI)
	 * command to 0xfffffa (FDMI well known port)
	 */
	if (phba->cfg_fdmi_on == 1) {
		lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
	} else {
		/*
		 * Delay issuing FDMI command if fdmi-on=2
		 * (supporting RPA/hostname)
		 */
		mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
	}

	/* Mailbox took a reference to the node */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
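/* Filter callbacks for __lpfc_find_node(): match a node by RPI or WWPN. */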
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
/*
 * Search node lists for a remote port matching filter criteria.
 * Caller needs to hold host_lock before calling this routine.
 */
struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
		    filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}
/*
 * Search node lists for a remote port matching filter criteria.
 * This routine is used when the caller does NOT have host_lock.
 */
struct lpfc_nodelist *
lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(phba->host->host_lock);
	ndlp = __lpfc_find_node(phba, filter, param);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp;
}
/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the nodelist pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi)
{
	return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi);
}
struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
{
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(phba->host->host_lock);
	ndlp = __lpfc_findnode_rpi(phba, rpi);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp;
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the nodelist pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn)
{
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(phba->host->host_lock);
	ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(phba->host->host_lock);
	return ndlp;
}
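/* Initialize a freshly allocated nodelist entry for the given DID. */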
void
lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->nlp_phba = phba;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	kref_init(&ndlp->kref);
	return;
}
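/*
 * kref release callback: tear the node down and return it to the mempool.
 * lpfc_nlp_get/lpfc_nlp_put manage the reference count around it.
 */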
void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);
	lpfc_nlp_remove(ndlp->nlp_phba, ndlp);
	mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool);
}
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	if (ndlp)
		kref_get(&ndlp->kref);
	return ndlp;
}

int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}