1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/fc/fc_fs.h>
33 #include <linux/nvme-fc-driver.h>
38 #include "lpfc_sli4.h"
40 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc_nvme.h"
44 #include "lpfc_logmsg.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_debugfs.h"
/* Called to verify a rcv'ed ADISC was intended for us. */
/* NOTE(review): extraction-garbled listing -- original line numbers are
 * fused into the text and several lines (storage class/return type, braces,
 * return statements) are missing from this capture.  Visible logic: fail
 * unless the node's RPI is registered AND both the WWNN and WWPN in the
 * ADISC payload match the names cached on this ndlp.  Verify against the
 * pristine source before editing.
 */
52 lpfc_check_adisc(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
53 struct lpfc_name
*nn
, struct lpfc_name
*pn
)
55 /* First, we MUST have a RPI registered */
56 if (!(ndlp
->nlp_flag
& NLP_RPI_REGISTERED
))
59 /* Compare the ADISC rsp WWNN / WWPN matches our internal node
60 * table entry for that node.
/* Node name mismatch -> this ADISC is not for our cached node. */
62 if (memcmp(nn
, &ndlp
->nlp_nodename
, sizeof (struct lpfc_name
)))
/* Port name mismatch -> reject as well. */
65 if (memcmp(pn
, &ndlp
->nlp_portname
, sizeof (struct lpfc_name
)))
68 /* we match, return success */
/* Validate a remote port's service parameters (from PLOGI/FLOGI payload)
 * against the local host service parameters in vport->fc_sparam.  For each
 * advertised class of service the remote's receive-data field size is
 * rebuilt as a 16-bit value and clamped down to the host's value; an
 * unsupported-but-requested class branches to bad_service_param.  On
 * success the remote's WWNN/WWPN are cached on the ndlp.
 * NOTE(review): garbled capture -- original line numbers are embedded and
 * some lines (zero-size checks, returns, the bad_service_param label) are
 * missing from this view.
 */
73 lpfc_check_sparm(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
74 struct serv_parm
*sp
, uint32_t class, int flogi
)
76 volatile struct serv_parm
*hsp
= &vport
->fc_sparam
;
77 uint16_t hsp_value
, ssp_value
= 0;
80 * The receive data field size and buffer-to-buffer receive data field
81 * size entries are 16 bits but are represented as two 8-bit fields in
82 * the driver data structure to account for rsvd bits and other control
83 * bits. Reconstruct and compare the fields as a 16-bit values before
84 * correcting the byte values.
/* Class 1: rebuild both 16-bit receive sizes and clamp remote to host. */
86 if (sp
->cls1
.classValid
) {
88 hsp_value
= ((hsp
->cls1
.rcvDataSizeMsb
<< 8) |
89 hsp
->cls1
.rcvDataSizeLsb
);
90 ssp_value
= ((sp
->cls1
.rcvDataSizeMsb
<< 8) |
91 sp
->cls1
.rcvDataSizeLsb
);
93 goto bad_service_param
;
94 if (ssp_value
> hsp_value
) {
95 sp
->cls1
.rcvDataSizeLsb
=
96 hsp
->cls1
.rcvDataSizeLsb
;
97 sp
->cls1
.rcvDataSizeMsb
=
98 hsp
->cls1
.rcvDataSizeMsb
;
101 } else if (class == CLASS1
)
102 goto bad_service_param
;
/* Class 2: identical validation/clamping as class 1. */
103 if (sp
->cls2
.classValid
) {
105 hsp_value
= ((hsp
->cls2
.rcvDataSizeMsb
<< 8) |
106 hsp
->cls2
.rcvDataSizeLsb
);
107 ssp_value
= ((sp
->cls2
.rcvDataSizeMsb
<< 8) |
108 sp
->cls2
.rcvDataSizeLsb
);
110 goto bad_service_param
;
111 if (ssp_value
> hsp_value
) {
112 sp
->cls2
.rcvDataSizeLsb
=
113 hsp
->cls2
.rcvDataSizeLsb
;
114 sp
->cls2
.rcvDataSizeMsb
=
115 hsp
->cls2
.rcvDataSizeMsb
;
118 } else if (class == CLASS2
)
119 goto bad_service_param
;
/* Class 3: identical validation/clamping as classes 1 and 2. */
120 if (sp
->cls3
.classValid
) {
122 hsp_value
= ((hsp
->cls3
.rcvDataSizeMsb
<< 8) |
123 hsp
->cls3
.rcvDataSizeLsb
);
124 ssp_value
= ((sp
->cls3
.rcvDataSizeMsb
<< 8) |
125 sp
->cls3
.rcvDataSizeLsb
);
127 goto bad_service_param
;
128 if (ssp_value
> hsp_value
) {
129 sp
->cls3
.rcvDataSizeLsb
=
130 hsp
->cls3
.rcvDataSizeLsb
;
131 sp
->cls3
.rcvDataSizeMsb
=
132 hsp
->cls3
.rcvDataSizeMsb
;
135 } else if (class == CLASS3
)
136 goto bad_service_param
;
139 * Preserve the upper four bits of the MSB from the PLOGI response.
140 * These bits contain the Buffer-to-Buffer State Change Number
141 * from the target and need to be passed to the FW.
143 hsp_value
= (hsp
->cmn
.bbRcvSizeMsb
<< 8) | hsp
->cmn
.bbRcvSizeLsb
;
144 ssp_value
= (sp
->cmn
.bbRcvSizeMsb
<< 8) | sp
->cmn
.bbRcvSizeLsb
;
145 if (ssp_value
> hsp_value
) {
146 sp
->cmn
.bbRcvSizeLsb
= hsp
->cmn
.bbRcvSizeLsb
;
/* Keep the remote's BB-SCN (upper nibble), take host's size nibble. */
147 sp
->cmn
.bbRcvSizeMsb
= (sp
->cmn
.bbRcvSizeMsb
& 0xF0) |
148 (hsp
->cmn
.bbRcvSizeMsb
& 0x0F);
/* Parameters acceptable: cache the remote WWNN/WWPN on this node. */
151 memcpy(&ndlp
->nlp_nodename
, &sp
->nodeName
, sizeof (struct lpfc_name
));
152 memcpy(&ndlp
->nlp_portname
, &sp
->portName
, sizeof (struct lpfc_name
));
/* bad_service_param path: log the offending WWN and report failure. */
155 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
157 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
158 "invalid service parameters. Ignoring device.\n",
160 sp
->nodeName
.u
.wwn
[0], sp
->nodeName
.u
.wwn
[1],
161 sp
->nodeName
.u
.wwn
[2], sp
->nodeName
.u
.wwn
[3],
162 sp
->nodeName
.u
.wwn
[4], sp
->nodeName
.u
.wwn
[5],
163 sp
->nodeName
.u
.wwn
[6], sp
->nodeName
.u
.wwn
[7]);
/* Locate the ELS response payload pointer for a completed ELS command.
 * Returns (in the missing portion of this capture, presumably via ptr) a
 * pointer just past the leading command word of the response buffer; when
 * the payload is unavailable, the response status is forced to
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED so callers see an error.
 * NOTE(review): garbled capture -- declarations of irsp/lp/ptr, NULL
 * checks, and return statements are missing lines here.
 */
168 lpfc_check_elscmpl_iocb(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
169 struct lpfc_iocbq
*rspiocb
)
171 struct lpfc_dmabuf
*pcmd
, *prsp
;
176 irsp
= &rspiocb
->iocb
;
177 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
179 /* For lpfc_els_abort, context2 could be zero'ed to delay
180 * freeing associated memory till after ABTS completes.
/* First buffer on the cmd's list is the response DMA buffer. */
183 prsp
= list_get_first(&pcmd
->list
, struct lpfc_dmabuf
,
186 lp
= (uint32_t *) prsp
->virt
;
/* Skip the leading ELS command word to reach the payload proper. */
187 ptr
= (void *)((uint8_t *)lp
+ sizeof(uint32_t));
190 /* Force ulpStatus error since we are returning NULL ptr */
191 if (!(irsp
->ulpStatus
)) {
192 irsp
->ulpStatus
= IOSTAT_LOCAL_REJECT
;
193 irsp
->un
.ulpWord
[4] = IOERR_SLI_ABORTED
;
203 * Free resources / clean up outstanding I/Os
204 * associated with a LPFC_NODELIST entry. This
205 * routine effectively results in a "software abort".
/* NOTE(review): garbled capture -- braces, loop bodies' closing lines and
 * some statements are missing.  Visible flow: abort fabric IOs for the
 * node, collect matching ELS IOs from the txcmplq under hbalock (plus the
 * per-ring lock on SLI4) onto a local list and issue abort iotags for
 * them, then move matching txq entries to the list and cancel them with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED; finally cancel any retry delay
 * timer for the node.
 */
208 lpfc_els_abort(struct lpfc_hba
*phba
, struct lpfc_nodelist
*ndlp
)
210 LIST_HEAD(abort_list
);
211 struct lpfc_sli_ring
*pring
;
212 struct lpfc_iocbq
*iocb
, *next_iocb
;
214 pring
= lpfc_phba_elsring(phba
);
216 /* Abort outstanding I/O on NPort <nlp_DID> */
217 lpfc_printf_vlog(ndlp
->vport
, KERN_INFO
, LOG_DISCOVERY
,
218 "2819 Abort outstanding I/O on NPort x%x "
219 "Data: x%x x%x x%x\n",
220 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_state
,
222 /* Clean up all fabric IOs first.*/
223 lpfc_fabric_abort_nport(ndlp
);
226 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
227 * of all ELS IOs that need an ABTS. The IOs need to stay on the
228 * txcmplq so that the abort operation completes them successfully.
230 spin_lock_irq(&phba
->hbalock
);
231 if (phba
->sli_rev
== LPFC_SLI_REV4
)
232 spin_lock(&pring
->ring_lock
);
233 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txcmplq
, list
) {
234 /* Add to abort_list on on NDLP match. */
235 if (lpfc_check_sli_ndlp(phba
, pring
, iocb
, ndlp
))
/* dlist is used so the iocb stays linked on the txcmplq via ->list. */
236 list_add_tail(&iocb
->dlist
, &abort_list
);
238 if (phba
->sli_rev
== LPFC_SLI_REV4
)
239 spin_unlock(&pring
->ring_lock
);
240 spin_unlock_irq(&phba
->hbalock
);
242 /* Abort the targeted IOs and remove them from the abort list. */
243 list_for_each_entry_safe(iocb
, next_iocb
, &abort_list
, dlist
) {
244 spin_lock_irq(&phba
->hbalock
);
245 list_del_init(&iocb
->dlist
);
246 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
);
247 spin_unlock_irq(&phba
->hbalock
);
/* Reuse the (now empty) local list for txq entries. */
250 INIT_LIST_HEAD(&abort_list
);
252 /* Now process the txq */
253 spin_lock_irq(&phba
->hbalock
);
254 if (phba
->sli_rev
== LPFC_SLI_REV4
)
255 spin_lock(&pring
->ring_lock
);
257 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txq
, list
) {
258 /* Check to see if iocb matches the nport we are looking for */
259 if (lpfc_check_sli_ndlp(phba
, pring
, iocb
, ndlp
)) {
260 list_del_init(&iocb
->list
);
261 list_add_tail(&iocb
->list
, &abort_list
);
265 if (phba
->sli_rev
== LPFC_SLI_REV4
)
266 spin_unlock(&pring
->ring_lock
);
267 spin_unlock_irq(&phba
->hbalock
);
269 /* Cancel all the IOCBs from the completions list */
270 lpfc_sli_cancel_iocbs(phba
, &abort_list
,
271 IOSTAT_LOCAL_REJECT
, IOERR_SLI_ABORTED
);
273 lpfc_cancel_retry_delay_tmo(phba
->pport
, ndlp
);
/* Handle a received PLOGI for this vport/node.  Validates the payload's
 * WWPN/WWNN and service parameters (rejecting with LS_RJT on failure),
 * caches class-of-service support, performs an implicit logout if the
 * node is already logged in, handles pt2pt NPortId assignment and TOV
 * updates, registers the RPI via a queued REG_LOGIN mailbox, and finally
 * ACCs (or, for NPIV restricted login, rejects) the PLOGI.
 * NOTE(review): severely garbled capture -- many lines (declarations for
 * stat/lp/icmd/mbox/rc/ed_tov, return statements, braces, else arms,
 * error-path labels) are missing.  Comments annotate visible logic only.
 */
278 lpfc_rcv_plogi(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
279 struct lpfc_iocbq
*cmdiocb
)
281 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
282 struct lpfc_hba
*phba
= vport
->phba
;
283 struct lpfc_dmabuf
*pcmd
;
284 uint64_t nlp_portwwn
= 0;
287 struct serv_parm
*sp
;
293 memset(&stat
, 0, sizeof (struct ls_rjt
));
294 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
295 lp
= (uint32_t *) pcmd
->virt
;
/* Service parameters start one command word into the payload. */
296 sp
= (struct serv_parm
*) ((uint8_t *) lp
+ sizeof (uint32_t));
/* Reject a zero WWPN outright. */
297 if (wwn_to_u64(sp
->portName
.u
.wwn
) == 0) {
298 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
299 "0140 PLOGI Reject: invalid nname\n");
300 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
301 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_INVALID_PNAME
;
302 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
,
/* Reject a zero WWNN as well. */
306 if (wwn_to_u64(sp
->nodeName
.u
.wwn
) == 0) {
307 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
308 "0141 PLOGI Reject: invalid pname\n");
309 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
310 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_INVALID_NNAME
;
311 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
,
/* Remember the previously cached WWPN before check_sparm overwrites it. */
316 nlp_portwwn
= wwn_to_u64(ndlp
->nlp_portname
.u
.wwn
);
317 if ((lpfc_check_sparm(vport
, ndlp
, sp
, CLASS3
, 0) == 0)) {
318 /* Reject this request because invalid parameters */
319 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
320 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_SPARM_OPTIONS
;
321 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
,
325 icmd
= &cmdiocb
->iocb
;
327 /* PLOGI chkparm OK */
328 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
329 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
331 ndlp
->nlp_DID
, ndlp
->nlp_state
, ndlp
->nlp_flag
,
332 ndlp
->nlp_rpi
, vport
->port_state
,
/* Record FCP class preference and class-of-service support bits. */
335 if (vport
->cfg_fcp_class
== 2 && sp
->cls2
.classValid
)
336 ndlp
->nlp_fcp_info
|= CLASS2
;
338 ndlp
->nlp_fcp_info
|= CLASS3
;
340 ndlp
->nlp_class_sup
= 0;
341 if (sp
->cls1
.classValid
)
342 ndlp
->nlp_class_sup
|= FC_COS_CLASS1
;
343 if (sp
->cls2
.classValid
)
344 ndlp
->nlp_class_sup
|= FC_COS_CLASS2
;
345 if (sp
->cls3
.classValid
)
346 ndlp
->nlp_class_sup
|= FC_COS_CLASS3
;
347 if (sp
->cls4
.classValid
)
348 ndlp
->nlp_class_sup
|= FC_COS_CLASS4
;
/* Max frame size from BB receive size (lower 12 bits). */
350 ((sp
->cmn
.bbRcvSizeMsb
& 0x0F) << 8) | sp
->cmn
.bbRcvSizeLsb
;
352 /* if already logged in, do implicit logout */
353 switch (ndlp
->nlp_state
) {
354 case NLP_STE_NPR_NODE
:
355 if (!(ndlp
->nlp_flag
& NLP_NPR_ADISC
))
357 case NLP_STE_REG_LOGIN_ISSUE
:
358 case NLP_STE_PRLI_ISSUE
:
359 case NLP_STE_UNMAPPED_NODE
:
360 case NLP_STE_MAPPED_NODE
:
361 /* lpfc_plogi_confirm_nport skips fabric did, handle it here */
362 if (!(ndlp
->nlp_type
& NLP_FABRIC
)) {
363 lpfc_els_rsp_acc(vport
, ELS_CMD_PLOGI
, cmdiocb
,
/* Warn if the remote's WWPN changed from what we had cached. */
367 if (nlp_portwwn
!= 0 &&
368 nlp_portwwn
!= wwn_to_u64(sp
->portName
.u
.wwn
))
369 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
370 "0143 PLOGI recv'd from DID: x%x "
371 "WWPN changed: old %llx new %llx\n",
373 (unsigned long long)nlp_portwwn
,
375 wwn_to_u64(sp
->portName
.u
.wwn
));
377 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
378 /* rport needs to be unregistered first */
379 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
383 /* Check for Nport to NPort pt2pt protocol */
384 if ((vport
->fc_flag
& FC_PT2PT
) &&
385 !(vport
->fc_flag
& FC_PT2PT_PLOGI
)) {
386 /* rcv'ed PLOGI decides what our NPortId will be */
387 vport
->fc_myDID
= icmd
->un
.rcvels
.parmRo
;
389 ed_tov
= be32_to_cpu(sp
->cmn
.e_d_tov
);
390 if (sp
->cmn
.edtovResolution
) {
391 /* E_D_TOV ticks are in nanoseconds */
392 ed_tov
= (phba
->fc_edtov
+ 999999) / 1000000;
396 * For pt-to-pt, use the larger EDTOV
399 if (ed_tov
> phba
->fc_edtov
)
400 phba
->fc_edtov
= ed_tov
;
401 phba
->fc_ratov
= (2 * phba
->fc_edtov
) / 1000;
403 memcpy(&phba
->fc_fabparam
, sp
, sizeof(struct serv_parm
));
405 /* Issue config_link / reg_vfi to account for updated TOV's */
407 if (phba
->sli_rev
== LPFC_SLI_REV4
)
408 lpfc_issue_reg_vfi(vport
);
410 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
413 lpfc_config_link(phba
, mbox
);
414 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
416 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
417 if (rc
== MBX_NOT_FINISHED
) {
418 mempool_free(mbox
, phba
->mbox_mem_pool
);
423 lpfc_can_disctmo(vport
);
/* Allocate a mailbox for the REG_LOGIN of this RPI. */
426 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
430 /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
431 if (phba
->sli_rev
== LPFC_SLI_REV4
)
432 lpfc_unreg_rpi(vport
, ndlp
);
434 rc
= lpfc_reg_rpi(phba
, vport
->vpi
, icmd
->un
.rcvels
.remoteID
,
435 (uint8_t *) sp
, mbox
, ndlp
->nlp_rpi
);
437 mempool_free(mbox
, phba
->mbox_mem_pool
);
441 /* ACC PLOGI rsp command needs to execute first,
442 * queue this mbox command to be processed later.
444 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_reg_login
;
446 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
447 * command issued in lpfc_cmpl_els_acc().
450 spin_lock_irq(shost
->host_lock
);
451 ndlp
->nlp_flag
|= (NLP_ACC_REGLOGIN
| NLP_RCV_PLOGI
);
452 spin_unlock_irq(shost
->host_lock
);
455 * If there is an outstanding PLOGI issued, abort it before
456 * sending ACC rsp for received PLOGI. If pending plogi
457 * is not canceled here, the plogi will be rejected by
458 * remote port and will be retried. On a configuration with
459 * single discovery thread, this will cause a huge delay in
460 * discovery. Also this will cause multiple state machines
461 * running in parallel for this node.
463 if (ndlp
->nlp_state
== NLP_STE_PLOGI_ISSUE
) {
464 /* software abort outstanding PLOGI */
465 lpfc_els_abort(phba
, ndlp
);
468 if ((vport
->port_type
== LPFC_NPIV_PORT
&&
469 vport
->cfg_restrict_login
)) {
471 /* In order to preserve RPIs, we want to cleanup
472 * the default RPI the firmware created to rcv
473 * this ELS request. The only way to do this is
474 * to register, then unregister the RPI.
476 spin_lock_irq(shost
->host_lock
);
477 ndlp
->nlp_flag
|= NLP_RM_DFLT_RPI
;
478 spin_unlock_irq(shost
->host_lock
);
479 stat
.un
.b
.lsRjtRsnCode
= LSRJT_INVALID_CMD
;
480 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_NOTHING_MORE
;
481 rc
= lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
,
484 mempool_free(mbox
, phba
->mbox_mem_pool
);
/* Normal path: ACC the PLOGI, passing the REG_LOGIN mbox along. */
487 rc
= lpfc_els_rsp_acc(vport
, ELS_CMD_PLOGI
, cmdiocb
, ndlp
, mbox
);
489 mempool_free(mbox
, phba
->mbox_mem_pool
);
/* Out-of-resource reject path. */
492 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
493 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_OUT_OF_RESOURCE
;
494 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
499 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
500 * @phba: pointer to lpfc hba data structure.
501 * @mboxq: pointer to mailbox object
503 * This routine is invoked to issue a completion to a rcv'ed
504 * ADISC or PDISC after the paused RPI has been resumed.
/* NOTE(review): garbled capture -- declaration of cmd, the kfree of the
 * cloned elsiocb and the closing lines are missing from this view.
 */
507 lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
509 struct lpfc_vport
*vport
;
510 struct lpfc_iocbq
*elsiocb
;
511 struct lpfc_nodelist
*ndlp
;
/* The cloned cmd iocb and node were stashed on the mailbox contexts. */
514 elsiocb
= (struct lpfc_iocbq
*)mboxq
->context1
;
515 ndlp
= (struct lpfc_nodelist
*) mboxq
->context2
;
516 vport
= mboxq
->vport
;
/* drvrTimeout was borrowed to carry the original ELS command code. */
517 cmd
= elsiocb
->drvrTimeout
;
519 if (cmd
== ELS_CMD_ADISC
) {
520 lpfc_els_rsp_adisc_acc(vport
, elsiocb
, ndlp
);
522 lpfc_els_rsp_acc(vport
, ELS_CMD_PLOGI
, elsiocb
,
526 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/* Handle a received ADISC or PDISC.  If the payload's names match our
 * cached node (lpfc_check_adisc) the request is ACC'ed -- on SLI4 only
 * after resuming the paused RPI via lpfc_sli4_resume_rpi with a cloned
 * iocb -- and the node moves to MAPPED/UNMAPPED.  Otherwise it is
 * rejected with LSEXP_SPARM_OPTIONS and the node is parked in NPR with a
 * 1 second delayed PLOGI.
 * NOTE(review): garbled capture -- declarations (lp, cmd, icmd, ap, stat),
 * braces, returns and some else arms are missing lines here.
 */
530 lpfc_rcv_padisc(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
531 struct lpfc_iocbq
*cmdiocb
)
533 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
534 struct lpfc_iocbq
*elsiocb
;
535 struct lpfc_dmabuf
*pcmd
;
536 struct serv_parm
*sp
;
537 struct lpfc_name
*pnn
, *ppn
;
544 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
545 lp
= (uint32_t *) pcmd
->virt
;
/* ADISC payload carries names directly; PDISC carries serv_parm. */
548 if (cmd
== ELS_CMD_ADISC
) {
550 pnn
= (struct lpfc_name
*) & ap
->nodeName
;
551 ppn
= (struct lpfc_name
*) & ap
->portName
;
553 sp
= (struct serv_parm
*) lp
;
554 pnn
= (struct lpfc_name
*) & sp
->nodeName
;
555 ppn
= (struct lpfc_name
*) & sp
->portName
;
558 icmd
= &cmdiocb
->iocb
;
559 if (icmd
->ulpStatus
== 0 && lpfc_check_adisc(vport
, ndlp
, pnn
, ppn
)) {
562 * As soon as we send ACC, the remote NPort can
563 * start sending us data. Thus, for SLI4 we must
564 * resume the RPI before the ACC goes out.
566 if (vport
->phba
->sli_rev
== LPFC_SLI_REV4
) {
567 elsiocb
= kmalloc(sizeof(struct lpfc_iocbq
),
571 /* Save info from cmd IOCB used in rsp */
572 memcpy((uint8_t *)elsiocb
, (uint8_t *)cmdiocb
,
573 sizeof(struct lpfc_iocbq
));
575 /* Save the ELS cmd */
576 elsiocb
->drvrTimeout
= cmd
;
/* ACC is sent from lpfc_mbx_cmpl_resume_rpi once the RPI resumes. */
578 lpfc_sli4_resume_rpi(ndlp
,
579 lpfc_mbx_cmpl_resume_rpi
, elsiocb
);
/* SLI3 (or fallthrough) path: ACC immediately. */
584 if (cmd
== ELS_CMD_ADISC
) {
585 lpfc_els_rsp_adisc_acc(vport
, cmdiocb
, ndlp
);
587 lpfc_els_rsp_acc(vport
, ELS_CMD_PLOGI
, cmdiocb
,
591 /* If we are authenticated, move to the proper state */
592 if (ndlp
->nlp_type
& NLP_FCP_TARGET
)
593 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_MAPPED_NODE
);
595 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
599 /* Reject this request because invalid parameters */
600 stat
.un
.b
.lsRjtRsvd0
= 0;
601 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
602 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_SPARM_OPTIONS
;
603 stat
.un
.b
.vendorUnique
= 0;
604 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
/* Park in NPR; retry PLOGI after a 1 second delay. */
607 mod_timer(&ndlp
->nlp_delayfunc
, jiffies
+ msecs_to_jiffies(1000));
609 spin_lock_irq(shost
->host_lock
);
610 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
611 spin_unlock_irq(shost
->host_lock
);
612 ndlp
->nlp_last_elscmd
= ELS_CMD_PLOGI
;
613 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
614 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
/* Handle a received LOGO (or PRLO) for a node.  ACCs the first LOGO only
 * (NLP_LOGO_ACC guards repeats), then: for a fabric LOGO, takes the link
 * down, marks FC_VPORT_LOGO_RCVD, and either re-instantiates the vlink
 * via a delayed FDISC (when other vlinks are active and not unloading) or
 * retries physical port discovery; for non-fabric targets it schedules a
 * delayed PLOGI.  The node ends in NPR state with ADISC cleared.
 * NOTE(review): garbled capture -- loop increments, braces, returns and
 * some else arms are missing lines here.
 */
619 lpfc_rcv_logo(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
620 struct lpfc_iocbq
*cmdiocb
, uint32_t els_cmd
)
622 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
623 struct lpfc_hba
*phba
= vport
->phba
;
624 struct lpfc_vport
**vports
;
625 int i
, active_vlink_present
= 0 ;
627 /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
628 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
629 * PLOGIs during LOGO storms from a device.
631 spin_lock_irq(shost
->host_lock
);
632 ndlp
->nlp_flag
|= NLP_LOGO_ACC
;
633 spin_unlock_irq(shost
->host_lock
);
634 if (els_cmd
== ELS_CMD_PRLO
)
635 lpfc_els_rsp_acc(vport
, ELS_CMD_PRLO
, cmdiocb
, ndlp
, NULL
);
637 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
/* Fabric LOGO: drop the link and decide how to re-instantiate. */
638 if (ndlp
->nlp_DID
== Fabric_DID
) {
639 if (vport
->port_state
<= LPFC_FDISC
)
641 lpfc_linkdown_port(vport
);
642 spin_lock_irq(shost
->host_lock
);
643 vport
->fc_flag
|= FC_VPORT_LOGO_RCVD
;
644 spin_unlock_irq(shost
->host_lock
);
/* Scan sibling vports for any still-active virtual link. */
645 vports
= lpfc_create_vport_work_array(phba
);
647 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
;
649 if ((!(vports
[i
]->fc_flag
&
650 FC_VPORT_LOGO_RCVD
)) &&
651 (vports
[i
]->port_state
> LPFC_FDISC
)) {
652 active_vlink_present
= 1;
656 lpfc_destroy_vport_work_array(phba
, vports
);
660 * Don't re-instantiate if vport is marked for deletion.
661 * If we are here first then vport_delete is going to wait
662 * for discovery to complete.
664 if (!(vport
->load_flag
& FC_UNLOADING
) &&
665 active_vlink_present
) {
667 * If there are other active VLinks present,
668 * re-instantiate the Vlink using FDISC.
670 mod_timer(&ndlp
->nlp_delayfunc
,
671 jiffies
+ msecs_to_jiffies(1000));
672 spin_lock_irq(shost
->host_lock
);
673 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
674 spin_unlock_irq(shost
->host_lock
);
675 ndlp
->nlp_last_elscmd
= ELS_CMD_FDISC
;
676 vport
->port_state
= LPFC_FDISC
;
/* No active vlink: restart physical port discovery. */
678 spin_lock_irq(shost
->host_lock
);
679 phba
->pport
->fc_flag
&= ~FC_LOGO_RCVD_DID_CHNG
;
680 spin_unlock_irq(shost
->host_lock
);
681 lpfc_retry_pport_discovery(phba
);
683 } else if ((!(ndlp
->nlp_type
& NLP_FABRIC
) &&
684 ((ndlp
->nlp_type
& NLP_FCP_TARGET
) ||
685 !(ndlp
->nlp_type
& NLP_FCP_INITIATOR
))) ||
686 (ndlp
->nlp_state
== NLP_STE_ADISC_ISSUE
)) {
687 /* Only try to re-login if this is NOT a Fabric Node */
688 mod_timer(&ndlp
->nlp_delayfunc
,
689 jiffies
+ msecs_to_jiffies(1000 * 1));
690 spin_lock_irq(shost
->host_lock
);
691 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
692 spin_unlock_irq(shost
->host_lock
);
694 ndlp
->nlp_last_elscmd
= ELS_CMD_PLOGI
;
697 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
698 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
700 spin_lock_irq(shost
->host_lock
);
701 ndlp
->nlp_flag
&= ~NLP_NPR_ADISC
;
702 spin_unlock_irq(shost
->host_lock
);
703 /* The driver has to wait until the ACC completes before it continues
704 * processing the LOGO. The action will resume in
705 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
706 * unreg_login, the driver waits so the ACC does not get aborted.
/* Parse a received PRLI payload and update the node's FC4 type flags
 * (FCP/NVME initiator/target), first-burst and FCP-2 capability, then
 * propagate the resulting roles to the SCSI transport rport (unless the
 * HBA is NVMe-only).
 * NOTE(review): garbled capture -- declarations of lp/npr/roles, braces
 * and the FCP-2 retry-config condition around line 745/746 are missing
 * lines here.
 */
712 lpfc_rcv_prli(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
713 struct lpfc_iocbq
*cmdiocb
)
715 struct lpfc_hba
*phba
= vport
->phba
;
716 struct lpfc_dmabuf
*pcmd
;
719 struct fc_rport
*rport
= ndlp
->rport
;
722 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
723 lp
= (uint32_t *) pcmd
->virt
;
724 npr
= (PRLI
*) ((uint8_t *) lp
+ sizeof (uint32_t));
/* Reset type/capability flags before re-deriving them from the PRLI. */
726 ndlp
->nlp_type
&= ~(NLP_FCP_TARGET
| NLP_FCP_INITIATOR
);
727 ndlp
->nlp_fcp_info
&= ~NLP_FCP_2_DEVICE
;
728 ndlp
->nlp_flag
&= ~NLP_FIRSTBURST
;
729 if ((npr
->prliType
== PRLI_FCP_TYPE
) ||
730 (npr
->prliType
== PRLI_NVME_TYPE
)) {
731 if (npr
->initiatorFunc
) {
732 if (npr
->prliType
== PRLI_FCP_TYPE
)
733 ndlp
->nlp_type
|= NLP_FCP_INITIATOR
;
734 if (npr
->prliType
== PRLI_NVME_TYPE
)
735 ndlp
->nlp_type
|= NLP_NVME_INITIATOR
;
737 if (npr
->targetFunc
) {
738 if (npr
->prliType
== PRLI_FCP_TYPE
)
739 ndlp
->nlp_type
|= NLP_FCP_TARGET
;
740 if (npr
->prliType
== PRLI_NVME_TYPE
)
741 ndlp
->nlp_type
|= NLP_NVME_TARGET
;
742 if (npr
->writeXferRdyDis
)
743 ndlp
->nlp_flag
|= NLP_FIRSTBURST
;
746 ndlp
->nlp_fcp_info
|= NLP_FCP_2_DEVICE
;
749 /* We need to update the rport role values */
750 roles
= FC_RPORT_ROLE_UNKNOWN
;
751 if (ndlp
->nlp_type
& NLP_FCP_INITIATOR
)
752 roles
|= FC_RPORT_ROLE_FCP_INITIATOR
;
753 if (ndlp
->nlp_type
& NLP_FCP_TARGET
)
754 roles
|= FC_RPORT_ROLE_FCP_TARGET
;
756 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_RPORT
,
757 "rport rolechg: role:x%x did:x%x flg:x%x",
758 roles
, ndlp
->nlp_DID
, ndlp
->nlp_flag
);
/* Skip SCSI transport rolechg when the port is NVMe-only. */
760 if (phba
->cfg_enable_fc4_type
!= LPFC_ENABLE_NVME
)
761 fc_remote_port_rolechg(rport
, roles
);
/* Decide whether this node should be re-authenticated with ADISC on the
 * next discovery cycle.  Requires a registered RPI; on non-pt2pt links,
 * sets NLP_NPR_ADISC when the user enabled ADISC during RSCN or the node
 * is an FCP-2 target.  Otherwise clears the flag and unregisters the RPI.
 * NOTE(review): garbled capture -- return statements and closing braces
 * are missing lines here.
 */
766 lpfc_disc_set_adisc(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
768 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
770 if (!(ndlp
->nlp_flag
& NLP_RPI_REGISTERED
)) {
771 ndlp
->nlp_flag
&= ~NLP_NPR_ADISC
;
775 if (!(vport
->fc_flag
& FC_PT2PT
)) {
776 /* Check config parameter use-adisc or FCP-2 */
777 if ((vport
->cfg_use_adisc
&& (vport
->fc_flag
& FC_RSCN_MODE
)) ||
778 ((ndlp
->nlp_fcp_info
& NLP_FCP_2_DEVICE
) &&
779 (ndlp
->nlp_type
& NLP_FCP_TARGET
))) {
780 spin_lock_irq(shost
->host_lock
);
781 ndlp
->nlp_flag
|= NLP_NPR_ADISC
;
782 spin_unlock_irq(shost
->host_lock
);
/* Not eligible for ADISC: drop the RPI so a fresh PLOGI re-registers. */
786 ndlp
->nlp_flag
&= ~NLP_NPR_ADISC
;
787 lpfc_unreg_rpi(vport
, ndlp
);
792 * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
793 * @phba : Pointer to lpfc_hba structure.
794 * @vport: Pointer to lpfc_vport structure.
795 * @rpi : rpi to be release.
797 * This function will send a unreg_login mailbox command to the firmware
/* NOTE(review): garbled capture -- the rpi parameter line, GFP flag for
 * the mempool_alloc and the early-return on allocation failure are
 * missing lines here.
 */
801 lpfc_release_rpi(struct lpfc_hba
*phba
,
802 struct lpfc_vport
*vport
,
808 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
,
811 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
812 "2796 mailbox memory allocation failed \n");
814 lpfc_unreg_login(phba
, vport
->vpi
, rpi
, pmb
);
815 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
816 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
/* Mailbox never queued: free it ourselves to avoid a leak. */
817 if (rc
== MBX_NOT_FINISHED
)
818 mempool_free(pmb
, phba
->mbox_mem_pool
);
/* Catch-all state-machine handler for event/state combinations that are
 * not legal.  If the illegal event is a successfully completed REG_LOGIN
 * (and we are not unloading), the just-registered RPI is released so it
 * is not leaked; the transition is then logged and the current state is
 * returned unchanged.
 * NOTE(review): garbled capture -- declaration of rpi, the phba
 * assignment and the trailing log arguments are missing lines here.
 */
823 lpfc_disc_illegal(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
824 void *arg
, uint32_t evt
)
826 struct lpfc_hba
*phba
;
827 LPFC_MBOXQ_t
*pmb
= (LPFC_MBOXQ_t
*) arg
;
831 /* Release the RPI if reglogin completing */
832 if (!(phba
->pport
->load_flag
& FC_UNLOADING
) &&
833 (evt
== NLP_EVT_CMPL_REG_LOGIN
) &&
834 (!pmb
->u
.mb
.mbxStatus
)) {
835 rpi
= pmb
->u
.mb
.un
.varWords
[0];
836 lpfc_release_rpi(phba
, vport
, rpi
);
838 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
839 "0271 Illegal State Transition: node x%x "
840 "event x%x, state x%x Data: x%x x%x\n",
841 ndlp
->nlp_DID
, evt
, ndlp
->nlp_state
, ndlp
->nlp_rpi
,
843 return ndlp
->nlp_state
;
/* PLOGI-completion variant of the illegal-transition handler: the
 * completion is tolerated silently when this node has NLP_RCV_PLOGI set
 * (a PLOGI collision is being resolved by the other thread); otherwise
 * it is logged as an illegal transition.  State is returned unchanged.
 * NOTE(review): garbled capture -- the trailing log arguments and closing
 * brace are missing lines here.
 */
847 lpfc_cmpl_plogi_illegal(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
848 void *arg
, uint32_t evt
)
850 /* This transition is only legal if we previously
851 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
852 * working on the same NPortID, do nothing for this thread
855 if (!(ndlp
->nlp_flag
& NLP_RCV_PLOGI
)) {
856 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
857 "0272 Illegal State Transition: node x%x "
858 "event x%x, state x%x Data: x%x x%x\n",
859 ndlp
->nlp_DID
, evt
, ndlp
->nlp_state
, ndlp
->nlp_rpi
,
862 return ndlp
->nlp_state
;
865 /* Start of Discovery State Machine routines */
/* UNUSED_NODE state: a received PLOGI is handed to lpfc_rcv_plogi; keep
 * the state when it was handled, otherwise report the node as freed. */
868 lpfc_rcv_plogi_unused_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
869 void *arg
, uint32_t evt
)
871 struct lpfc_iocbq
*cmdiocb
;
873 cmdiocb
= (struct lpfc_iocbq
*) arg
;
875 if (lpfc_rcv_plogi(vport
, ndlp
, cmdiocb
)) {
876 return ndlp
->nlp_state
;
878 return NLP_STE_FREED_NODE
;
/* UNUSED_NODE state: any other ELS gets answered with a LOGO; state is
 * unchanged. */
882 lpfc_rcv_els_unused_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
883 void *arg
, uint32_t evt
)
885 lpfc_issue_els_logo(vport
, ndlp
, 0);
886 return ndlp
->nlp_state
;
/* UNUSED_NODE state: ACC a received LOGO (marking NLP_LOGO_ACC under the
 * host lock) and stay in the same state. */
890 lpfc_rcv_logo_unused_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
891 void *arg
, uint32_t evt
)
893 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
894 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
896 spin_lock_irq(shost
->host_lock
);
897 ndlp
->nlp_flag
|= NLP_LOGO_ACC
;
898 spin_unlock_irq(shost
->host_lock
);
899 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
901 return ndlp
->nlp_state
;
/* UNUSED_NODE state: LOGO completion means the node can be freed. */
905 lpfc_cmpl_logo_unused_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
906 void *arg
, uint32_t evt
)
908 return NLP_STE_FREED_NODE
;
/* UNUSED_NODE state: device-remove event frees the node. */
912 lpfc_device_rm_unused_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
913 void *arg
, uint32_t evt
)
915 return NLP_STE_FREED_NODE
;
/* UNUSED_NODE state: device-recovery event is a no-op; keep the state. */
919 lpfc_device_recov_unused_node(struct lpfc_vport
*vport
,
920 struct lpfc_nodelist
*ndlp
,
921 void *arg
, uint32_t evt
)
923 return ndlp
->nlp_state
;
/* PLOGI_ISSUE state: handle a PLOGI collision (we sent a PLOGI and also
 * received one).  The port with the smaller WWPN backs off: if ours is
 * less we reject the incoming PLOGI with LSEXP_CMD_IN_PROGRESS and let
 * our own PLOGI win; otherwise we accept theirs via lpfc_rcv_plogi and,
 * if this node was awaiting discovery, advance the discovery engine
 * (more PLOGIs, disctmo, end-of-RSCN).  State is returned unchanged.
 * NOTE(review): garbled capture -- declarations of stat/port_cmp, the
 * branch on port_cmp's sign and closing braces are missing lines here.
 */
927 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
928 void *arg
, uint32_t evt
)
930 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
931 struct lpfc_hba
*phba
= vport
->phba
;
932 struct lpfc_iocbq
*cmdiocb
= arg
;
933 struct lpfc_dmabuf
*pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
934 uint32_t *lp
= (uint32_t *) pcmd
->virt
;
935 struct serv_parm
*sp
= (struct serv_parm
*) (lp
+ 1);
939 memset(&stat
, 0, sizeof (struct ls_rjt
));
941 /* For a PLOGI, we only accept if our portname is less
942 * than the remote portname.
944 phba
->fc_stat
.elsLogiCol
++;
945 port_cmp
= memcmp(&vport
->fc_portname
, &sp
->portName
,
946 sizeof(struct lpfc_name
));
949 /* Reject this request because the remote node will accept
951 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
952 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_CMD_IN_PROGRESS
;
953 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
,
/* Remote wins: process their PLOGI and keep discovery moving. */
956 if (lpfc_rcv_plogi(vport
, ndlp
, cmdiocb
) &&
957 (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) &&
958 (vport
->num_disc_nodes
)) {
959 spin_lock_irq(shost
->host_lock
);
960 ndlp
->nlp_flag
&= ~NLP_NPR_2B_DISC
;
961 spin_unlock_irq(shost
->host_lock
);
962 /* Check if there are more PLOGIs to be sent */
963 lpfc_more_plogi(vport
);
964 if (vport
->num_disc_nodes
== 0) {
965 spin_lock_irq(shost
->host_lock
);
966 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
967 spin_unlock_irq(shost
->host_lock
);
968 lpfc_can_disctmo(vport
);
969 lpfc_end_rscn(vport
);
972 } /* If our portname was less */
974 return ndlp
->nlp_state
;
/* PLOGI_ISSUE state: a PRLI cannot be serviced before login completes;
 * reject it LSRJT_LOGICAL_BSY and keep the state. */
978 lpfc_rcv_prli_plogi_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
979 void *arg
, uint32_t evt
)
981 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
984 memset(&stat
, 0, sizeof (struct ls_rjt
));
985 stat
.un
.b
.lsRjtRsnCode
= LSRJT_LOGICAL_BSY
;
986 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_NOTHING_MORE
;
987 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
988 return ndlp
->nlp_state
;
/* PLOGI_ISSUE state: on a received LOGO, software-abort our outstanding
 * PLOGI first, then run the common LOGO handling; keep the state. */
992 lpfc_rcv_logo_plogi_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
993 void *arg
, uint32_t evt
)
995 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
997 /* software abort outstanding PLOGI */
998 lpfc_els_abort(vport
->phba
, ndlp
);
1000 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_LOGO
);
1001 return ndlp
->nlp_state
;
/* PLOGI_ISSUE state: another ELS arrived while our PLOGI is pending.
 * Abort the outstanding PLOGI; ACC if the event was a LOGO, otherwise
 * send our own LOGO.  Then park the node in NPR with a 1 second delayed
 * PLOGI retry.  State returned is the (now NPR) nlp_state.
 * NOTE(review): garbled capture -- the else arm between the ACC and the
 * lpfc_issue_els_logo call is a missing line here.
 */
1005 lpfc_rcv_els_plogi_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1006 void *arg
, uint32_t evt
)
1008 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1009 struct lpfc_hba
*phba
= vport
->phba
;
1010 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1012 /* software abort outstanding PLOGI */
1013 lpfc_els_abort(phba
, ndlp
);
1015 if (evt
== NLP_EVT_RCV_LOGO
) {
1016 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
1018 lpfc_issue_els_logo(vport
, ndlp
, 0);
1021 /* Put ndlp in npr state set plogi timer for 1 sec */
1022 mod_timer(&ndlp
->nlp_delayfunc
, jiffies
+ msecs_to_jiffies(1000 * 1));
1023 spin_lock_irq(shost
->host_lock
);
1024 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
1025 spin_unlock_irq(shost
->host_lock
);
1026 ndlp
->nlp_last_elscmd
= ELS_CMD_PLOGI
;
1027 ndlp
->nlp_prev_state
= NLP_STE_PLOGI_ISSUE
;
1028 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1030 return ndlp
->nlp_state
;
1034 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport
*vport
,
1035 struct lpfc_nodelist
*ndlp
,
1039 struct lpfc_hba
*phba
= vport
->phba
;
1040 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1041 struct lpfc_iocbq
*cmdiocb
, *rspiocb
;
1042 struct lpfc_dmabuf
*pcmd
, *prsp
, *mp
;
1045 struct serv_parm
*sp
;
1050 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1051 rspiocb
= cmdiocb
->context_un
.rsp_iocb
;
1053 if (ndlp
->nlp_flag
& NLP_ACC_REGLOGIN
) {
1054 /* Recovery from PLOGI collision logic */
1055 return ndlp
->nlp_state
;
1058 irsp
= &rspiocb
->iocb
;
1060 if (irsp
->ulpStatus
)
1063 pcmd
= (struct lpfc_dmabuf
*) cmdiocb
->context2
;
1065 prsp
= list_get_first(&pcmd
->list
, struct lpfc_dmabuf
, list
);
1069 lp
= (uint32_t *) prsp
->virt
;
1070 sp
= (struct serv_parm
*) ((uint8_t *) lp
+ sizeof (uint32_t));
1072 /* Some switches have FDMI servers returning 0 for WWN */
1073 if ((ndlp
->nlp_DID
!= FDMI_DID
) &&
1074 (wwn_to_u64(sp
->portName
.u
.wwn
) == 0 ||
1075 wwn_to_u64(sp
->nodeName
.u
.wwn
) == 0)) {
1076 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1077 "0142 PLOGI RSP: Invalid WWN.\n");
1080 if (!lpfc_check_sparm(vport
, ndlp
, sp
, CLASS3
, 0))
1082 /* PLOGI chkparm OK */
1083 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
1084 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1085 ndlp
->nlp_DID
, ndlp
->nlp_state
,
1086 ndlp
->nlp_flag
, ndlp
->nlp_rpi
);
1087 if (vport
->cfg_fcp_class
== 2 && (sp
->cls2
.classValid
))
1088 ndlp
->nlp_fcp_info
|= CLASS2
;
1090 ndlp
->nlp_fcp_info
|= CLASS3
;
1092 ndlp
->nlp_class_sup
= 0;
1093 if (sp
->cls1
.classValid
)
1094 ndlp
->nlp_class_sup
|= FC_COS_CLASS1
;
1095 if (sp
->cls2
.classValid
)
1096 ndlp
->nlp_class_sup
|= FC_COS_CLASS2
;
1097 if (sp
->cls3
.classValid
)
1098 ndlp
->nlp_class_sup
|= FC_COS_CLASS3
;
1099 if (sp
->cls4
.classValid
)
1100 ndlp
->nlp_class_sup
|= FC_COS_CLASS4
;
1101 ndlp
->nlp_maxframe
=
1102 ((sp
->cmn
.bbRcvSizeMsb
& 0x0F) << 8) | sp
->cmn
.bbRcvSizeLsb
;
1104 if ((vport
->fc_flag
& FC_PT2PT
) &&
1105 (vport
->fc_flag
& FC_PT2PT_PLOGI
)) {
1106 ed_tov
= be32_to_cpu(sp
->cmn
.e_d_tov
);
1107 if (sp
->cmn
.edtovResolution
) {
1108 /* E_D_TOV ticks are in nanoseconds */
1109 ed_tov
= (phba
->fc_edtov
+ 999999) / 1000000;
1113 * Use the larger EDTOV
1114 * RATOV = 2 * EDTOV for pt-to-pt
1116 if (ed_tov
> phba
->fc_edtov
)
1117 phba
->fc_edtov
= ed_tov
;
1118 phba
->fc_ratov
= (2 * phba
->fc_edtov
) / 1000;
1120 memcpy(&phba
->fc_fabparam
, sp
, sizeof(struct serv_parm
));
1122 /* Issue config_link / reg_vfi to account for updated TOV's */
1123 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
1124 lpfc_issue_reg_vfi(vport
);
1126 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1128 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1129 "0133 PLOGI: no memory "
1131 "Data: x%x x%x x%x x%x\n",
1132 ndlp
->nlp_DID
, ndlp
->nlp_state
,
1133 ndlp
->nlp_flag
, ndlp
->nlp_rpi
);
1137 lpfc_config_link(phba
, mbox
);
1139 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
1140 mbox
->vport
= vport
;
1141 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
1142 if (rc
== MBX_NOT_FINISHED
) {
1143 mempool_free(mbox
, phba
->mbox_mem_pool
);
1149 lpfc_unreg_rpi(vport
, ndlp
);
1151 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1153 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1154 "0018 PLOGI: no memory for reg_login "
1155 "Data: x%x x%x x%x x%x\n",
1156 ndlp
->nlp_DID
, ndlp
->nlp_state
,
1157 ndlp
->nlp_flag
, ndlp
->nlp_rpi
);
1161 if (lpfc_reg_rpi(phba
, vport
->vpi
, irsp
->un
.elsreq64
.remoteID
,
1162 (uint8_t *) sp
, mbox
, ndlp
->nlp_rpi
) == 0) {
1163 switch (ndlp
->nlp_DID
) {
1164 case NameServer_DID
:
1165 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_ns_reg_login
;
1168 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_fdmi_reg_login
;
1171 ndlp
->nlp_flag
|= NLP_REG_LOGIN_SEND
;
1172 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_reg_login
;
1174 mbox
->context2
= lpfc_nlp_get(ndlp
);
1175 mbox
->vport
= vport
;
1176 if (lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
)
1177 != MBX_NOT_FINISHED
) {
1178 lpfc_nlp_set_state(vport
, ndlp
,
1179 NLP_STE_REG_LOGIN_ISSUE
);
1180 return ndlp
->nlp_state
;
1182 if (ndlp
->nlp_flag
& NLP_REG_LOGIN_SEND
)
1183 ndlp
->nlp_flag
&= ~NLP_REG_LOGIN_SEND
;
1184 /* decrement node reference count to the failed mbox
1188 mp
= (struct lpfc_dmabuf
*) mbox
->context1
;
1189 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
1191 mempool_free(mbox
, phba
->mbox_mem_pool
);
1193 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1194 "0134 PLOGI: cannot issue reg_login "
1195 "Data: x%x x%x x%x x%x\n",
1196 ndlp
->nlp_DID
, ndlp
->nlp_state
,
1197 ndlp
->nlp_flag
, ndlp
->nlp_rpi
);
1199 mempool_free(mbox
, phba
->mbox_mem_pool
);
1201 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1202 "0135 PLOGI: cannot format reg_login "
1203 "Data: x%x x%x x%x x%x\n",
1204 ndlp
->nlp_DID
, ndlp
->nlp_state
,
1205 ndlp
->nlp_flag
, ndlp
->nlp_rpi
);
1210 if (ndlp
->nlp_DID
== NameServer_DID
) {
1211 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
1212 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
1213 "0261 Cannot Register NameServer login\n");
1217 ** In case the node reference counter does not go to zero, ensure that
1218 ** the stale state for the node is not processed.
1221 ndlp
->nlp_prev_state
= ndlp
->nlp_state
;
1222 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1223 spin_lock_irq(shost
->host_lock
);
1224 ndlp
->nlp_flag
|= NLP_DEFER_RM
;
1225 spin_unlock_irq(shost
->host_lock
);
1226 return NLP_STE_FREED_NODE
;
1230 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1231 void *arg
, uint32_t evt
)
1233 return ndlp
->nlp_state
;
1237 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport
*vport
,
1238 struct lpfc_nodelist
*ndlp
, void *arg
, uint32_t evt
)
1240 struct lpfc_hba
*phba
;
1241 LPFC_MBOXQ_t
*pmb
= (LPFC_MBOXQ_t
*) arg
;
1242 MAILBOX_t
*mb
= &pmb
->u
.mb
;
1246 /* Release the RPI */
1247 if (!(phba
->pport
->load_flag
& FC_UNLOADING
) &&
1249 rpi
= pmb
->u
.mb
.un
.varWords
[0];
1250 lpfc_release_rpi(phba
, vport
, rpi
);
1252 return ndlp
->nlp_state
;
1256 lpfc_device_rm_plogi_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1257 void *arg
, uint32_t evt
)
1259 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1261 if (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) {
1262 spin_lock_irq(shost
->host_lock
);
1263 ndlp
->nlp_flag
|= NLP_NODEV_REMOVE
;
1264 spin_unlock_irq(shost
->host_lock
);
1265 return ndlp
->nlp_state
;
1267 /* software abort outstanding PLOGI */
1268 lpfc_els_abort(vport
->phba
, ndlp
);
1270 lpfc_drop_node(vport
, ndlp
);
1271 return NLP_STE_FREED_NODE
;
1276 lpfc_device_recov_plogi_issue(struct lpfc_vport
*vport
,
1277 struct lpfc_nodelist
*ndlp
,
1281 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1282 struct lpfc_hba
*phba
= vport
->phba
;
1284 /* Don't do anything that will mess up processing of the
1287 if (vport
->fc_flag
& FC_RSCN_DEFERRED
)
1288 return ndlp
->nlp_state
;
1290 /* software abort outstanding PLOGI */
1291 lpfc_els_abort(phba
, ndlp
);
1293 ndlp
->nlp_prev_state
= NLP_STE_PLOGI_ISSUE
;
1294 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1295 spin_lock_irq(shost
->host_lock
);
1296 ndlp
->nlp_flag
&= ~(NLP_NODEV_REMOVE
| NLP_NPR_2B_DISC
);
1297 spin_unlock_irq(shost
->host_lock
);
1299 return ndlp
->nlp_state
;
1303 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1304 void *arg
, uint32_t evt
)
1306 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1307 struct lpfc_hba
*phba
= vport
->phba
;
1308 struct lpfc_iocbq
*cmdiocb
;
1310 /* software abort outstanding ADISC */
1311 lpfc_els_abort(phba
, ndlp
);
1313 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1315 if (lpfc_rcv_plogi(vport
, ndlp
, cmdiocb
)) {
1316 if (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) {
1317 spin_lock_irq(shost
->host_lock
);
1318 ndlp
->nlp_flag
&= ~NLP_NPR_2B_DISC
;
1319 spin_unlock_irq(shost
->host_lock
);
1320 if (vport
->num_disc_nodes
)
1321 lpfc_more_adisc(vport
);
1323 return ndlp
->nlp_state
;
1325 ndlp
->nlp_prev_state
= NLP_STE_ADISC_ISSUE
;
1326 lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0);
1327 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_PLOGI_ISSUE
);
1329 return ndlp
->nlp_state
;
1333 lpfc_rcv_prli_adisc_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1334 void *arg
, uint32_t evt
)
1336 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1338 lpfc_els_rsp_prli_acc(vport
, cmdiocb
, ndlp
);
1339 return ndlp
->nlp_state
;
1343 lpfc_rcv_logo_adisc_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1344 void *arg
, uint32_t evt
)
1346 struct lpfc_hba
*phba
= vport
->phba
;
1347 struct lpfc_iocbq
*cmdiocb
;
1349 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1351 /* software abort outstanding ADISC */
1352 lpfc_els_abort(phba
, ndlp
);
1354 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_LOGO
);
1355 return ndlp
->nlp_state
;
1359 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport
*vport
,
1360 struct lpfc_nodelist
*ndlp
,
1361 void *arg
, uint32_t evt
)
1363 struct lpfc_iocbq
*cmdiocb
;
1365 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1367 lpfc_rcv_padisc(vport
, ndlp
, cmdiocb
);
1368 return ndlp
->nlp_state
;
1372 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1373 void *arg
, uint32_t evt
)
1375 struct lpfc_iocbq
*cmdiocb
;
1377 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1379 /* Treat like rcv logo */
1380 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_PRLO
);
1381 return ndlp
->nlp_state
;
1385 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport
*vport
,
1386 struct lpfc_nodelist
*ndlp
,
1387 void *arg
, uint32_t evt
)
1389 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1390 struct lpfc_hba
*phba
= vport
->phba
;
1391 struct lpfc_iocbq
*cmdiocb
, *rspiocb
;
1396 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1397 rspiocb
= cmdiocb
->context_un
.rsp_iocb
;
1399 ap
= (ADISC
*)lpfc_check_elscmpl_iocb(phba
, cmdiocb
, rspiocb
);
1400 irsp
= &rspiocb
->iocb
;
1402 if ((irsp
->ulpStatus
) ||
1403 (!lpfc_check_adisc(vport
, ndlp
, &ap
->nodeName
, &ap
->portName
))) {
1405 mod_timer(&ndlp
->nlp_delayfunc
,
1406 jiffies
+ msecs_to_jiffies(1000));
1407 spin_lock_irq(shost
->host_lock
);
1408 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
1409 spin_unlock_irq(shost
->host_lock
);
1410 ndlp
->nlp_last_elscmd
= ELS_CMD_PLOGI
;
1412 memset(&ndlp
->nlp_nodename
, 0, sizeof(struct lpfc_name
));
1413 memset(&ndlp
->nlp_portname
, 0, sizeof(struct lpfc_name
));
1415 ndlp
->nlp_prev_state
= NLP_STE_ADISC_ISSUE
;
1416 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1417 lpfc_unreg_rpi(vport
, ndlp
);
1418 return ndlp
->nlp_state
;
1421 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
1422 rc
= lpfc_sli4_resume_rpi(ndlp
, NULL
, NULL
);
1424 /* Stay in state and retry. */
1425 ndlp
->nlp_prev_state
= NLP_STE_ADISC_ISSUE
;
1426 return ndlp
->nlp_state
;
1430 if (ndlp
->nlp_type
& NLP_FCP_TARGET
) {
1431 ndlp
->nlp_prev_state
= NLP_STE_ADISC_ISSUE
;
1432 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_MAPPED_NODE
);
1434 ndlp
->nlp_prev_state
= NLP_STE_ADISC_ISSUE
;
1435 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
1438 return ndlp
->nlp_state
;
1442 lpfc_device_rm_adisc_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1443 void *arg
, uint32_t evt
)
1445 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1447 if (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) {
1448 spin_lock_irq(shost
->host_lock
);
1449 ndlp
->nlp_flag
|= NLP_NODEV_REMOVE
;
1450 spin_unlock_irq(shost
->host_lock
);
1451 return ndlp
->nlp_state
;
1453 /* software abort outstanding ADISC */
1454 lpfc_els_abort(vport
->phba
, ndlp
);
1456 lpfc_drop_node(vport
, ndlp
);
1457 return NLP_STE_FREED_NODE
;
1462 lpfc_device_recov_adisc_issue(struct lpfc_vport
*vport
,
1463 struct lpfc_nodelist
*ndlp
,
1467 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1468 struct lpfc_hba
*phba
= vport
->phba
;
1470 /* Don't do anything that will mess up processing of the
1473 if (vport
->fc_flag
& FC_RSCN_DEFERRED
)
1474 return ndlp
->nlp_state
;
1476 /* software abort outstanding ADISC */
1477 lpfc_els_abort(phba
, ndlp
);
1479 ndlp
->nlp_prev_state
= NLP_STE_ADISC_ISSUE
;
1480 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1481 spin_lock_irq(shost
->host_lock
);
1482 ndlp
->nlp_flag
&= ~(NLP_NODEV_REMOVE
| NLP_NPR_2B_DISC
);
1483 spin_unlock_irq(shost
->host_lock
);
1484 lpfc_disc_set_adisc(vport
, ndlp
);
1485 return ndlp
->nlp_state
;
1489 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport
*vport
,
1490 struct lpfc_nodelist
*ndlp
,
1494 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1496 lpfc_rcv_plogi(vport
, ndlp
, cmdiocb
);
1497 return ndlp
->nlp_state
;
1501 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport
*vport
,
1502 struct lpfc_nodelist
*ndlp
,
1506 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1508 /* Initiator mode. */
1509 lpfc_els_rsp_prli_acc(vport
, cmdiocb
, ndlp
);
1511 return ndlp
->nlp_state
;
1515 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport
*vport
,
1516 struct lpfc_nodelist
*ndlp
,
1520 struct lpfc_hba
*phba
= vport
->phba
;
1521 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1523 LPFC_MBOXQ_t
*nextmb
;
1524 struct lpfc_dmabuf
*mp
;
1526 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1528 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1529 if ((mb
= phba
->sli
.mbox_active
)) {
1530 if ((mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) &&
1531 (ndlp
== (struct lpfc_nodelist
*) mb
->context2
)) {
1532 ndlp
->nlp_flag
&= ~NLP_REG_LOGIN_SEND
;
1534 mb
->context2
= NULL
;
1535 mb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
1539 spin_lock_irq(&phba
->hbalock
);
1540 list_for_each_entry_safe(mb
, nextmb
, &phba
->sli
.mboxq
, list
) {
1541 if ((mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) &&
1542 (ndlp
== (struct lpfc_nodelist
*) mb
->context2
)) {
1543 mp
= (struct lpfc_dmabuf
*) (mb
->context1
);
1545 __lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
1548 ndlp
->nlp_flag
&= ~NLP_REG_LOGIN_SEND
;
1550 list_del(&mb
->list
);
1551 phba
->sli
.mboxq_cnt
--;
1552 mempool_free(mb
, phba
->mbox_mem_pool
);
1555 spin_unlock_irq(&phba
->hbalock
);
1557 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_LOGO
);
1558 return ndlp
->nlp_state
;
1562 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport
*vport
,
1563 struct lpfc_nodelist
*ndlp
,
1567 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1569 lpfc_rcv_padisc(vport
, ndlp
, cmdiocb
);
1570 return ndlp
->nlp_state
;
1574 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport
*vport
,
1575 struct lpfc_nodelist
*ndlp
,
1579 struct lpfc_iocbq
*cmdiocb
;
1581 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1582 lpfc_els_rsp_acc(vport
, ELS_CMD_PRLO
, cmdiocb
, ndlp
, NULL
);
1583 return ndlp
->nlp_state
;
1587 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport
*vport
,
1588 struct lpfc_nodelist
*ndlp
,
1592 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1593 struct lpfc_hba
*phba
= vport
->phba
;
1594 LPFC_MBOXQ_t
*pmb
= (LPFC_MBOXQ_t
*) arg
;
1595 MAILBOX_t
*mb
= &pmb
->u
.mb
;
1596 uint32_t did
= mb
->un
.varWords
[1];
1599 if (mb
->mbxStatus
) {
1600 /* RegLogin failed */
1601 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_DISCOVERY
,
1602 "0246 RegLogin failed Data: x%x x%x x%x x%x "
1604 did
, mb
->mbxStatus
, vport
->port_state
,
1605 mb
->un
.varRegLogin
.vpi
,
1606 mb
->un
.varRegLogin
.rpi
);
1608 * If RegLogin failed due to lack of HBA resources do not
1611 if (mb
->mbxStatus
== MBXERR_RPI_FULL
) {
1612 ndlp
->nlp_prev_state
= NLP_STE_REG_LOGIN_ISSUE
;
1613 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1614 return ndlp
->nlp_state
;
1617 /* Put ndlp in npr state set plogi timer for 1 sec */
1618 mod_timer(&ndlp
->nlp_delayfunc
,
1619 jiffies
+ msecs_to_jiffies(1000 * 1));
1620 spin_lock_irq(shost
->host_lock
);
1621 ndlp
->nlp_flag
|= NLP_DELAY_TMO
;
1622 spin_unlock_irq(shost
->host_lock
);
1623 ndlp
->nlp_last_elscmd
= ELS_CMD_PLOGI
;
1625 lpfc_issue_els_logo(vport
, ndlp
, 0);
1626 ndlp
->nlp_prev_state
= NLP_STE_REG_LOGIN_ISSUE
;
1627 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1628 return ndlp
->nlp_state
;
1631 /* SLI4 ports have preallocated logical rpis. */
1632 if (phba
->sli_rev
< LPFC_SLI_REV4
)
1633 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
1635 ndlp
->nlp_flag
|= NLP_RPI_REGISTERED
;
1637 /* Only if we are not a fabric nport do we issue PRLI */
1638 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
1639 "3066 RegLogin Complete on x%x x%x x%x\n",
1640 did
, ndlp
->nlp_type
, ndlp
->nlp_fc4_type
);
1641 if (!(ndlp
->nlp_type
& NLP_FABRIC
) &&
1642 (phba
->nvmet_support
== 0)) {
1643 /* The driver supports FCP and NVME concurrently. If the
1644 * ndlp's nlp_fc4_type is still zero, the driver doesn't
1645 * know what PRLI to send yet. Figure that out now and
1646 * call PRLI depending on the outcome.
1648 if (vport
->fc_flag
& FC_PT2PT
) {
1649 /* If we are pt2pt, there is no Fabric to determine
1650 * the FC4 type of the remote nport. So if NVME
1651 * is configured try it.
1653 ndlp
->nlp_fc4_type
|= NLP_FC4_FCP
;
1654 if ((phba
->cfg_enable_fc4_type
== LPFC_ENABLE_BOTH
) ||
1655 (phba
->cfg_enable_fc4_type
== LPFC_ENABLE_NVME
)) {
1656 ndlp
->nlp_fc4_type
|= NLP_FC4_NVME
;
1657 /* We need to update the localport also */
1658 lpfc_nvme_update_localport(vport
);
1661 } else if (ndlp
->nlp_fc4_type
== 0) {
1662 rc
= lpfc_ns_cmd(vport
, SLI_CTNS_GFT_ID
,
1664 return ndlp
->nlp_state
;
1667 ndlp
->nlp_prev_state
= NLP_STE_REG_LOGIN_ISSUE
;
1668 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_PRLI_ISSUE
);
1669 lpfc_issue_els_prli(vport
, ndlp
, 0);
1671 /* Only Fabric ports should transition */
1672 if (ndlp
->nlp_type
& NLP_FABRIC
) {
1673 ndlp
->nlp_prev_state
= NLP_STE_REG_LOGIN_ISSUE
;
1674 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
1677 return ndlp
->nlp_state
;
1681 lpfc_device_rm_reglogin_issue(struct lpfc_vport
*vport
,
1682 struct lpfc_nodelist
*ndlp
,
1686 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1688 if (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) {
1689 spin_lock_irq(shost
->host_lock
);
1690 ndlp
->nlp_flag
|= NLP_NODEV_REMOVE
;
1691 spin_unlock_irq(shost
->host_lock
);
1692 return ndlp
->nlp_state
;
1694 lpfc_drop_node(vport
, ndlp
);
1695 return NLP_STE_FREED_NODE
;
1700 lpfc_device_recov_reglogin_issue(struct lpfc_vport
*vport
,
1701 struct lpfc_nodelist
*ndlp
,
1705 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1707 /* Don't do anything that will mess up processing of the
1710 if (vport
->fc_flag
& FC_RSCN_DEFERRED
)
1711 return ndlp
->nlp_state
;
1713 ndlp
->nlp_prev_state
= NLP_STE_REG_LOGIN_ISSUE
;
1714 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1715 spin_lock_irq(shost
->host_lock
);
1717 ndlp
->nlp_flag
&= ~(NLP_NODEV_REMOVE
| NLP_NPR_2B_DISC
);
1718 spin_unlock_irq(shost
->host_lock
);
1719 lpfc_disc_set_adisc(vport
, ndlp
);
1720 return ndlp
->nlp_state
;
1724 lpfc_rcv_plogi_prli_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1725 void *arg
, uint32_t evt
)
1727 struct lpfc_iocbq
*cmdiocb
;
1729 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1731 lpfc_rcv_plogi(vport
, ndlp
, cmdiocb
);
1732 return ndlp
->nlp_state
;
1736 lpfc_rcv_prli_prli_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1737 void *arg
, uint32_t evt
)
1739 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1741 lpfc_els_rsp_prli_acc(vport
, cmdiocb
, ndlp
);
1742 return ndlp
->nlp_state
;
1746 lpfc_rcv_logo_prli_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1747 void *arg
, uint32_t evt
)
1749 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1751 /* Software abort outstanding PRLI before sending acc */
1752 lpfc_els_abort(vport
->phba
, ndlp
);
1754 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_LOGO
);
1755 return ndlp
->nlp_state
;
1759 lpfc_rcv_padisc_prli_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1760 void *arg
, uint32_t evt
)
1762 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1764 lpfc_rcv_padisc(vport
, ndlp
, cmdiocb
);
1765 return ndlp
->nlp_state
;
1768 /* This routine is envoked when we rcv a PRLO request from a nport
1769 * we are logged into. We should send back a PRLO rsp setting the
1771 * NEXT STATE = PRLI_ISSUE
1774 lpfc_rcv_prlo_prli_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1775 void *arg
, uint32_t evt
)
1777 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
1779 lpfc_els_rsp_acc(vport
, ELS_CMD_PRLO
, cmdiocb
, ndlp
, NULL
);
1780 return ndlp
->nlp_state
;
1784 lpfc_cmpl_prli_prli_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1785 void *arg
, uint32_t evt
)
1787 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1788 struct lpfc_iocbq
*cmdiocb
, *rspiocb
;
1789 struct lpfc_hba
*phba
= vport
->phba
;
1792 struct lpfc_nvme_prli
*nvpr
;
1795 cmdiocb
= (struct lpfc_iocbq
*) arg
;
1796 rspiocb
= cmdiocb
->context_un
.rsp_iocb
;
1798 /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
1799 * format is different so NULL the two PRLI types so that the
1800 * driver correctly gets the correct context.
1804 temp_ptr
= lpfc_check_elscmpl_iocb(phba
, cmdiocb
, rspiocb
);
1805 if (cmdiocb
->iocb_flag
& LPFC_PRLI_FCP_REQ
)
1806 npr
= (PRLI
*) temp_ptr
;
1807 else if (cmdiocb
->iocb_flag
& LPFC_PRLI_NVME_REQ
)
1808 nvpr
= (struct lpfc_nvme_prli
*) temp_ptr
;
1810 irsp
= &rspiocb
->iocb
;
1811 if (irsp
->ulpStatus
) {
1812 if ((vport
->port_type
== LPFC_NPIV_PORT
) &&
1813 vport
->cfg_restrict_login
) {
1817 /* The LS Req had some error. Don't let this be a
1820 if ((ndlp
->fc4_prli_sent
== 1) &&
1821 (ndlp
->nlp_state
== NLP_STE_PRLI_ISSUE
) &&
1822 (ndlp
->nlp_type
& (NLP_FCP_TARGET
| NLP_FCP_INITIATOR
)))
1823 /* The FCP PRLI completed successfully but
1824 * the NVME PRLI failed. Since they are sent in
1825 * succession, allow the FCP to complete.
1829 ndlp
->nlp_prev_state
= NLP_STE_PRLI_ISSUE
;
1830 ndlp
->nlp_type
|= NLP_FCP_INITIATOR
;
1831 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
1832 return ndlp
->nlp_state
;
1835 /* Check out PRLI rsp */
1836 ndlp
->nlp_type
&= ~(NLP_FCP_TARGET
| NLP_FCP_INITIATOR
);
1837 ndlp
->nlp_fcp_info
&= ~NLP_FCP_2_DEVICE
;
1839 /* NVME or FCP first burst must be negotiated for each PRLI. */
1840 ndlp
->nlp_flag
&= ~NLP_FIRSTBURST
;
1841 ndlp
->nvme_fb_size
= 0;
1842 if (npr
&& (npr
->acceptRspCode
== PRLI_REQ_EXECUTED
) &&
1843 (npr
->prliType
== PRLI_FCP_TYPE
)) {
1844 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NVME_DISC
,
1845 "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
1848 if (npr
->initiatorFunc
)
1849 ndlp
->nlp_type
|= NLP_FCP_INITIATOR
;
1850 if (npr
->targetFunc
) {
1851 ndlp
->nlp_type
|= NLP_FCP_TARGET
;
1852 if (npr
->writeXferRdyDis
)
1853 ndlp
->nlp_flag
|= NLP_FIRSTBURST
;
1856 ndlp
->nlp_fcp_info
|= NLP_FCP_2_DEVICE
;
1858 /* PRLI completed. Decrement count. */
1859 ndlp
->fc4_prli_sent
--;
1861 (bf_get_be32(prli_acc_rsp_code
, nvpr
) ==
1862 PRLI_REQ_EXECUTED
) &&
1863 (bf_get_be32(prli_type_code
, nvpr
) ==
1866 /* Complete setting up the remote ndlp personality. */
1867 if (bf_get_be32(prli_init
, nvpr
))
1868 ndlp
->nlp_type
|= NLP_NVME_INITIATOR
;
1870 /* Target driver cannot solicit NVME FB. */
1871 if (bf_get_be32(prli_tgt
, nvpr
)) {
1872 ndlp
->nlp_type
|= NLP_NVME_TARGET
;
1873 if ((bf_get_be32(prli_fba
, nvpr
) == 1) &&
1874 (bf_get_be32(prli_fb_sz
, nvpr
) > 0) &&
1875 (phba
->cfg_nvme_enable_fb
) &&
1876 (!phba
->nvmet_support
)) {
1877 /* Both sides support FB. The target's first
1878 * burst size is a 512 byte encoded value.
1880 ndlp
->nlp_flag
|= NLP_FIRSTBURST
;
1881 ndlp
->nvme_fb_size
= bf_get_be32(prli_fb_sz
,
1886 if (bf_get_be32(prli_recov
, nvpr
))
1887 ndlp
->nlp_fcp_info
|= NLP_FCP_2_DEVICE
;
1889 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NVME_DISC
,
1890 "6029 NVME PRLI Cmpl w1 x%08x "
1891 "w4 x%08x w5 x%08x flag x%x, "
1892 "fcp_info x%x nlp_type x%x\n",
1893 be32_to_cpu(nvpr
->word1
),
1894 be32_to_cpu(nvpr
->word4
),
1895 be32_to_cpu(nvpr
->word5
),
1896 ndlp
->nlp_flag
, ndlp
->nlp_fcp_info
,
1898 /* PRLI completed. Decrement count. */
1899 ndlp
->fc4_prli_sent
--;
1901 if (!(ndlp
->nlp_type
& NLP_FCP_TARGET
) &&
1902 (vport
->port_type
== LPFC_NPIV_PORT
) &&
1903 vport
->cfg_restrict_login
) {
1905 spin_lock_irq(shost
->host_lock
);
1906 ndlp
->nlp_flag
|= NLP_TARGET_REMOVE
;
1907 spin_unlock_irq(shost
->host_lock
);
1908 lpfc_issue_els_logo(vport
, ndlp
, 0);
1910 ndlp
->nlp_prev_state
= NLP_STE_PRLI_ISSUE
;
1911 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
1912 return ndlp
->nlp_state
;
1916 /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
1919 if (ndlp
->fc4_prli_sent
== 0) {
1920 ndlp
->nlp_prev_state
= NLP_STE_PRLI_ISSUE
;
1921 if (ndlp
->nlp_type
& (NLP_FCP_TARGET
| NLP_NVME_TARGET
))
1922 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_MAPPED_NODE
);
1924 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
1926 lpfc_printf_vlog(vport
,
1928 "3067 PRLI's still outstanding "
1929 "on x%06x - count %d, Pend Node Mode "
1931 ndlp
->nlp_DID
, ndlp
->fc4_prli_sent
);
1933 return ndlp
->nlp_state
;
1936 /*! lpfc_device_rm_prli_issue
1947 * This routine is envoked when we a request to remove a nport we are in the
1948 * process of PRLIing. We should software abort outstanding prli, unreg
1949 * login, send a logout. We will change node state to UNUSED_NODE, put it
1950 * on plogi list so it can be freed when LOGO completes.
1955 lpfc_device_rm_prli_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
1956 void *arg
, uint32_t evt
)
1958 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1960 if (ndlp
->nlp_flag
& NLP_NPR_2B_DISC
) {
1961 spin_lock_irq(shost
->host_lock
);
1962 ndlp
->nlp_flag
|= NLP_NODEV_REMOVE
;
1963 spin_unlock_irq(shost
->host_lock
);
1964 return ndlp
->nlp_state
;
1966 /* software abort outstanding PLOGI */
1967 lpfc_els_abort(vport
->phba
, ndlp
);
1969 lpfc_drop_node(vport
, ndlp
);
1970 return NLP_STE_FREED_NODE
;
1975 /*! lpfc_device_recov_prli_issue
1986 * The routine is envoked when the state of a device is unknown, like
1987 * during a link down. We should remove the nodelist entry from the
1988 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
1989 * outstanding PRLI command, then free the node entry.
1992 lpfc_device_recov_prli_issue(struct lpfc_vport
*vport
,
1993 struct lpfc_nodelist
*ndlp
,
1997 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
1998 struct lpfc_hba
*phba
= vport
->phba
;
2000 /* Don't do anything that will mess up processing of the
2003 if (vport
->fc_flag
& FC_RSCN_DEFERRED
)
2004 return ndlp
->nlp_state
;
2006 /* software abort outstanding PRLI */
2007 lpfc_els_abort(phba
, ndlp
);
2009 ndlp
->nlp_prev_state
= NLP_STE_PRLI_ISSUE
;
2010 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
2011 spin_lock_irq(shost
->host_lock
);
2012 ndlp
->nlp_flag
&= ~(NLP_NODEV_REMOVE
| NLP_NPR_2B_DISC
);
2013 spin_unlock_irq(shost
->host_lock
);
2014 lpfc_disc_set_adisc(vport
, ndlp
);
2015 return ndlp
->nlp_state
;
2019 lpfc_rcv_plogi_logo_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2020 void *arg
, uint32_t evt
)
2022 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*)arg
;
2025 memset(&stat
, 0, sizeof(struct ls_rjt
));
2026 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
2027 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_NOTHING_MORE
;
2028 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
2029 return ndlp
->nlp_state
;
2033 lpfc_rcv_prli_logo_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2034 void *arg
, uint32_t evt
)
2036 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*)arg
;
2039 memset(&stat
, 0, sizeof(struct ls_rjt
));
2040 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
2041 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_NOTHING_MORE
;
2042 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
2043 return ndlp
->nlp_state
;
2047 lpfc_rcv_logo_logo_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2048 void *arg
, uint32_t evt
)
2050 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2051 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*)arg
;
2053 spin_lock_irq(shost
->host_lock
);
2054 ndlp
->nlp_flag
|= NLP_LOGO_ACC
;
2055 spin_unlock_irq(shost
->host_lock
);
2056 lpfc_els_rsp_acc(vport
, ELS_CMD_ACC
, cmdiocb
, ndlp
, NULL
);
2057 return ndlp
->nlp_state
;
2061 lpfc_rcv_padisc_logo_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2062 void *arg
, uint32_t evt
)
2064 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*)arg
;
2067 memset(&stat
, 0, sizeof(struct ls_rjt
));
2068 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
2069 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_NOTHING_MORE
;
2070 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
2071 return ndlp
->nlp_state
;
2075 lpfc_rcv_prlo_logo_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2076 void *arg
, uint32_t evt
)
2078 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*)arg
;
2081 memset(&stat
, 0, sizeof(struct ls_rjt
));
2082 stat
.un
.b
.lsRjtRsnCode
= LSRJT_UNABLE_TPC
;
2083 stat
.un
.b
.lsRjtRsnCodeExp
= LSEXP_NOTHING_MORE
;
2084 lpfc_els_rsp_reject(vport
, stat
.un
.lsRjtError
, cmdiocb
, ndlp
, NULL
);
2085 return ndlp
->nlp_state
;
2089 lpfc_cmpl_logo_logo_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2090 void *arg
, uint32_t evt
)
2092 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2094 ndlp
->nlp_prev_state
= NLP_STE_LOGO_ISSUE
;
2095 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
2096 spin_lock_irq(shost
->host_lock
);
2097 ndlp
->nlp_flag
&= ~(NLP_NODEV_REMOVE
| NLP_NPR_2B_DISC
);
2098 spin_unlock_irq(shost
->host_lock
);
2099 lpfc_disc_set_adisc(vport
, ndlp
);
2100 return ndlp
->nlp_state
;
2104 lpfc_device_rm_logo_issue(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2105 void *arg
, uint32_t evt
)
2108 * Take no action. If a LOGO is outstanding, then possibly DevLoss has
2109 * timed out and is calling for Device Remove. In this case, the LOGO
2110 * must be allowed to complete in state LOGO_ISSUE so that the rpi
2111 * and other NLP flags are correctly cleaned up.
2113 return ndlp
->nlp_state
;
2117 lpfc_device_recov_logo_issue(struct lpfc_vport
*vport
,
2118 struct lpfc_nodelist
*ndlp
,
2119 void *arg
, uint32_t evt
)
2122 * Device Recovery events have no meaning for a node with a LOGO
2123 * outstanding. The LOGO has to complete first and handle the
2124 * node from that point.
2126 return ndlp
->nlp_state
;
2130 lpfc_rcv_plogi_unmap_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2131 void *arg
, uint32_t evt
)
2133 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2135 lpfc_rcv_plogi(vport
, ndlp
, cmdiocb
);
2136 return ndlp
->nlp_state
;
2140 lpfc_rcv_prli_unmap_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2141 void *arg
, uint32_t evt
)
2143 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2145 lpfc_rcv_prli(vport
, ndlp
, cmdiocb
);
2146 lpfc_els_rsp_prli_acc(vport
, cmdiocb
, ndlp
);
2147 return ndlp
->nlp_state
;
2151 lpfc_rcv_logo_unmap_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2152 void *arg
, uint32_t evt
)
2154 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2156 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_LOGO
);
2157 return ndlp
->nlp_state
;
2161 lpfc_rcv_padisc_unmap_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2162 void *arg
, uint32_t evt
)
2164 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2166 lpfc_rcv_padisc(vport
, ndlp
, cmdiocb
);
2167 return ndlp
->nlp_state
;
2171 lpfc_rcv_prlo_unmap_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2172 void *arg
, uint32_t evt
)
2174 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2176 lpfc_els_rsp_acc(vport
, ELS_CMD_PRLO
, cmdiocb
, ndlp
, NULL
);
2177 return ndlp
->nlp_state
;
2181 lpfc_device_recov_unmap_node(struct lpfc_vport
*vport
,
2182 struct lpfc_nodelist
*ndlp
,
2186 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2188 ndlp
->nlp_prev_state
= NLP_STE_UNMAPPED_NODE
;
2189 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_NPR_NODE
);
2190 spin_lock_irq(shost
->host_lock
);
2191 ndlp
->nlp_flag
&= ~(NLP_NODEV_REMOVE
| NLP_NPR_2B_DISC
);
2192 spin_unlock_irq(shost
->host_lock
);
2193 lpfc_disc_set_adisc(vport
, ndlp
);
2195 return ndlp
->nlp_state
;
2199 lpfc_rcv_plogi_mapped_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2200 void *arg
, uint32_t evt
)
2202 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2204 lpfc_rcv_plogi(vport
, ndlp
, cmdiocb
);
2205 return ndlp
->nlp_state
;
2209 lpfc_rcv_prli_mapped_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2210 void *arg
, uint32_t evt
)
2212 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2214 lpfc_els_rsp_prli_acc(vport
, cmdiocb
, ndlp
);
2215 return ndlp
->nlp_state
;
2219 lpfc_rcv_logo_mapped_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2220 void *arg
, uint32_t evt
)
2222 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2224 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_LOGO
);
2225 return ndlp
->nlp_state
;
2229 lpfc_rcv_padisc_mapped_node(struct lpfc_vport
*vport
,
2230 struct lpfc_nodelist
*ndlp
,
2231 void *arg
, uint32_t evt
)
2233 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2235 lpfc_rcv_padisc(vport
, ndlp
, cmdiocb
);
2236 return ndlp
->nlp_state
;
/* RCV_PRLO event in MAPPED_NODE state: the target is dropping the process
 * login, so abort all outstanding FCP I/O to it and then treat the PRLO
 * like a received LOGO.
 */
static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target: abort every iocb queued to this SCSI id */
	lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}
/* DEVICE_RECOVERY event in MAPPED_NODE state: move the node to NPR state
 * for rediscovery and clear pending removal/discovery flags under the
 * host lock.
 */
static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Remember where we came from so rediscovery can restore it. */
	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	/* Decide whether this node should be recovered via ADISC. */
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
/* RCV_PLOGI event in NPR_NODE state: process the remote port's PLOGI.
 * If the PLOGI is accepted, cancel any pending retry timer and clear the
 * discovery flags; otherwise, if the node is not already queued for
 * discovery and no delay timer is armed, kick off our own PLOGI.
 */
static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
		return ndlp->nlp_state;
	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		/* PLOGI handled: stop retry timer and clear discovery flags */
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
		spin_unlock_irq(shost->host_lock);
	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		/* send PLOGI immediately, move to PLOGI issue state */
		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}

	return ndlp->nlp_state;
}
/* RCV_PRLI event in NPR_NODE state: we are not logged in at this point,
 * so reject the PRLI (LS_RJT, "unable to perform command"), then restart
 * login ourselves via ADISC or PLOGI unless a delay timer is pending.
 */
static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	/* Build and send the LS_RJT response. */
	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			/* Node was previously logged in: re-validate via ADISC */
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(shost->host_lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			/* Otherwise start over with a fresh PLOGI */
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
2329 lpfc_rcv_logo_npr_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2330 void *arg
, uint32_t evt
)
2332 struct lpfc_iocbq
*cmdiocb
= (struct lpfc_iocbq
*) arg
;
2334 lpfc_rcv_logo(vport
, ndlp
, cmdiocb
, ELS_CMD_LOGO
);
2335 return ndlp
->nlp_state
;
/* RCV_ADISC / RCV_PDISC event in NPR_NODE state: process the PADISC, then
 * re-initiate login (ADISC or PLOGI) unless discovery for this node is
 * already pending or delayed.
 */
static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery in progress for this node. Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			/* Node was previously logged in: re-validate via ADISC */
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			/* Otherwise start over with a fresh PLOGI */
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
/* RCV_PRLO event in NPR_NODE state: accept the PRLO, and if no delay timer
 * is already armed, schedule a delayed PLOGI (1 second) to re-login to the
 * remote port; in either case drop the ADISC hint.
 */
static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* Mark that we are ACC'ing a logout-type request */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);

	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		/* Arm the delay timer to issue a PLOGI in 1 second */
		mod_timer(&ndlp->nlp_delayfunc,
			  jiffies + msecs_to_jiffies(1000 * 1));
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		/* ELS command to issue when the delay timer fires */
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp->nlp_state;
}
/* CMPL_PLOGI event in NPR_NODE state: a PLOGI we issued has completed.
 * On failure, flag the node for deferred removal and report it freed;
 * on success the state is unchanged.
 */
static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		/* PLOGI failed: defer removal of the node */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DEFER_RM;
		spin_unlock_irq(shost->host_lock);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
/* CMPL_PRLI event in NPR_NODE state: a PRLI we issued has completed.
 * If it failed and the node is pending device removal, drop the node now.
 */
static uint32_t
lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
/* CMPL_LOGO event in NPR_NODE state: a LOGO we issued has completed.
 * For the fabric controller, clear the vport fabric flags; then unregister
 * the node's RPI.
 */
static uint32_t
lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* For the fabric port just clear the fc flags. */
	if (ndlp->nlp_DID == Fabric_DID) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_unreg_rpi(vport, ndlp);
	return ndlp->nlp_state;
}
/* CMPL_ADISC event in NPR_NODE state: an ADISC we issued has completed.
 * If it failed and the node is pending device removal, drop the node now.
 */
static uint32_t
lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
/* CMPL_REG_LOGIN event in NPR_NODE state: a REG_LOGIN mailbox command has
 * completed. On success record the RPI (SLI3 only — SLI4 rpis are
 * preallocated) and mark it registered, immediately unregistering again if
 * a LOGO was ACC'ed meanwhile. On failure, drop the node if it is pending
 * device removal.
 */
static uint32_t
lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg, uint32_t evt)
{
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t    *mb = &pmb->u.mb;

	if (!mb->mbxStatus) {
		/* SLI4 ports have preallocated logical rpis. */
		if (vport->phba->sli_rev < LPFC_SLI_REV4)
			ndlp->nlp_rpi = mb->un.varWords[0];
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
		if (ndlp->nlp_flag & NLP_LOGO_ACC) {
			/* A LOGO was accepted while registration was in
			 * flight: undo the registration right away.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	} else {
		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
			lpfc_drop_node(vport, ndlp);
			return NLP_STE_FREED_NODE;
		}
	}
	return ndlp->nlp_state;
}
/* DEVICE_RM event in NPR_NODE state: if discovery for this node is still
 * pending, just mark it for removal later; otherwise drop it immediately.
 */
static uint32_t
lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		/* Discovery pending: defer removal until it completes */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	}
	lpfc_drop_node(vport, ndlp);
	return NLP_STE_FREED_NODE;
}
/* DEVICE_RECOVERY event in NPR_NODE state: cancel any pending retry timer
 * and clear removal/discovery flags — unless RSCN processing is deferred,
 * in which case leave everything untouched.
 */
static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	return ndlp->nlp_state;
}
/* This next section defines the NPort Discovery State Machine */

/* There are 4 different double linked lists nodelist entries can reside on.
 * The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists will keep track of what nodes will be affected
 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
 * The unmapped_list will contain all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list will contain all nodes
 * that are mapped FCP targets.
 */
/*
 * The bind list is a list of undiscovered (potentially non-existent) nodes
 * that we have saved binding information on. This information is used when
 * nodes transition from the unmapped to the mapped list.
 */
/* For UNUSED_NODE state, the node has just been allocated.
 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
 * and put on the unmapped list. For ADISC processing, the node is taken off
 * the ADISC list and placed on either the mapped or unmapped list (depending
 * on its previous state). Once on the unmapped list, a PRLI is issued and the
 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
 * changed to UNMAPPED_NODE. If the completion indicates a mapped
 * node, the node is taken off the unmapped list. The binding list is checked
 * for a valid binding, or a binding is automatically assigned. If binding
 * assignment is unsuccessful, the node is left on the unmapped list. If
 * binding assignment is successful, the associated binding list entry (if
 * any) is removed, and the node is placed on the mapped list.
 */
/*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
 * expire, all affected nodes will receive a DEVICE_RM event.
 */
/*
 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we will first process the ADISC list. 32 entries are processed initially and
 * ADISC is initiated for each one. Completions / Events for each node are
 * funnelled thru the state machine. As each node finishes ADISC processing, it
 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 * waiting, and the ADISC list count is identically 0, then we are done. For
 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
 * list. 32 entries are processed initially and PLOGI is initiated for each one.
 * Completions / Events for each node are funnelled thru the state machine. As
 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
 * identically 0, then we are done. We have now completed discovery / RSCN
 * handling. Upon completion, ALL nodes should be on either the mapped or
 * unmapped lists.
 */
/* Discovery state-machine dispatch table, indexed by
 * (current state * NLP_EVT_MAX_EVENT) + event. Entry order must match the
 * NLP_STE_* state and NLP_EVT_* event enumerations exactly.
 */
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
	(struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine                  Event       Current State  */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE       */
	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
};
2716 lpfc_disc_state_machine(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
2717 void *arg
, uint32_t evt
)
2719 uint32_t cur_state
, rc
;
2720 uint32_t(*func
) (struct lpfc_vport
*, struct lpfc_nodelist
*, void *,
2722 uint32_t got_ndlp
= 0;
2724 if (lpfc_nlp_get(ndlp
))
2727 cur_state
= ndlp
->nlp_state
;
2729 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2730 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
2731 "0211 DSM in event x%x on NPort x%x in "
2732 "state %d Data: x%x\n",
2733 evt
, ndlp
->nlp_DID
, cur_state
, ndlp
->nlp_flag
);
2735 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_DSM
,
2736 "DSM in: evt:%d ste:%d did:x%x",
2737 evt
, cur_state
, ndlp
->nlp_DID
);
2739 func
= lpfc_disc_action
[(cur_state
* NLP_EVT_MAX_EVENT
) + evt
];
2740 rc
= (func
) (vport
, ndlp
, arg
, evt
);
2742 /* DSM out state <rc> on NPort <nlp_DID> */
2744 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
2745 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2746 rc
, ndlp
->nlp_DID
, ndlp
->nlp_flag
);
2748 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_DSM
,
2749 "DSM out: ste:%d did:x%x flg:x%x",
2750 rc
, ndlp
->nlp_DID
, ndlp
->nlp_flag
);
2751 /* Decrement the ndlp reference count held for this function */
2754 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
2755 "0213 DSM out state %d on NPort free\n", rc
);
2757 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_DSM
,
2758 "DSM out: ste:%d did:x%x flg:x%x",