/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
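
/*
 * Note: the driver does not call lpfc_terminate_rport_io() directly; the
 * SCSI FC transport invokes it through the driver's fc_function_template.
 * A sketch of the wiring (the actual member list lives in lpfc_attr.c):
 *
 *	struct fc_function_template lpfc_transport_functions = {
 *		...
 *		.terminate_rport_io  = lpfc_terminate_rport_io,
 *		.dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
 *	};
 */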

/*
 * This function will be called when dev_loss_tmo fire.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
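
/*
 * The callback above only queues the devloss work: lpfc_nlp_get() takes a
 * node reference so the ndlp cannot be freed while the event waits on
 * phba->work_list; the worker thread later runs lpfc_dev_loss_tmo_handler()
 * and drops that reference (see LPFC_EVT_DEV_LOSS in lpfc_work_list_done()
 * below). Producer side, as coded above:
 *
 *	evtp->evt_arg1 = lpfc_nlp_get(ndlp);	// hold node for the worker
 *	list_add_tail(&evtp->evt_listp, &phba->work_list);
 *	lpfc_worker_wake_up(phba);
 */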

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine shall return 1 when at least
 * one remote node, including this @ndlp, is still in use of the FCF;
 * otherwise, it shall return 0 when no remote node is still using the FCF
 * when the devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int put_node;
	int put_rport;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;

	if (!rport)
		return fcf_inuse;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of FCF, when this
 * routine is invoked, it shall be guaranteed that none of the remote nodes
 * are in use of the FCF. When devloss timeout happens to the last remote
 * node using the FCF, if the FIP engine is neither in FCF table scan
 * process nor roundrobin failover process, the in-use FCF shall be
 * unregistered. If the FIP engine is in FCF discovery process, the devloss
 * timeout state shall be set for either the FCF table scan process or
 * roundrobin failover process to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when
 * there are a lot of events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
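
/*
 * The LPFC_MAX_EVT_COUNT test above is simple admission control: once too
 * many fast-path events are outstanding, further GFP_ATOMIC allocations are
 * refused so an event storm cannot exhaust atomic memory. Callers must
 * therefore tolerate a NULL return, e.g. (sketch):
 *
 *	struct lpfc_fast_path_event *fp_evt = lpfc_alloc_fast_evt(phba);
 *	if (!fp_evt)
 *		return;		// drop the event, not the machine
 */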

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread, when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
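
/*
 * fc_host_post_vendor_event() pushes the payload out through the FC
 * transport's netlink event channel, tagged with LPFC_NL_VENDOR_ID so
 * userspace listeners can filter lpfc-specific events. Whichever branch
 * above is taken, fast_evt_data is freed exactly once before returning.
 */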

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}
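
/*
 * Ownership note: free_evt defaults to 1 because most events are
 * kmalloc'ed by lpfc_workq_post_event() and must be freed here. It is
 * cleared for events embedded in longer-lived objects (the ELS-retry and
 * dev-loss events live inside the ndlp) and for fast-path events, which
 * lpfc_send_fastpath_evt() frees itself.
 */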

static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
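
/*
 * The Host Attention register packs four event bits per ring, which is why
 * the ELS-ring receive bits are isolated above with:
 *
 *	status = ha_copy & (HA_RXMASK << (4 * LPFC_ELS_RING));
 *	status >>= 4 * LPFC_ELS_RING;
 *
 * i.e. shift the ELS ring's nibble down and test it against HA_RXMASK.
 */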

int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
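
/*
 * Typical usage (sketch, mirroring the LPFC_EVT_ONLINE case handled in
 * lpfc_work_list_done() above): a caller passes result/completion pointers
 * as the two opaque arguments and blocks until the worker thread has run
 * the event:
 *
 *	struct completion online_compl;
 *	int status = 0;
 *
 *	init_completion(&online_compl);
 *	lpfc_workq_post_event(phba, &status, &online_compl,
 *			      LPFC_EVT_ONLINE);
 *	wait_for_completion(&online_compl);
 *
 * GFP_ATOMIC is used above because callers may run in IRQ context or under
 * spinlocks.
 */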

void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
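
/*
 * The "clean up any firmware default rpi's" mailbox above passes wildcard
 * arguments (0xffff / 0xffffffff) to lpfc_unreg_did(); the intent, per the
 * comment in the code, is to unregister every leftover default RPI in one
 * UNREG_DID operation rather than walking individual logins.
 */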

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_initial_flogi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery. It
 * returns 1 if this record can be used for SAN discovery, else it returns
 * zero. If this FCF record can be used for SAN discovery, boot_flag will
 * indicate if this FCF is used by boot bios and addr_mode will indicate the
 * addressing mode to be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID;
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entry, driver connect to all
	 * FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record report a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				      LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
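
/*
 * The vlan_bitmap scan at the top of lpfc_match_fcf_conn_list() finds the
 * lowest VLAN the FCF advertises: 512 bytes x 8 bits cover the 4096 VLAN
 * IDs, with byte i holding VLANs i*8 .. i*8+7 starting at the least
 * significant bit. For example, if the first nonzero byte is
 * vlan_bitmap[12] == 0x10, then fcf_vlan_id = 12*8 + 4 = 100.
 */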

/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 **/
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf record seen so far.
 *
 * This function makes a running random selection decision on the FCF record
 * to use through a sequence of @fcf_cnt eligible FCF records with equal
 * probability. To perform integer manipulation of random numbers with
 * size uint32_t, the lower 16 bits of the 32-bit random number returned
 * from random32() are taken as the random number generated.
 *
 * Returns true when the outcome is that the newly read FCF record should be
 * chosen; otherwise, returns false when the outcome is for keeping the
 * previously chosen FCF record.
 **/
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = (0xFFFF & random32());

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
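
/*
 * This is single-slot reservoir sampling over the stream of eligible FCFs:
 * the n-th record replaces the current pick with probability ~1/n, since
 * rand_num is uniform on [0, 0xFFFF] and
 * P(fcf_cnt * rand_num < 0xFFFF) ~= 1/fcf_cnt. That leaves every record
 * seen so far equally likely to be the final choice. For example, with
 * fcf_cnt == 3 the new record wins only when rand_num < 0x5555.
 */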

/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 **/
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
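
/*
 * Note the in-place lpfc_sli_pcimem_bcopy(p, p, len) calls above: source
 * and destination are the same buffer, so on big-endian hosts this
 * word-swaps the little-endian SLI4 DMA payload in place. The byte-wise
 * vlan_bitmap is deliberately excluded (the copy stops at
 * offsetof(struct fcf_record, vlan_bitmap)), and the trailing words
 * 137/138 are then swapped individually with le32_to_cpu().
 */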

/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 **/
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}

/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs matching test of a new FCF record against an existing
 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
 * will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	return true;
}
1832 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
1833 * @vport: Pointer to vport object.
1834 * @fcf_index: index to next fcf.
1836 * This function processing the roundrobin fcf failover to next fcf index.
1837 * When this function is invoked, there will be a current fcf registered
1839 * Return: 0 for continue retrying flogi on currently registered fcf;
1840 * 1 for stop flogi on currently registered fcf;
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
			"2794 Try FLOGI roundrobin FCF failover to "
			"(x%x)\n", fcf_index);
	rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
	if (rc)
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
				"2761 FLOGI roundrobin FCF failover "
				"failed (rc:x%x) to read FCF (x%x)\n",
				rc, phba->fcf.current_rec.fcf_indx);
	else
		goto stop_flogi_current_fcf;

	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
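
/*
 * Caller contract, restated from the logic above: a return of 1 tells the
 * FLOGI path to stop retrying on the currently registered FCF, either
 * because the roundrobin read of the next FCF was kicked off successfully
 * (failover continues from its completion handler) or because no eligible
 * FCF remained and the port was parked in LPFC_VPORT_UNKNOWN or handed to
 * lpfc_unregister_fcf_rescan(); a return of 0 means the read could not be
 * started, so FLOGI keeps retrying the current FCF.
 */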
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kick-starts
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
static void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id;
	uint32_t seed;
	bool select_new_fcf;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for roundrobin FCF failover.
	 */
	if (!rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF (x%x) failed connection "
				"list check: (x%x/x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record));
		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)
			    != phba->fcf.current_rec.fcf_indx) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2862 FCF (x%x) matches property "
					"of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
				goto read_next_fcf;
			}
			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that
			 * was not triggered by fast FCF failover process,
			 * treat it as fast FCF failover.
			 */
			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"2835 Invalid in-use FCF "
						"(x%x), enter FCF failover "
						"table scan.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
		}
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
		if (rc)
			goto read_next_fcf;
	}
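	/*
	 * The set above records the eligible FCF in the roundrobin failover
	 * bmask (phba->fcf.fcf_rr_bmask), which
	 * lpfc_sli4_fcf_rr_next_index_get() later walks to pick the next
	 * failover candidate on FLOGI failure. A non-zero return is taken
	 * here to mean the index could not be recorded (out of the bmask
	 * range, presumably), so the record is skipped for roundrobin.
	 */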
	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
		    new_fcf_record, vlan_id)) {
			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)
			    == phba->fcf.current_rec.fcf_indx) {
				phba->fcf.fcf_flag |= FCF_AVAILABLE;
				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
					/* Stop FCF redisc wait timer */
					__lpfc_sli4_stop_fcf_redisc_wait_timer(
									phba);
				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
					/* Fast failover, mark completed */
					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2836 New FCF matches in-use "
						"FCF (x%x)\n",
						phba->fcf.current_rec.fcf_indx);
				goto out;
			}
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2863 New FCF (x%x) matches "
					"property of in-use FCF (x%x)\n",
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record),
					phba->fcf.current_rec.fcf_indx);
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2837 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose the new FCF record with lower priority */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2838 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
			/* Reset running random FCF selection count */
			phba->fcf.eligible_fcf_cnt = 1;
		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
			/* Update running random FCF selection count */
			phba->fcf.eligible_fcf_cnt++;
			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
						phba->fcf.eligible_fcf_cnt);
			if (select_new_fcf) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2839 Update current FCF record "
					"(x%x) with new FCF record (x%x)\n",
					fcf_rec->fcf_indx,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/* Choose the new FCF by random selection */
				__lpfc_update_fcf_record(phba, fcf_rec,
							 new_fcf_record,
							 addr_mode, vlan_id, 0);
			}
		}
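		/*
		 * A note on the equal-priority tie-break above: this is
		 * incremental random (reservoir-style) selection. The Nth
		 * eligible record replaces the running pick only when
		 * lpfc_sli4_new_fcf_random_select(phba, N) succeeds;
		 * assuming that helper succeeds with probability 1/N, every
		 * equal-priority FCF seen so far ends up equally likely to
		 * be the final pick, without buffering the whole FCF table.
		 */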
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2840 Update initial FCF candidate "
				"with FCF (x%x)\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record));
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
		/* Setup initial running random FCF selection count */
		phba->fcf.eligible_fcf_cnt = 1;
		/* Seeding the random number generator for random selection */
		seed = (uint32_t)(0xFFFFFFFF & jiffies);
		srandom32(seed);
	}
	spin_unlock_irq(&phba->hbalock);
	goto read_next_fcf;
read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					       "2782 No suitable FCF found: "
					       "(x%x/x%x)\n",
					       phba->fcoe_eventtag_at_fcf_scan,
					       bf_get(lpfc_fcf_record_fcf_index,
						      new_fcf_record));
				spin_lock_irq(&phba->hbalock);
				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
					phba->hba_flag &= ~FCF_TS_INPROG;
					spin_unlock_irq(&phba->hbalock);
					/* Unregister in-use FCF and rescan */
					lpfc_printf_log(phba, KERN_INFO,
							LOG_FIP,
							"2864 On devloss tmo "
							"unreg in-use FCF and "
							"rescan FCF table\n");
					lpfc_unregister_fcf_rescan(phba);
					return;
				}
				/*
				 * Let next new FCF event trigger fast failover
				 */
				phba->hba_flag &= ~FCF_TS_INPROG;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2842 Replace in-use FCF (x%x) "
					"with failover FCF (x%x)\n",
					phba->fcf.current_rec.fcf_indx,
					phba->fcf.failover_rec.fcf_indx);
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/*
			 * Mark the fast FCF failover rediscovery completed
			 * and the start of the first round of the roundrobin
			 * failover.
			 */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;

			if (phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * In case the current in-use FCF record no
				 * longer existed during FCF discovery that
				 * was not triggered by fast FCF failover
				 * process, treat it as fast FCF failover.
				 */
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"2841 In-use FCF record (x%x) "
						"not reported, entering fast "
						"FCF failover mode scanning.\n",
						phba->fcf.current_rec.fcf_indx);
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
				spin_unlock_irq(&phba->hbalock);
				lpfc_sli4_mbox_cmd_free(phba, mboxq);
				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						LPFC_FCOE_FCF_GET_FIRST);
				return;
			}
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function for FLOGI failure roundrobin FCF failover
 * read FCF record mailbox command from the eligible FCF record bmask for
 * performing the failover. If the FCF read back is not valid/available, it
 * falls through to retrying FLOGI to the currently registered FCF again.
 * Otherwise, if the FCF read back is valid and available, it will set the
 * newly read FCF record to the failover FCF record, unregister the currently
 * registered FCF record, copy the failover FCF record to the current
 * FCF record, and then register the current FCF record before proceeding
 * to trying FLOGI on the new failover FCF.
 */
static void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index, fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, stop the roundrobin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		phba->hba_flag &= ~FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		goto out;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto error_out;
	}

	/* Get the needed parameters from FCF record */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	if (!rc) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) "
				"from roundrobin bmask\n", fcf_index);
		/* Clear roundrobin bmask bit for ineligible FCF */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		/* Perform next round of roundrobin FCF failover */
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
		if (rc)
			goto out;
		goto error_out;
	}

	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2760 Perform FLOGI roundrobin FCF failover: "
				"FCF (x%x) back to FCF (x%x)\n",
				phba->fcf.current_rec.fcf_indx, fcf_index);
		/* Wait 500 ms before retrying FLOGI to current FCF */
		msleep(500);
		lpfc_initial_flogi(phba->pport);
		goto out;
	}

	/* Upload new FCF record to the failover FCF record */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
			phba->fcf.failover_rec.fcf_indx, fcf_index);
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 Perform FLOGI roundrobin FCF failover: FCF "
			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);

error_out:
	lpfc_register_fcf(phba);
out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the callback function of read FCF record mailbox command for
 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
 * failover when a new FCF event happened. If the FCF read back is
 * valid/available and it passes the connection list check, it updates
 * the bmask for the eligible FCF record for roundrobin failover.
 */
static void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;

	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
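
/*
 * Note on the three READ_FCF_RECORD completions above: the scan handler
 * selects and registers an FCF and kick-starts discovery, the roundrobin
 * handler fails an unsuccessful FLOGI over to a new FCF, and this handler
 * only refreshes the eligible-FCF roundrobin bmask while an FCF discovery
 * is already in progress.
 */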
/**
 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox data structure.
 *
 * This function handles completion of init vpi mailbox command.
 */
static void
lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2609 Init VPI mailbox failed 0x%x\n",
				 mboxq->u.mb.mbxStatus);
		mempool_free(mboxq, phba->mbox_mem_pool);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	/* If this port is physical port or FDISC is done, do reg_vpi */
	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "2731 Cannot find fabric "
					 "controller node\n");
		else
			lpfc_register_new_vport(phba, vport, ndlp);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
		lpfc_initial_fdisc(vport);
	else {
		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2606 No NPIV Fabric support\n");
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the VPI
 * for the vport.
 */
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}
	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
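
/*
 * Usage note: lpfc_issue_init_vpi() is fire-and-forget; completion is
 * handled in lpfc_init_vpi_cmpl() above, which either registers the new
 * vport (physical port, or FDISC already done) or issues the initial
 * FDISC. The typical caller is lpfc_start_fdiscs() below, for vports
 * still flagged FC_VPORT_NEEDS_INIT_VPI.
 */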
/**
 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function loops through the list of vports on the @phba and issues an
 * FDISC if possible.
 */
void
lpfc_start_fdiscs(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			/* There are no vpi for this vport */
			if (vports[i]->vpi > phba->max_vpi) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_FAILED);
				continue;
			}
			if (phba->fc_topology == TOPOLOGY_LOOP) {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_LINKDOWN);
				continue;
			}
			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
				lpfc_issue_init_vpi(vports[i]);
				continue;
			}
			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
				lpfc_initial_fdisc(vports[i]);
			else {
				lpfc_vport_set_state(vports[i],
						     FC_VPORT_NO_FABRIC_SUPP);
				lpfc_printf_vlog(vports[i], KERN_ERR,
						 LOG_ELS, "0259 No NPIV "
						 "Fabric support\n");
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
static void
lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *dmabuf = mboxq->context1;
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2018 REG_VFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);
			/* Start discovery */
			lpfc_disc_start(vport);
			goto fail_free_mem;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		goto fail_free_mem;
	}
	/* The VPI is implicitly registered when the VFI is registered */
	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VFI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		lpfc_start_fdiscs(phba);
		lpfc_do_scr_ns_plogi(phba, vport);
	}

fail_free_mem:
	mempool_free(mboxq, phba->mbox_mem_pool);
	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
	struct lpfc_vport  *vport = pmb->vport;

	/* Check for error */
	if (mb->mbxStatus) {
		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
				 mb->mbxStatus, vport->port_state);
		lpfc_linkdown(phba);
		goto out;
	}

	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
	       sizeof (struct serv_parm));
	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(vport->fc_nodename));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(vport->fc_portname));
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	pmb->context1 = NULL;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_issue_clear_la(phba, vport);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
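
/*
 * Note on the soft WWN handling above: an administrator-configured 64-bit
 * name (cfg_soft_wwnn/cfg_soft_wwpn) overrides the node/port name read
 * back in the service parameters before it is copied into
 * fc_nodename/fc_portname; u64_to_wwn() simply unpacks the u64 into the
 * 8-byte big-endian wwn array of struct lpfc_name.
 */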
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
	int i;
	struct lpfc_dmabuf *mp;
	int rc;
	struct fcf_record *fcf_record;

	spin_lock_irq(&phba->hbalock);
	switch (la->UlnkSpeed) {
	case LA_1GHZ_LINK:
		phba->fc_linkspeed = LA_1GHZ_LINK;
		break;
	case LA_2GHZ_LINK:
		phba->fc_linkspeed = LA_2GHZ_LINK;
		break;
	case LA_4GHZ_LINK:
		phba->fc_linkspeed = LA_4GHZ_LINK;
		break;
	case LA_8GHZ_LINK:
		phba->fc_linkspeed = LA_8GHZ_LINK;
		break;
	case LA_10GHZ_LINK:
		phba->fc_linkspeed = LA_10GHZ_LINK;
		break;
	default:
		phba->fc_linkspeed = LA_UNKNW_LINK;
		break;
	}

	phba->fc_topology = la->topology;
	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* if npiv is enabled and this adapter supports npiv log
		 * a message that npiv is not supported in this topology
		 */
		if (phba->cfg_enable_npiv && phba->max_vpi)
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1309 Link Up Event npiv not supported in loop "
				"topology\n");
		/* Get Loop Map information */
		if (la->il)
			vport->fc_flag |= FC_LBIT;

		vport->fc_myDID = la->granted_AL_PA;
		i = la->un.lilpBde64.tus.f.bdeSize;

		if (i == 0) {
			phba->alpa_map[0] = 0;
		} else {
			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
				int numalpa, j, k;
				union {
					uint8_t pamap[16];
					struct {
						uint32_t wd1;
						uint32_t wd2;
						uint32_t wd3;
						uint32_t wd4;
					} pa;
				} un;
				numalpa = phba->alpa_map[0];
				j = 0;
				while (j < numalpa) {
					memset(un.pamap, 0, 16);
					for (k = 1; j < numalpa; k++) {
						un.pamap[k - 1] =
							phba->alpa_map[j + 1];
						j++;
						if (k == 16)
							break;
					}
					/* Link Up Event ALPA map */
					lpfc_printf_log(phba,
							KERN_WARNING,
							LOG_LINK_EVENT,
							"1304 Link Up Event "
							"ALPA map Data: x%x "
							"x%x x%x x%x\n",
							un.pa.wd1, un.pa.wd2,
							un.pa.wd3, un.pa.wd4);
				}
			}
		}
	} else {
		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
			if (phba->max_vpi && phba->cfg_enable_npiv &&
			   (phba->sli_rev == 3))
				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
		}
		vport->fc_myDID = phba->fc_pref_DID;
		vport->fc_flag |= FC_LBIT;
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;

	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
	if (rc) {
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}
	sparam_mbox->vport = vport;
	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(sparam_mbox, phba->mbox_mem_pool);
		goto out;
	}

	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
		 * is phase 1 implementation that support FCF index 0 and driver
		 * defaults.
		 */
		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
			fcf_record = kzalloc(sizeof(struct fcf_record),
					GFP_KERNEL);
			if (unlikely(!fcf_record)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2554 Could not allocate memory for "
					"fcf record\n");
				rc = -ENODEV;
				goto out;
			}

			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
						LPFC_FCOE_FCF_DEF_INDEX);
			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
			if (unlikely(rc)) {
				lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"2013 Could not manually add FCF "
					"record 0, status %d\n", rc);
				rc = -ENODEV;
				kfree(fcf_record);
				goto out;
			}
			kfree(fcf_record);
		}
		/*
		 * The driver is expected to do FIP/FCF. Call the port
		 * and get the FCF Table.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			return;
		}
		/* This is the initial FCF discovery scan */
		phba->fcf.fcf_flag |= FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2778 Start FCF table scan at linkup\n");
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/* Reset FCF roundrobin bmask for new discovery */
		memset(phba->fcf.fcf_rr_bmask, 0,
		       sizeof(*phba->fcf.fcf_rr_bmask));
	}

	return;
out:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
			 vport->port_state, sparam_mbox, cfglink_mbox);
	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_enable_la(struct lpfc_hba *phba)
{
	uint32_t control;
	struct lpfc_sli *psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	spin_unlock_irq(&phba->hbalock);
}
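
/*
 * On SLI-3 and earlier the Link Attention interrupt is gated by the
 * HC_LAINT_ENA bit of the Host Control register; the trailing readl()
 * above flushes the posted PCI write, so the enable is on the wire
 * before hbalock is dropped.
 */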
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
	lpfc_linkdown(phba);
	lpfc_enable_la(phba);
	lpfc_unregister_unused_fcf(phba);
	/* turn on Link Attention interrupts - no CLEAR_LA needed */
}
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	READ_LA_VAR *la;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

	/* Unblock ELS traffic */
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	/* Check for error */
	if (mb->mbxStatus) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1307 READ_LA mbox error x%x state x%x\n",
				mb->mbxStatus, vport->port_state);
		lpfc_mbx_issue_link_down(phba);
		phba->link_state = LPFC_HBA_ERROR;
		goto lpfc_mbx_cmpl_read_la_free_mbuf;
	}

	la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;

	memcpy(&phba->alpa_map[0], mp->virt, 128);

	spin_lock_irq(shost->host_lock);
	if (la->pb)
		vport->fc_flag |= FC_BYPASSED_MODE;
	else
		vport->fc_flag &= ~FC_BYPASSED_MODE;
	spin_unlock_irq(shost->host_lock);

	if ((phba->fc_eventTag  < la->eventTag) ||
	    (phba->fc_eventTag == la->eventTag)) {
		phba->fc_stat.LinkMultiEvent++;
		if (la->attType == AT_LINK_UP)
			if (phba->fc_eventTag != 0)
				lpfc_linkdown(phba);
	}

	phba->fc_eventTag = la->eventTag;
	spin_lock_irq(&phba->hbalock);
	if (la->mm)
		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
	else
		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
	spin_unlock_irq(&phba->hbalock);

	phba->link_events++;
	if (la->attType == AT_LINK_UP && (!la->mm)) {
		phba->fc_stat.LinkUp++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1306 Link Up Event in loop back mode "
					"x%x received Data: x%x x%x x%x x%x\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0]);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
					"1303 Link Up Event x%x received "
					"Data: x%x x%x x%x x%x x%x x%x %d\n",
					la->eventTag, phba->fc_eventTag,
					la->granted_AL_PA, la->UlnkSpeed,
					phba->alpa_map[0], la->mm, la->fa,
					phba->wait_4_mlo_maint_flg);
		}
		lpfc_mbx_process_link_up(phba, la);
	} else if (la->attType == AT_LINK_DOWN) {
		phba->fc_stat.LinkDown++;
		if (phba->link_flag & LS_LOOPBACK_MODE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1308 Link Down Event in loop back mode "
				"x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1305 Link Down Event x%x received "
				"Data: x%x x%x x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag,
				la->mm, la->fa);
		}
		lpfc_mbx_issue_link_down(phba);
	}
	if (la->mm && la->attType == AT_LINK_UP) {
		if (phba->link_state != LPFC_LINK_DOWN) {
			phba->fc_stat.LinkDown++;
			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1312 Link Down Event x%x received "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
			lpfc_mbx_issue_link_down(phba);
		} else
			lpfc_enable_la(phba);

		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1310 Menlo Maint Mode Link up Event x%x rcvd "
				"Data: x%x x%x x%x\n",
				la->eventTag, phba->fc_eventTag,
				phba->pport->port_state, vport->fc_flag);
		/*
		 * The cmnd that triggered this will be waiting for this
		 * signal.
		 */
		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
		if (phba->wait_4_mlo_maint_flg) {
			phba->wait_4_mlo_maint_flg = 0;
			wake_up_interruptible(&phba->wait_4_mlo_m_q);
		}
	}

	if (la->fa) {
		if (la->mm)
			lpfc_issue_clear_la(phba, vport);
		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
				"1311 fa %d\n", la->fa);
	}

lpfc_mbx_cmpl_read_la_free_mbuf:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;

	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/* We rcvd a rscn after issuing this
		 * mbox reg login, we may have cycled
		 * back through the state and be
		 * back at reg login state so this
		 * mbox needs to be ignored because
		 * there is another reg login in
		 * process.
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock_irq(shost->host_lock);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba,
				pmb->u.mb.un.varRegLogin.rpi);

	} else
		/* Good status, call state machine */
		lpfc_disc_state_machine(vport, ndlp, pmb,
				NLP_EVT_CMPL_REG_LOGIN);

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);

	return;
}
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x0020:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		break;
	/* If VPI is busy, reset the HBA */
	case 0x9700:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
			vport->vpi, mb->mbxStatus);
		if (!(phba->pport->load_flag & FC_UNLOADING))
			lpfc_workq_post_event(phba, NULL, NULL,
				LPFC_EVT_RESET_HBA);
	}
	spin_lock_irq(shost->host_lock);
	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->unreg_vpi_cmpl = VPORT_OK;
	mempool_free(pmb, phba->mbox_mem_pool);
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete()
	 */
	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
		scsi_host_put(shost);
}
int
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return 1;

	lpfc_unreg_vpi(phba, vport->vpi, mbox);
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				 "1800 Could not issue unreg_vpi\n");
		mempool_free(mbox, phba->mbox_mem_pool);
		vport->unreg_vpi_cmpl = VPORT_ERROR;
		return rc;
	}
	return 0;
}
void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	MAILBOX_t *mb = &pmb->u.mb;

	switch (mb->mbxStatus) {
	case 0x0011:
	case 0x9601:
	case 0x9602:
		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
				 mb->mbxStatus);
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);
		vport->fc_myDID = 0;
		goto out;
	}

	spin_lock_irq(shost->host_lock);
	vport->vpi_state |= LPFC_VPI_REGISTERED;
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	vport->num_disc_nodes = 0;
	/* go thru NPR list and issue ELS PLOGIs */
	if (vport->fc_npr_cnt)
		lpfc_els_disc_plogi(vport);

	if (!vport->num_disc_nodes) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}
	vport->port_state = LPFC_VPORT_READY;

out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates vports
 * based on the information returned from the HBA.
 */
void
lpfc_create_static_vport(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	struct static_vport_info *vport_info;
	int rc = 0, i;
	struct fc_vport_identifiers vport_id;
	struct fc_vport *new_fc_vport;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	uint16_t offset = 0;
	uint8_t *vport_buff;
	struct lpfc_dmabuf *mp;
	uint32_t byte_count = 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0542 lpfc_create_static_vport failed to"
				" allocate mailbox memory\n");
		return;
	}

	mb = &pmb->u.mb;

	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
	if (!vport_info) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0543 lpfc_create_static_vport failed to"
				" allocate vport_info\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return;
	}

	vport_buff = (uint8_t *) vport_info;
	do {
		if (lpfc_dump_static_vport(phba, pmb, offset))
			goto out;

		pmb->vport = phba->pport;
		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);

		if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0544 lpfc_create_static_vport failed to"
				" issue dump mailbox command ret 0x%x "
				"status 0x%x\n",
				rc, mb->mbxStatus);
			goto out;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			byte_count = pmb->u.mqe.un.mb_words[5];
			mp = (struct lpfc_dmabuf *) pmb->context2;
			if (byte_count > sizeof(struct static_vport_info) -
					offset)
				byte_count = sizeof(struct static_vport_info)
					- offset;
			memcpy(vport_buff + offset, mp->virt, byte_count);
			offset += byte_count;
		} else {
			if (mb->un.varDmp.word_cnt >
				sizeof(struct static_vport_info) - offset)
				mb->un.varDmp.word_cnt =
					sizeof(struct static_vport_info)
						- offset;
			byte_count = mb->un.varDmp.word_cnt;
			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				vport_buff + offset,
				byte_count);

			offset += byte_count;
		}

	} while (byte_count &&
		offset < sizeof(struct static_vport_info));

	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
			!= VPORT_INFO_REV)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0545 lpfc_create_static_vport bad"
			" information header 0x%x 0x%x\n",
			le32_to_cpu(vport_info->signature),
			le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);

		goto out;
	}

	shost = lpfc_shost_from_vport(phba->pport);

	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
		memset(&vport_id, 0, sizeof(vport_id));
		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
		if (!vport_id.port_name || !vport_id.node_name)
			continue;

		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vport_id.vport_type = FC_PORTTYPE_NPIV;
		vport_id.disable = false;
		new_fc_vport = fc_vport_create(shost, 0, &vport_id);

		if (!new_fc_vport) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0546 lpfc_create_static_vport failed to"
				" create vport\n");
			continue;
		}

		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
		vport->vport_flag |= STATIC_VPORT;
	}

out:
	kfree(vport_info);
	if (rc != MBX_TIMEOUT) {
		if (pmb->context2) {
			mp = (struct lpfc_dmabuf *) pmb->context2;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return;
}
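
/*
 * Note on the DUMP loop above: config region 22 is pulled in
 * mailbox-sized chunks. On SLI4 the byte count comes back in mqe
 * mb_words[5] with the payload in a separate DMA buffer (pmb->context2),
 * while on SLI3 the payload sits in the mailbox image itself at
 * DMP_RSP_OFFSET; either way the chunk is clamped so the copies never
 * overrun struct static_vport_info.
 */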
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *) pmb->context2;
	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0258 Register Fabric login error: 0x%x\n",
				 mb->mbxStatus);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			/* Decrement the reference count to ndlp after the
			 * references to the ndlp are done.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		/* Decrement the reference count to ndlp after the references
		 * to the ndlp are done.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* when physical port receives logo, do not start
		 * vport discovery */
		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
			lpfc_start_fdiscs(phba);
		else
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
		lpfc_do_scr_ns_plogi(phba, vport);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	/* Drop the reference count from the mbox at the end after
	 * all the current references to the ndlp have been done.
	 */
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	if (mb->mbxStatus) {
out:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0260 Register NameServer error: 0x%x\n",
				 mb->mbxStatus);
		/* decrement the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
		lpfc_nlp_not_used(ndlp);

		if (phba->fc_topology == TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		return;
	}

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	if (vport->port_state < LPFC_VPORT_READY) {
		/* Link up discovery requires Fabric registration. */
		lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
		lpfc_issue_els_scr(vport, SCR_DID, 0);
	}

	vport->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		goto out;
	}

	/* decrement the node reference count held for this
	 * callback function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct fc_rport  *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;
	struct lpfc_hba  *phba = vport->phba;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	/*
	 * We leave our node pointer in rport->dd_data when we unregister a
	 * FCP target port. But fc_remote_port_add zeros the space to which
	 * rport->dd_data points. So, if we're reusing a previously
	 * registered port, drop the reference that we took the last time we
	 * registered the port.
	 */
	if (ndlp->rport && ndlp->rport->dd_data &&
	    ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
		lpfc_nlp_put(ndlp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport add:       did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
	if (!rport || !get_device(&rport->dev)) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	rdata = rport->dd_data;
	rdata->pnode = lpfc_nlp_get(ndlp);

	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(rport, rport_ids.roles);

	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
	struct fc_rport *rport = ndlp->rport;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport delete:    did:x%x flg:x%x type x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	fc_remote_port_delete(rport);

	return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	switch (state) {
	case NLP_STE_UNUSED_NODE:
		vport->fc_unused_cnt += count;
		break;
	case NLP_STE_PLOGI_ISSUE:
		vport->fc_plogi_cnt += count;
		break;
	case NLP_STE_ADISC_ISSUE:
		vport->fc_adisc_cnt += count;
		break;
	case NLP_STE_REG_LOGIN_ISSUE:
		vport->fc_reglogin_cnt += count;
		break;
	case NLP_STE_PRLI_ISSUE:
		vport->fc_prli_cnt += count;
		break;
	case NLP_STE_UNMAPPED_NODE:
		vport->fc_unmap_cnt += count;
		break;
	case NLP_STE_MAPPED_NODE:
		vport->fc_map_cnt += count;
		break;
	case NLP_STE_NPR_NODE:
		vport->fc_npr_cnt += count;
		break;
	}
	spin_unlock_irq(shost->host_lock);
}
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       int old_state, int new_state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (new_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
		ndlp->nlp_type |= NLP_FC_NODE;
	}
	if (new_state == NLP_STE_MAPPED_NODE)
		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
	if (new_state == NLP_STE_NPR_NODE)
		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

	/* Transport interface */
	if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
			    old_state == NLP_STE_UNMAPPED_NODE)) {
		vport->phba->nport_event_cnt++;
		lpfc_unregister_remote_port(ndlp);
	}

	if (new_state == NLP_STE_MAPPED_NODE ||
	    new_state == NLP_STE_UNMAPPED_NODE) {
		vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
		lpfc_register_remote_port(vport, ndlp);
	}
	if ((new_state == NLP_STE_MAPPED_NODE) &&
	    (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered, if there is no buffer for
		 * statistical data collection allocate buffer.
		 */
		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_KERNEL);

		if (!ndlp->lat_data)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				"0286 lpfc_nlp_state_cleanup failed to "
				"allocate statistical data buffer DID "
				"0x%x\n", ndlp->nlp_DID);
	}
	/*
	 * if we added to Mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range - move the node to the
	 * Unmapped List
	 */
	if (new_state == NLP_STE_MAPPED_NODE &&
	    (!ndlp->rport ||
	     ndlp->rport->scsi_target_id == -1 ||
	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
		spin_unlock_irq(shost->host_lock);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   int state)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	int  old_state = ndlp->nlp_state;
	char name1[16], name2[16];

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0904 NPort state transition x%06x, %s -> %s\n",
			 ndlp->nlp_DID,
			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
			 lpfc_nlp_state_name(name2, sizeof(name2), state));

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node statechg    did:x%x old:%d ste:%d",
		ndlp->nlp_DID, old_state, state);

	if (old_state == NLP_STE_NPR_NODE &&
	    state != NLP_STE_NPR_NODE)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (old_state == NLP_STE_UNMAPPED_NODE) {
		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		ndlp->nlp_type &= ~NLP_FC_NODE;
	}

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	} else if (old_state)
		lpfc_nlp_counters(vport, old_state, -1);

	ndlp->nlp_state = state;
	lpfc_nlp_counters(vport, state, 1);
	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
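
/*
 * Bookkeeping invariant kept by the routines above: a node linked on
 * vport->fc_nodes is counted in exactly one per-state counter, so a
 * transition decrements the old state's counter (unless the node was not
 * yet listed) and increments the new one before lpfc_nlp_state_cleanup()
 * synchronizes the FC transport view.
 */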
void
lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (list_empty(&ndlp->nlp_listp)) {
		spin_lock_irq(shost->host_lock);
		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	spin_lock_irq(shost->host_lock);
	list_del_init(&ndlp->nlp_listp);
	spin_unlock_irq(shost->host_lock);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}

static void
lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
				NLP_STE_UNUSED_NODE);
}
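
/*
 * lpfc_dequeue_node() and lpfc_disable_node() above differ only in list
 * handling: both cancel the delayed-retry timer, drop the state counter
 * and run cleanup toward NLP_STE_UNUSED_NODE, but only the dequeue
 * variant actually unlinks the node from vport->fc_nodes under the host
 * lock.
 */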
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is because
 * the life-span of @ndlp might go beyond the existence of @vport, as the
 * final release of ndlp is determined by its reference count, and the
 * operation on @ndlp needs the reference to phba.
 */
static inline void
lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	uint32_t did)
{
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->phba = vport->phba;
	ndlp->nlp_sid = NLP_NO_SID;
	kref_init(&ndlp->kref);
	NLP_INT_NODE_ACT(ndlp);
	atomic_set(&ndlp->cmd_pending, 0);
	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
}
struct lpfc_nodelist *
lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 int state)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t did;
	unsigned long flags;

	if (!ndlp)
		return NULL;

	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* The ndlp should not be in memory free mode */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0277 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}
	/* The ndlp should not already be in active mode */
	if (NLP_CHK_NODE_ACT(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0278 lpfc_enable_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return NULL;
	}

	/* Keep the original DID */
	did = ndlp->nlp_DID;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable:       did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
	 * until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
	lpfc_nlp_put(ndlp);
	return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}

	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer:  tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}
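
/*
 * Worked example of the timeout arithmetic above, assuming the common FC
 * defaults (an assumption, not read from this file): with fc_edtov =
 * 2000 ms the FAN case yields ((2000 + 999) / 1000) + 1 = 3 seconds, and
 * with fc_ratov = 10 s the normal case yields 3 * 10 + 3 = 33 seconds,
 * satisfying the "3 * R_A_TOV for CT" rule with a small margin.
 */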
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer:  state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);

	return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport    *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}
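
/*
 * Note on the ELS-ring switch above: the cases carry no break statements,
 * so when one match test fails, control falls through to the next test.
 * Whether every fall-through is intentional is not documented in the
 * source; callers only rely on a non-zero return meaning "this iocb
 * belongs to this ndlp", as lpfc_no_rpi() below does when draining txq.
 */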
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we are
				 * looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so deque and call compl
					   with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t    *mbox;
	int rc;

	if (ndlp->nlp_flag & NLP_RPI_VALID) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
			mbox->vport = vport;
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_VALID;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}
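
/*
 * The mailbox pattern used above recurs throughout this file; a minimal
 * sketch, assuming any single fire-and-forget command (lpfc_unreg_login()
 * stands in for whichever command is being prepared):
 */
#if 0
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; /* default cleanup */
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
		    MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
		/* on success the completion handler frees the mailbox */
	}
#endif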
/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_VALID) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba  = vport->phba;
	LPFC_MBOXQ_t     *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0280 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				"0281 lpfc_cleanup_node: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
			(ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_free_rpi(phba,
					 mb->u.mb.un.varRegLogin.rpi);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);

	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_VALID)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, 0);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = NULL;
				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if we
	 * do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}
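
/*
 * Worked example of the matching above (the addresses are illustrative):
 * a D_ID is domain.area.id. With fc_myDID = 0x010200, a node that logged
 * in with the loop-only DID 0x0000EF matches a search for 0x0102EF: the
 * low bytes (id = 0xEF) agree, the searched domain/area (01.02) equal
 * ours, and the stored DID has domain = area = 0. The second block
 * handles the mirrored case where the stored DID is fully qualified and
 * the search key is loop-only.
 */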
/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
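
/*
 * Index arithmetic above, worked through: lpfcAlpaArray[] is ordered from
 * the highest ALPA (0xEF at index 0) down to the lowest (0x01 at index
 * FC_MAXLOOP - 1). With cfg_scan_down set, index = j walks 0xEF -> 0x01;
 * otherwise index = FC_MAXLOOP - j - 1 walks 0x01 -> 0xEF.
 */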
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int  rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
		(phba->sli_rev == LPFC_SLI_REV4))
		return;

			/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 *  Ignore completion for all IOCBs on tx and txcmpl queue for the ELS
 *  ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t     *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
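
/*
 * Illustrative sketch only: how a timer with this handler is typically
 * armed elsewhere in the driver (the real wiring lives in the vport setup
 * code, not here).
 */
#if 0
	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;
	/* lpfc_set_disctmo() later arms it via mod_timer() */
#endif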
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
				/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
				/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;
	pmb->context2 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */

	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
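
/*
 * A minimal sketch of extending the lookup machinery (the filter and the
 * wrapper below are hypothetical, not part of the driver): any predicate
 * with the node_filter signature can be plugged into __lpfc_find_node().
 */
#if 0
static int
lpfc_filter_by_sid(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *sid = param;

	return ndlp->nlp_sid == *sid;
}

/* caller takes shost->host_lock, as lpfc_findnode_wwpn() does */
static struct lpfc_nodelist *
__lpfc_findnode_sid(struct lpfc_vport *vport, uint16_t sid)
{
	return __lpfc_find_node(vport, lpfc_filter_by_sid, &sid);
}
#endif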
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* Check ndlp usage to prevent incrementing the reference
		 * count of an ndlp that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, the kref_put returns 1 when decrementing a reference
	 * count that was 1, it invokes the release callback function,
	 * but it still left the reference count as 1 (not actually
	 * performs the last decrementation). Otherwise, it actually
	 * decrements the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
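
/*
 * Hedged usage sketch: because a return of 1 means the release callback
 * ran, callers that may hold the last reference must not touch the node
 * afterwards.
 */
#if 0
	if (lpfc_nlp_put(ndlp))
		ndlp = NULL;	/* released; pointer is now dangling */
#endif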
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flg %x still "
					"logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
				if (ndlp->nlp_flag & NLP_RPI_VALID)
					ret = 1;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_cleanup_pending_mbox(vports[i]);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_unreg_all_rpis(vports[i]);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(phba->pport);
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	return 0;
}
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset FCF roundrobin bmask for new discovery */
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
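
/*
 * Worked example of the record_count arithmetic above, under assumed
 * illustrative sizes: conn_hdr->length counts 32-bit words, so a header
 * advertising length = 16 describes 64 bytes of records; if
 * sizeof(struct lpfc_fcf_conn_rec) were 32, that would give 64 / 32 = 2
 * records to walk.
 */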
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
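
/*
 * TLV layout worked through: each record is one header word (type byte at
 * buff[offset], length byte in words at buff[offset + 1]) followed by
 * rec_length data words. A record with rec_length = 3 therefore occupies
 * 4 + 3 * 4 = 16 bytes, and the next header starts at offset + 16, which
 * is exactly the stride the loop above computes.
 */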
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words, signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}