/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
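
/*
 * lpfc_terminate_rport_io - Abort outstanding FCP I/O for a remote port.
 * Called by the FC transport when a remote port is being torn down; if the
 * node is still valid and has a SCSI ID, all iocbs queued to that target on
 * the FCP ring are aborted.
 */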
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp))
		return;

	spin_lock_irq(&phba->hbalock);
	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
/*
 * This function is called from the worker thread when dev_loss_tmo
 * expires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	uint8_t *name;
	int  put_node;
	int  put_rport;
	int warn_on = 0;

	rport = ndlp->rport;

	if (!rport)
		return;

	rdata = rport->dd_data;
	name = (uint8_t *) &ndlp->nlp_portname;
	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will cleanup the node
	 * appropriately we just need to cleanup the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return;
	}

	if (ndlp->nlp_type & NLP_FABRIC) {
		/* We will clean up these Nodes in linkup */
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		/* flush the target */
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	put_node = rdata->pnode != NULL;
	put_rport = ndlp->rport != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	if (put_rport)
		put_device(&rport->dev);

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates data
 * structure required for posting event. It also keeps track of
 * number of events pending and prevent event storm when there are
 * too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lot of fast event do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which need to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from worker thread, when the interrupt
 * context need to post an event. This function posts the event
 * to fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size =  sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
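
/*
 * lpfc_work_list_done - Drain the HBA work list.
 * Runs in worker thread context: removes each queued lpfc_work_evt from
 * phba->work_list (under hbalock) and dispatches it to the matching handler,
 * freeing the event afterwards unless the handler retains ownership.
 */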
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
					? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}
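
/*
 * lpfc_work_done - Main body of the worker thread.
 * Handles host attention conditions (error, mailbox and link attention),
 * SLI4 asynchronous and XRI abort events, per-vport timer driven work, and
 * deferred slow-path (ELS) ring events, then drains the work list.
 */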
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_FDMI_TMO)
				lpfc_fdmi_timeout_handler(vport);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_RAMP_UP_QUEUE)
				lpfc_ramp_up_queue_handler(phba);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
			lpfc_sli_handle_slow_ring_event(phba, pring,
							(status &
							 HA_RXMASK));
		}
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
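
/*
 * lpfc_do_work - Worker kthread entry point.
 * Sleeps on phba->work_waitq until LPFC_DATA_READY is set or the thread is
 * asked to stop, then calls lpfc_work_done() to process pending work.
 */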
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, -20);
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt  *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1  = arg1;
	evtp->evt_arg2  = arg2;
	evtp->evt       = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
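
/*
 * lpfc_cleanup_rpis - Recover or remove remote port login state.
 * Walks the vport node list on link failure, unregistering RPIs where
 * required and running each node through the discovery state machine with
 * either DEVICE_RM or DEVICE_RECOVERY depending on the remove flag.
 */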
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int  rc;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
			((vport->port_type == LPFC_NPIV_PORT) &&
			(ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
					     remove
					     ? NLP_EVT_DEVICE_RM
					     : NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
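
/*
 * lpfc_port_link_failure - Per-vport cleanup when the link goes down.
 * Flushes received buffers, pending RSCN and ELS activity, recovers the
 * RPIs, and stops the discovery timer for this vport.
 */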
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down:       state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);
}
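
/*
 * lpfc_linkdown - HBA level link-down processing.
 * Blocks SCSI I/O, propagates the link-down event to every vport, cleans up
 * firmware default RPIs, and resets point-to-point state if needed.
 */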
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t          *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
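
/*
 * lpfc_linkup_cleanup_nodes - Node list cleanup performed at link-up.
 * Unregisters RPIs for fabric nodes (moving them to NPR state) and for any
 * node that is not marked for ADISC, failing outstanding I/O for the latter.
 */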
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up:         top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
		(vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}
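
/*
 * lpfc_linkup - HBA level link-up processing.
 * Marks the link up, unblocks fabric iocbs, brings up each vport via
 * lpfc_linkup_port(), and issues CLEAR_LA on SLI-3 NPIV-enabled adapters.
 */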
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli   *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (phba->fc_topology == TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
			/* Need to wait for FAN - use discovery timer
			 * for timeout.  port_state is identically
			 * LPFC_LOCAL_CFG_LINK while waiting for FAN
			 */
			lpfc_set_disctmo(vport);
			return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI) {
		lpfc_initial_flogi(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;
	unsigned long flags;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "2017 REG_FCFI mbxStatus error x%x "
			 "HBA state x%x\n",
			 mboxq->u.mb.mbxStatus, vport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	/* If there is a pending FCoE event, restart FCF table scan. */
	if (lpfc_check_pending_fcoe_event(phba, 1)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return;
	}
	spin_lock_irqsave(&phba->hbalock, flags);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
		       uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
}
/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);

	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (phba->pport->port_state != LPFC_FLOGI)
			lpfc_initial_flogi(phba->pport);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
		GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irqsave(&phba->hbalock, flags);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irqrestore(&phba->hbalock, flags);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record used by boot bios.
 * @addr_mode: The address mode to be used by this FCF
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery else returns zero. If this FCF
 * record can be used for SAN discovery, the boot_flag will indicate if this FCF
 * is used by boot bios and addr_mode will indicate the addressing mode to be
 * used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in the vlan_id on return of the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to 0xFFFF.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			struct fcf_record *new_fcf_record,
			uint32_t *boot_flag, uint32_t *addr_mode,
			uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
		!bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	/*
	 * If there are no FCF connection table entry, driver connect to all
	 * FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record report a vlan id use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = 0xFFFF;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					     new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					    new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for the
			 * vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA required but FCF not support this continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA required but FCF not support this continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
				!(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to pick the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if FCF support the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_SPMA))
				*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			(*addr_mode & LPFC_FCF_FPMA))
				*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * of the FCF record
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = 0xFFFF;

		return 1;
	}

	return 0;
}
/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table need to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the driver
 * scans FCF entries. If there is any pending event, it will restart the
 * FCF scanning and return 1, else return 0.
 */
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state  >= LPFC_LINK_UP) &&
		(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
		 * flag
		 */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
/**
 * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing the
 * necessary error checking, non-embedded read FCF record mailbox command
 * SGE parsing, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 */
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      sizeof(struct fcf_record));

	return new_fcf_record;
}
/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if the LOG_FIP logging is
 * enabled.
 */
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This function iterates through all the fcf records available in
 * HBA and chooses the optimal FCF record for discovery. After finding
 * the FCF for discovery it registers the FCF record and kicks off
 * discovery.
 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
 * use an FCF record which matches fabric name and mac address of the
 * currently used FCF record.
 * If the driver supports only one FCF, it will try to use the FCF record
 * used by BOOT_BIOS.
 */
static void
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	struct lpfc_fcf_rec *fcf_rec = NULL;
	uint16_t vlan_id;
	int rc;

	/* If there is pending FCoE event restart FCF table scan */
	if (lpfc_check_pending_fcoe_event(phba, 0)) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		/* Let next new FCF event trigger fast failover */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Check the FCF record against the connection list */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/*
	 * If the fcf record does not match with connect list entries
	 * read the next entry; otherwise, this is an eligible FCF
	 * record for round robin FCF failover.
	 */
	if (!rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2781 FCF record fcf_index:x%x failed FCF "
				"connection list check, fcf_avail:x%x, "
				"fcf_valid:x%x\n",
				bf_get(lpfc_fcf_record_fcf_index,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_avail,
				       new_fcf_record),
				bf_get(lpfc_fcf_record_fcf_valid,
				       new_fcf_record));
		goto read_next_fcf;
	} else {
		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
		rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
		if (rc)
			goto read_next_fcf;
	}

	/*
	 * If this is not the first FCF discovery of the HBA, use last
	 * FCF record for the discovery. The condition that a rescan
	 * matches the in-use FCF record: fabric name, switch name, mac
	 * address, and vlan_id.
	 */
	spin_lock_irq(&phba->hbalock);
	if (phba->fcf.fcf_flag & FCF_IN_USE) {
		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
					new_fcf_record) &&
		    lpfc_sw_name_match(phba->fcf.current_rec.switch_name,
				       new_fcf_record) &&
		    lpfc_mac_addr_match(phba->fcf.current_rec.mac_addr,
					new_fcf_record) &&
		    lpfc_vlan_id_match(phba->fcf.current_rec.vlan_id,
				       vlan_id)) {
			phba->fcf.fcf_flag |= FCF_AVAILABLE;
			if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
				/* Stop FCF redisc wait timer if pending */
				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
				/* If in fast failover, mark it's completed */
				phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
							FCF_DISCOVERY);
			spin_unlock_irq(&phba->hbalock);
			goto out;
		}
		/*
		 * Read next FCF record from HBA searching for the matching
		 * with in-use record only if not during the fast failover
		 * period. In case of fast failover period, it shall try to
		 * determine whether the FCF record just read should be the
		 * next candidate.
		 */
		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
	}
	/*
	 * Update on failover FCF record only if it's in FCF fast-failover
	 * period; otherwise, update on current FCF record.
	 */
	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
		fcf_rec = &phba->fcf.failover_rec;
	else
		fcf_rec = &phba->fcf.current_rec;

	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the driver FCF record does not have boot flag
		 * set and new hba fcf record has boot flag set, use
		 * the new hba fcf record.
		 */
		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
			/* Choose this FCF record */
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, BOOT_ENABLE);
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the driver FCF record has boot flag set and the
		 * new hba FCF record does not have boot flag, read
		 * the next FCF record.
		 */
		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
			spin_unlock_irq(&phba->hbalock);
			goto read_next_fcf;
		}
		/*
		 * If the new hba FCF record has lower priority value
		 * than the driver FCF record, use the new record.
		 */
		if (new_fcf_record->fip_priority < fcf_rec->priority) {
			/* Choose this FCF record */
			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					addr_mode, vlan_id, 0);
		}
		spin_unlock_irq(&phba->hbalock);
		goto read_next_fcf;
	}
	/*
	 * This is the first suitable FCF record, choose this record for
	 * initial best-fit FCF.
	 */
	if (fcf_rec) {
		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
					 addr_mode, vlan_id, (boot_flag ?
					 BOOT_ENABLE : 0));
		phba->fcf.fcf_flag |= FCF_AVAILABLE;
	}
	spin_unlock_irq(&phba->hbalock);

read_next_fcf:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Case of FCF fast failover scan
			 */

			/*
			 * It has not found any suitable FCF record, cancel
			 * FCF scan inprogress, and do nothing
			 */
			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"2782 No suitable FCF record "
					"found during this round of "
					"post FCF rediscovery scan: "
					"fcf_evt_tag:x%x, fcf_index: "
					"x%x\n",
					phba->fcoe_eventtag_at_fcf_scan,
					bf_get(lpfc_fcf_record_fcf_index,
					       new_fcf_record));
				/*
				 * Let next new FCF event trigger fast
				 * failover
				 */
				spin_lock_irq(&phba->hbalock);
				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			/*
			 * It has found a suitable FCF record that is not
			 * the same as in-use FCF record, unregister the
			 * in-use FCF record, replace the in-use FCF record
			 * with the new FCF record, mark FCF fast failover
			 * completed, and then start register the new FCF
			 * record.
			 */

			/* Unregister the current in-use FCF record */
			lpfc_unregister_fcf(phba);

			/* Replace in-use record with the new record */
			memcpy(&phba->fcf.current_rec,
			       &phba->fcf.failover_rec,
			       sizeof(struct lpfc_fcf_rec));
			/* mark the FCF fast failover completed */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Set up the initial registered FCF index for FLOGI
			 * round robin FCF failover.
			 */
			phba->fcf.fcf_rr_init_indx =
				phba->fcf.failover_rec.fcf_indx;
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		} else {
			/*
			 * In case of transaction period to fast FCF failover,
			 * do nothing when search to the end of the FCF table.
			 */
			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
				return;
			/*
			 * Otherwise, initial scan or post linkdown rescan,
			 * register with the best FCF record found so far
			 * through the FCF scanning process.
			 */

			/* mark the initial FCF discovery completed */
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Set up the initial registered FCF index for FLOGI
			 * round robin FCF failover
			 */
			phba->fcf.fcf_rr_init_indx =
				phba->fcf.current_rec.fcf_indx;
			/* Register to the new FCF record */
			lpfc_register_fcf(phba);
		}
	} else
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
	return;

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);

	return;
}
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the completion handler for the READ_FCF_RECORD mailbox command
 * issued, on FLOGI failure, against an FCF index taken from the eligible
 * FCF record bmask in order to perform round robin FCF failover. If the FCF
 * record read back is not valid/available, it falls through to retrying
 * FLOGI to the currently registered FCF. Otherwise, if the FCF record read
 * back is valid and available, it copies the newly read FCF record into the
 * failover FCF record, unregisters the currently registered FCF record,
 * copies the failover FCF record to the current FCF record, and then
 * registers the new current FCF record before retrying FLOGI on the new
 * failover FCF.
 **/
void
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t next_fcf_index;
	uint16_t current_fcf_index;
	uint16_t vlan_id;
	/* If link state is not up, stop the round robin failover process */
	if (phba->link_state < LPFC_LINK_UP) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return;
	}

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Get the needed parameters from FCF record */
	lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				 &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	/* Upload new FCF record to the failover FCF record */
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
				 new_fcf_record, addr_mode, vlan_id,
				 (boot_flag ? BOOT_ENABLE : 0));
	spin_unlock_irq(&phba->hbalock);

	current_fcf_index = phba->fcf.current_rec.fcf_indx;

	/* Unregister the current in-use FCF record */
	lpfc_unregister_fcf(phba);

	/* Replace in-use record with the new record */
	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
	       sizeof(struct lpfc_fcf_rec));

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2783 FLOGI round robin FCF failover from FCF "
			"(index:x%x) to FCF (index:x%x).\n",
			current_fcf_index,
			bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
	lpfc_register_fcf(phba);
}
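/*
 * The failover hand-off above is order sensitive; a hedged outline of the
 * sequence (all names taken from the code above):
 *
 *	1. __lpfc_update_fcf_record() copies the newly read record into
 *	   phba->fcf.failover_rec under hbalock.
 *	2. lpfc_unregister_fcf() drops the FCF currently in use.
 *	3. memcpy() promotes failover_rec into current_rec.
 *	4. lpfc_register_fcf() registers the new current record, after which
 *	   FLOGI is retried against the new FCF.
 */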
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * This is the completion handler of the READ_FCF_RECORD mailbox command
 * used to update the eligible FCF bmask for FLOGI failure round robin FCF
 * failover when a new FCF event has happened. If the FCF record read back
 * is valid/available and it passes the connection list check, it updates
 * the bmask marking that FCF record as eligible for round robin failover.
 **/
void
lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct fcf_record *new_fcf_record;
	uint32_t boot_flag, addr_mode;
	uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id;
	int rc;
	/* If link state is not up, no need to proceed */
	if (phba->link_state < LPFC_LINK_UP)
		goto out;

	/* If FCF discovery period is over, no need to proceed */
	if (phba->fcf.fcf_flag & FCF_DISCOVERY)
		goto out;

	/* Parse the FCF record from the non-embedded mailbox command */
	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
						      &next_fcf_index);
	if (!new_fcf_record) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve a FCF record.\n");
		goto out;
	}

	/* Check the connection list for eligibility */
	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
				      &addr_mode, &vlan_id);

	/* Log the FCF record information if turned on */
	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
				      next_fcf_index);

	if (!rc)
		goto out;

	/* Update the eligible FCF record index bmask */
	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);

out:
	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
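/*
 * Illustrative note: lpfc_sli4_fcf_rr_index_set() effectively records the
 * FCF index in a per-HBA eligibility bitmap, conceptually equivalent to
 *
 *	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);	(assumed field name)
 *
 * so that a later FLOGI failure can round robin to the next eligible index.
 */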
2064 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2065 * @phba: pointer to lpfc hba data structure.
2066 * @mboxq: pointer to mailbox data structure.
2068 * This function handles completion of init vpi mailbox command.
2071 lpfc_init_vpi_cmpl(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2073 struct lpfc_vport
*vport
= mboxq
->vport
;
2074 struct lpfc_nodelist
*ndlp
;
2075 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2077 if (mboxq
->u
.mb
.mbxStatus
) {
2078 lpfc_printf_vlog(vport
, KERN_ERR
,
2080 "2609 Init VPI mailbox failed 0x%x\n",
2081 mboxq
->u
.mb
.mbxStatus
);
2082 mempool_free(mboxq
, phba
->mbox_mem_pool
);
2083 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2086 spin_lock_irq(shost
->host_lock
);
2087 vport
->fc_flag
&= ~FC_VPORT_NEEDS_INIT_VPI
;
2088 spin_unlock_irq(shost
->host_lock
);
2090 /* If this port is physical port or FDISC is done, do reg_vpi */
2091 if ((phba
->pport
== vport
) || (vport
->port_state
== LPFC_FDISC
)) {
2092 ndlp
= lpfc_findnode_did(vport
, Fabric_DID
);
2094 lpfc_printf_vlog(vport
, KERN_ERR
,
2096 "2731 Cannot find fabric "
2097 "controller node\n");
2099 lpfc_register_new_vport(phba
, vport
, ndlp
);
2100 mempool_free(mboxq
, phba
->mbox_mem_pool
);
2104 if (phba
->link_flag
& LS_NPIV_FAB_SUPPORTED
)
2105 lpfc_initial_fdisc(vport
);
2107 lpfc_vport_set_state(vport
, FC_VPORT_NO_FABRIC_SUPP
);
2108 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
2109 "2606 No NPIV Fabric support\n");
2111 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/**
 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
 * @vport: pointer to lpfc_vport data structure.
 *
 * This function issues an init_vpi mailbox command to initialize the VPI
 * for the vport.
 **/
void
lpfc_issue_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2607 Failed to allocate "
			"init_vpi mailbox\n");
		return;
	}

	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_vlog(vport, KERN_ERR,
			LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
		mempool_free(mboxq, vport->phba->mbox_mem_pool);
	}
}
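/*
 * The function above follows the driver's usual non-blocking mailbox
 * pattern. A minimal sketch of that pattern, assuming the same helpers:
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return;				(log and bail out)
 *	<build the command>(phba, mboxq, ...);
 *	mboxq->vport = vport;
 *	mboxq->mbox_cmpl = <completion handler>;
 *	if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * On success the completion handler owns (and frees) the mailbox memory.
 */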
2147 * lpfc_start_fdiscs - send fdiscs for each vports on this port.
2148 * @phba: pointer to lpfc hba data structure.
2150 * This function loops through the list of vports on the @phba and issues an
2151 * FDISC if possible.
2154 lpfc_start_fdiscs(struct lpfc_hba
*phba
)
2156 struct lpfc_vport
**vports
;
2159 vports
= lpfc_create_vport_work_array(phba
);
2160 if (vports
!= NULL
) {
2161 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
2162 if (vports
[i
]->port_type
== LPFC_PHYSICAL_PORT
)
2164 /* There are no vpi for this vport */
2165 if (vports
[i
]->vpi
> phba
->max_vpi
) {
2166 lpfc_vport_set_state(vports
[i
],
2170 if (phba
->fc_topology
== TOPOLOGY_LOOP
) {
2171 lpfc_vport_set_state(vports
[i
],
2175 if (vports
[i
]->fc_flag
& FC_VPORT_NEEDS_INIT_VPI
) {
2176 lpfc_issue_init_vpi(vports
[i
]);
2179 if (phba
->link_flag
& LS_NPIV_FAB_SUPPORTED
)
2180 lpfc_initial_fdisc(vports
[i
]);
2182 lpfc_vport_set_state(vports
[i
],
2183 FC_VPORT_NO_FABRIC_SUPP
);
2184 lpfc_printf_vlog(vports
[i
], KERN_ERR
,
2187 "Fabric support\n");
2191 lpfc_destroy_vport_work_array(phba
, vports
);
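/*
 * Per-vport decision ladder applied by lpfc_start_fdiscs() above
 * (summary only, no additional driver logic implied):
 *
 *	physical port                 -> skip, it uses FLOGI, not FDISC
 *	vpi > phba->max_vpi           -> no VPI available, mark vport failed
 *	loop topology                 -> NPIV not supported, set vport state
 *	FC_VPORT_NEEDS_INIT_VPI set   -> lpfc_issue_init_vpi() first
 *	LS_NPIV_FAB_SUPPORTED set     -> lpfc_initial_fdisc()
 *	otherwise                     -> FC_VPORT_NO_FABRIC_SUPP
 */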
2195 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
2197 struct lpfc_dmabuf
*dmabuf
= mboxq
->context1
;
2198 struct lpfc_vport
*vport
= mboxq
->vport
;
2199 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2201 if (mboxq
->u
.mb
.mbxStatus
) {
2202 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
2203 "2018 REG_VFI mbxStatus error x%x "
2205 mboxq
->u
.mb
.mbxStatus
, vport
->port_state
);
2206 if (phba
->fc_topology
== TOPOLOGY_LOOP
) {
2207 /* FLOGI failed, use loop map to make discovery list */
2208 lpfc_disc_list_loopmap(vport
);
2209 /* Start discovery */
2210 lpfc_disc_start(vport
);
2213 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2216 /* The VPI is implicitly registered when the VFI is registered */
2217 spin_lock_irq(shost
->host_lock
);
2218 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
2219 vport
->fc_flag
|= FC_VFI_REGISTERED
;
2220 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
2221 spin_unlock_irq(shost
->host_lock
);
2223 if (vport
->port_state
== LPFC_FABRIC_CFG_LINK
) {
2224 lpfc_start_fdiscs(phba
);
2225 lpfc_do_scr_ns_plogi(phba
, vport
);
2229 mempool_free(mboxq
, phba
->mbox_mem_pool
);
2230 lpfc_mbuf_free(phba
, dmabuf
->virt
, dmabuf
->phys
);
2236 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2238 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2239 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) pmb
->context1
;
2240 struct lpfc_vport
*vport
= pmb
->vport
;
2243 /* Check for error */
2244 if (mb
->mbxStatus
) {
2245 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
2246 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
2247 "0319 READ_SPARAM mbxStatus error x%x "
2249 mb
->mbxStatus
, vport
->port_state
);
2250 lpfc_linkdown(phba
);
2254 memcpy((uint8_t *) &vport
->fc_sparam
, (uint8_t *) mp
->virt
,
2255 sizeof (struct serv_parm
));
2256 if (phba
->cfg_soft_wwnn
)
2257 u64_to_wwn(phba
->cfg_soft_wwnn
,
2258 vport
->fc_sparam
.nodeName
.u
.wwn
);
2259 if (phba
->cfg_soft_wwpn
)
2260 u64_to_wwn(phba
->cfg_soft_wwpn
,
2261 vport
->fc_sparam
.portName
.u
.wwn
);
2262 memcpy(&vport
->fc_nodename
, &vport
->fc_sparam
.nodeName
,
2263 sizeof(vport
->fc_nodename
));
2264 memcpy(&vport
->fc_portname
, &vport
->fc_sparam
.portName
,
2265 sizeof(vport
->fc_portname
));
2266 if (vport
->port_type
== LPFC_PHYSICAL_PORT
) {
2267 memcpy(&phba
->wwnn
, &vport
->fc_nodename
, sizeof(phba
->wwnn
));
2268 memcpy(&phba
->wwpn
, &vport
->fc_portname
, sizeof(phba
->wwnn
));
2271 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2273 mempool_free(pmb
, phba
->mbox_mem_pool
);
2277 pmb
->context1
= NULL
;
2278 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2280 lpfc_issue_clear_la(phba
, vport
);
2281 mempool_free(pmb
, phba
->mbox_mem_pool
);
2286 lpfc_mbx_process_link_up(struct lpfc_hba
*phba
, READ_LA_VAR
*la
)
2288 struct lpfc_vport
*vport
= phba
->pport
;
2289 LPFC_MBOXQ_t
*sparam_mbox
, *cfglink_mbox
= NULL
;
2291 struct lpfc_dmabuf
*mp
;
2293 struct fcf_record
*fcf_record
;
2295 spin_lock_irq(&phba
->hbalock
);
2296 switch (la
->UlnkSpeed
) {
2298 phba
->fc_linkspeed
= LA_1GHZ_LINK
;
2301 phba
->fc_linkspeed
= LA_2GHZ_LINK
;
2304 phba
->fc_linkspeed
= LA_4GHZ_LINK
;
2307 phba
->fc_linkspeed
= LA_8GHZ_LINK
;
2310 phba
->fc_linkspeed
= LA_10GHZ_LINK
;
2313 phba
->fc_linkspeed
= LA_UNKNW_LINK
;
2317 phba
->fc_topology
= la
->topology
;
2318 phba
->link_flag
&= ~LS_NPIV_FAB_SUPPORTED
;
2320 if (phba
->fc_topology
== TOPOLOGY_LOOP
) {
2321 phba
->sli3_options
&= ~LPFC_SLI3_NPIV_ENABLED
;
2323 /* if npiv is enabled and this adapter supports npiv log
2324 * a message that npiv is not supported in this topology
2326 if (phba
->cfg_enable_npiv
&& phba
->max_vpi
)
2327 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2328 "1309 Link Up Event npiv not supported in loop "
2330 /* Get Loop Map information */
2332 vport
->fc_flag
|= FC_LBIT
;
2334 vport
->fc_myDID
= la
->granted_AL_PA
;
2335 i
= la
->un
.lilpBde64
.tus
.f
.bdeSize
;
2338 phba
->alpa_map
[0] = 0;
2340 if (vport
->cfg_log_verbose
& LOG_LINK_EVENT
) {
2351 numalpa
= phba
->alpa_map
[0];
2353 while (j
< numalpa
) {
2354 memset(un
.pamap
, 0, 16);
2355 for (k
= 1; j
< numalpa
; k
++) {
2357 phba
->alpa_map
[j
+ 1];
2362 /* Link Up Event ALPA map */
2363 lpfc_printf_log(phba
,
2366 "1304 Link Up Event "
2367 "ALPA map Data: x%x "
2369 un
.pa
.wd1
, un
.pa
.wd2
,
2370 un
.pa
.wd3
, un
.pa
.wd4
);
2375 if (!(phba
->sli3_options
& LPFC_SLI3_NPIV_ENABLED
)) {
2376 if (phba
->max_vpi
&& phba
->cfg_enable_npiv
&&
2377 (phba
->sli_rev
== 3))
2378 phba
->sli3_options
|= LPFC_SLI3_NPIV_ENABLED
;
2380 vport
->fc_myDID
= phba
->fc_pref_DID
;
2381 vport
->fc_flag
|= FC_LBIT
;
2383 spin_unlock_irq(&phba
->hbalock
);
2386 sparam_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2390 rc
= lpfc_read_sparam(phba
, sparam_mbox
, 0);
2392 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
2395 sparam_mbox
->vport
= vport
;
2396 sparam_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_read_sparam
;
2397 rc
= lpfc_sli_issue_mbox(phba
, sparam_mbox
, MBX_NOWAIT
);
2398 if (rc
== MBX_NOT_FINISHED
) {
2399 mp
= (struct lpfc_dmabuf
*) sparam_mbox
->context1
;
2400 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2402 mempool_free(sparam_mbox
, phba
->mbox_mem_pool
);
2406 if (!(phba
->hba_flag
& HBA_FCOE_SUPPORT
)) {
2407 cfglink_mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2410 vport
->port_state
= LPFC_LOCAL_CFG_LINK
;
2411 lpfc_config_link(phba
, cfglink_mbox
);
2412 cfglink_mbox
->vport
= vport
;
2413 cfglink_mbox
->mbox_cmpl
= lpfc_mbx_cmpl_local_config_link
;
2414 rc
= lpfc_sli_issue_mbox(phba
, cfglink_mbox
, MBX_NOWAIT
);
2415 if (rc
== MBX_NOT_FINISHED
) {
2416 mempool_free(cfglink_mbox
, phba
->mbox_mem_pool
);
2420 vport
->port_state
= LPFC_VPORT_UNKNOWN
;
2422 * Add the driver's default FCF record at FCF index 0 now. This
2423 * is phase 1 implementation that support FCF index 0 and driver
2426 if (!(phba
->hba_flag
& HBA_FIP_SUPPORT
)) {
2427 fcf_record
= kzalloc(sizeof(struct fcf_record
),
2429 if (unlikely(!fcf_record
)) {
2430 lpfc_printf_log(phba
, KERN_ERR
,
2432 "2554 Could not allocate memmory for "
2438 lpfc_sli4_build_dflt_fcf_record(phba
, fcf_record
,
2439 LPFC_FCOE_FCF_DEF_INDEX
);
2440 rc
= lpfc_sli4_add_fcf_record(phba
, fcf_record
);
2442 lpfc_printf_log(phba
, KERN_ERR
,
2444 "2013 Could not manually add FCF "
2445 "record 0, status %d\n", rc
);
2453 * The driver is expected to do FIP/FCF. Call the port
2454 * and get the FCF Table.
2456 spin_lock_irq(&phba
->hbalock
);
2457 if (phba
->hba_flag
& FCF_DISC_INPROGRESS
) {
2458 spin_unlock_irq(&phba
->hbalock
);
2461 /* This is the initial FCF discovery scan */
2462 phba
->fcf
.fcf_flag
|= FCF_INIT_DISC
;
2463 spin_unlock_irq(&phba
->hbalock
);
2464 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
| LOG_DISCOVERY
,
2465 "2778 Start FCF table scan at linkup\n");
2467 rc
= lpfc_sli4_fcf_scan_read_fcf_rec(phba
,
2468 LPFC_FCOE_FCF_GET_FIRST
);
2470 spin_lock_irq(&phba
->hbalock
);
2471 phba
->fcf
.fcf_flag
&= ~FCF_INIT_DISC
;
2472 spin_unlock_irq(&phba
->hbalock
);
2479 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2480 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
2481 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
2482 vport
->port_state
, sparam_mbox
, cfglink_mbox
);
2483 lpfc_issue_clear_la(phba
, vport
);
2488 lpfc_enable_la(struct lpfc_hba
*phba
)
2491 struct lpfc_sli
*psli
= &phba
->sli
;
2492 spin_lock_irq(&phba
->hbalock
);
2493 psli
->sli_flag
|= LPFC_PROCESS_LA
;
2494 if (phba
->sli_rev
<= LPFC_SLI_REV3
) {
2495 control
= readl(phba
->HCregaddr
);
2496 control
|= HC_LAINT_ENA
;
2497 writel(control
, phba
->HCregaddr
);
2498 readl(phba
->HCregaddr
); /* flush */
2500 spin_unlock_irq(&phba
->hbalock
);
2504 lpfc_mbx_issue_link_down(struct lpfc_hba
*phba
)
2506 lpfc_linkdown(phba
);
2507 lpfc_enable_la(phba
);
2508 lpfc_unregister_unused_fcf(phba
);
2509 /* turn on Link Attention interrupts - no CLEAR_LA needed */
2514 * This routine handles processing a READ_LA mailbox
2515 * command upon completion. It is setup in the LPFC_MBOXQ
2516 * as the completion routine when the command is
2517 * handed off to the SLI layer.
2520 lpfc_mbx_cmpl_read_la(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2522 struct lpfc_vport
*vport
= pmb
->vport
;
2523 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2525 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2526 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
2528 /* Unblock ELS traffic */
2529 phba
->sli
.ring
[LPFC_ELS_RING
].flag
&= ~LPFC_STOP_IOCB_EVENT
;
2530 /* Check for error */
2531 if (mb
->mbxStatus
) {
2532 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
2533 "1307 READ_LA mbox error x%x state x%x\n",
2534 mb
->mbxStatus
, vport
->port_state
);
2535 lpfc_mbx_issue_link_down(phba
);
2536 phba
->link_state
= LPFC_HBA_ERROR
;
2537 goto lpfc_mbx_cmpl_read_la_free_mbuf
;
2540 la
= (READ_LA_VAR
*) &pmb
->u
.mb
.un
.varReadLA
;
2542 memcpy(&phba
->alpa_map
[0], mp
->virt
, 128);
2544 spin_lock_irq(shost
->host_lock
);
2546 vport
->fc_flag
|= FC_BYPASSED_MODE
;
2548 vport
->fc_flag
&= ~FC_BYPASSED_MODE
;
2549 spin_unlock_irq(shost
->host_lock
);
2551 if ((phba
->fc_eventTag
< la
->eventTag
) ||
2552 (phba
->fc_eventTag
== la
->eventTag
)) {
2553 phba
->fc_stat
.LinkMultiEvent
++;
2554 if (la
->attType
== AT_LINK_UP
)
2555 if (phba
->fc_eventTag
!= 0)
2556 lpfc_linkdown(phba
);
2559 phba
->fc_eventTag
= la
->eventTag
;
2560 spin_lock_irq(&phba
->hbalock
);
2562 phba
->sli
.sli_flag
|= LPFC_MENLO_MAINT
;
2564 phba
->sli
.sli_flag
&= ~LPFC_MENLO_MAINT
;
2565 spin_unlock_irq(&phba
->hbalock
);
2567 phba
->link_events
++;
2568 if (la
->attType
== AT_LINK_UP
&& (!la
->mm
)) {
2569 phba
->fc_stat
.LinkUp
++;
2570 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
2571 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2572 "1306 Link Up Event in loop back mode "
2573 "x%x received Data: x%x x%x x%x x%x\n",
2574 la
->eventTag
, phba
->fc_eventTag
,
2575 la
->granted_AL_PA
, la
->UlnkSpeed
,
2578 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2579 "1303 Link Up Event x%x received "
2580 "Data: x%x x%x x%x x%x x%x x%x %d\n",
2581 la
->eventTag
, phba
->fc_eventTag
,
2582 la
->granted_AL_PA
, la
->UlnkSpeed
,
2585 phba
->wait_4_mlo_maint_flg
);
2587 lpfc_mbx_process_link_up(phba
, la
);
2588 } else if (la
->attType
== AT_LINK_DOWN
) {
2589 phba
->fc_stat
.LinkDown
++;
2590 if (phba
->link_flag
& LS_LOOPBACK_MODE
) {
2591 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2592 "1308 Link Down Event in loop back mode "
2594 "Data: x%x x%x x%x\n",
2595 la
->eventTag
, phba
->fc_eventTag
,
2596 phba
->pport
->port_state
, vport
->fc_flag
);
2599 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2600 "1305 Link Down Event x%x received "
2601 "Data: x%x x%x x%x x%x x%x\n",
2602 la
->eventTag
, phba
->fc_eventTag
,
2603 phba
->pport
->port_state
, vport
->fc_flag
,
2606 lpfc_mbx_issue_link_down(phba
);
2608 if (la
->mm
&& la
->attType
== AT_LINK_UP
) {
2609 if (phba
->link_state
!= LPFC_LINK_DOWN
) {
2610 phba
->fc_stat
.LinkDown
++;
2611 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2612 "1312 Link Down Event x%x received "
2613 "Data: x%x x%x x%x\n",
2614 la
->eventTag
, phba
->fc_eventTag
,
2615 phba
->pport
->port_state
, vport
->fc_flag
);
2616 lpfc_mbx_issue_link_down(phba
);
2618 lpfc_enable_la(phba
);
2620 lpfc_printf_log(phba
, KERN_ERR
, LOG_LINK_EVENT
,
2621 "1310 Menlo Maint Mode Link up Event x%x rcvd "
2622 "Data: x%x x%x x%x\n",
2623 la
->eventTag
, phba
->fc_eventTag
,
2624 phba
->pport
->port_state
, vport
->fc_flag
);
2626 * The cmnd that triggered this will be waiting for this
2629 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
2630 if (phba
->wait_4_mlo_maint_flg
) {
2631 phba
->wait_4_mlo_maint_flg
= 0;
2632 wake_up_interruptible(&phba
->wait_4_mlo_m_q
);
2638 lpfc_issue_clear_la(phba
, vport
);
2639 lpfc_printf_log(phba
, KERN_INFO
, LOG_LINK_EVENT
,
2640 "1311 fa %d\n", la
->fa
);
2643 lpfc_mbx_cmpl_read_la_free_mbuf
:
2644 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2646 mempool_free(pmb
, phba
->mbox_mem_pool
);
2651 * This routine handles processing a REG_LOGIN mailbox
2652 * command upon completion. It is setup in the LPFC_MBOXQ
2653 * as the completion routine when the command is
2654 * handed off to the SLI layer.
2657 lpfc_mbx_cmpl_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2659 struct lpfc_vport
*vport
= pmb
->vport
;
2660 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
2661 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
2663 pmb
->context1
= NULL
;
2665 /* Good status, call state machine */
2666 lpfc_disc_state_machine(vport
, ndlp
, pmb
, NLP_EVT_CMPL_REG_LOGIN
);
2667 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2669 mempool_free(pmb
, phba
->mbox_mem_pool
);
2670 /* decrement the node reference count held for this callback
2679 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2681 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2682 struct lpfc_vport
*vport
= pmb
->vport
;
2683 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2685 switch (mb
->mbxStatus
) {
2689 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
2690 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
2694 spin_lock_irq(shost
->host_lock
);
2695 vport
->vpi_state
&= ~LPFC_VPI_REGISTERED
;
2696 vport
->fc_flag
|= FC_VPORT_NEEDS_REG_VPI
;
2697 spin_unlock_irq(shost
->host_lock
);
2698 vport
->unreg_vpi_cmpl
= VPORT_OK
;
2699 mempool_free(pmb
, phba
->mbox_mem_pool
);
2701 * This shost reference might have been taken at the beginning of
2702 * lpfc_vport_delete()
2704 if ((vport
->load_flag
& FC_UNLOADING
) && (vport
!= phba
->pport
))
2705 scsi_host_put(shost
);
2709 lpfc_mbx_unreg_vpi(struct lpfc_vport
*vport
)
2711 struct lpfc_hba
*phba
= vport
->phba
;
2715 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2719 lpfc_unreg_vpi(phba
, vport
->vpi
, mbox
);
2720 mbox
->vport
= vport
;
2721 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_unreg_vpi
;
2722 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
2723 if (rc
== MBX_NOT_FINISHED
) {
2724 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
| LOG_VPORT
,
2725 "1800 Could not issue unreg_vpi\n");
2726 mempool_free(mbox
, phba
->mbox_mem_pool
);
2727 vport
->unreg_vpi_cmpl
= VPORT_ERROR
;
2734 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2736 struct lpfc_vport
*vport
= pmb
->vport
;
2737 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
2738 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2740 switch (mb
->mbxStatus
) {
2744 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
2745 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
2747 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2748 spin_lock_irq(shost
->host_lock
);
2749 vport
->fc_flag
&= ~(FC_FABRIC
| FC_PUBLIC_LOOP
);
2750 spin_unlock_irq(shost
->host_lock
);
2751 vport
->fc_myDID
= 0;
2755 spin_lock_irq(shost
->host_lock
);
2756 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
2757 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
2758 spin_unlock_irq(shost
->host_lock
);
2759 vport
->num_disc_nodes
= 0;
2760 /* go thru NPR list and issue ELS PLOGIs */
2761 if (vport
->fc_npr_cnt
)
2762 lpfc_els_disc_plogi(vport
);
2764 if (!vport
->num_disc_nodes
) {
2765 spin_lock_irq(shost
->host_lock
);
2766 vport
->fc_flag
&= ~FC_NDISC_ACTIVE
;
2767 spin_unlock_irq(shost
->host_lock
);
2768 lpfc_can_disctmo(vport
);
2770 vport
->port_state
= LPFC_VPORT_READY
;
2773 mempool_free(pmb
, phba
->mbox_mem_pool
);
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command for config region 22 to get
 * the list of static vports to be created. The function creates the vports
 * based on the information returned from the HBA.
 **/
2786 lpfc_create_static_vport(struct lpfc_hba
*phba
)
2788 LPFC_MBOXQ_t
*pmb
= NULL
;
2790 struct static_vport_info
*vport_info
;
2792 struct fc_vport_identifiers vport_id
;
2793 struct fc_vport
*new_fc_vport
;
2794 struct Scsi_Host
*shost
;
2795 struct lpfc_vport
*vport
;
2796 uint16_t offset
= 0;
2797 uint8_t *vport_buff
;
2798 struct lpfc_dmabuf
*mp
;
2799 uint32_t byte_count
= 0;
2801 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
2803 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
2804 "0542 lpfc_create_static_vport failed to"
2805 " allocate mailbox memory\n");
2811 vport_info
= kzalloc(sizeof(struct static_vport_info
), GFP_KERNEL
);
2813 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
2814 "0543 lpfc_create_static_vport failed to"
2815 " allocate vport_info\n");
2816 mempool_free(pmb
, phba
->mbox_mem_pool
);
2820 vport_buff
= (uint8_t *) vport_info
;
2822 if (lpfc_dump_static_vport(phba
, pmb
, offset
))
2825 pmb
->vport
= phba
->pport
;
2826 rc
= lpfc_sli_issue_mbox_wait(phba
, pmb
, LPFC_MBOX_TMO
);
2828 if ((rc
!= MBX_SUCCESS
) || mb
->mbxStatus
) {
2829 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
2830 "0544 lpfc_create_static_vport failed to"
2831 " issue dump mailbox command ret 0x%x "
2837 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
2838 byte_count
= pmb
->u
.mqe
.un
.mb_words
[5];
2839 mp
= (struct lpfc_dmabuf
*) pmb
->context2
;
2840 if (byte_count
> sizeof(struct static_vport_info
) -
2842 byte_count
= sizeof(struct static_vport_info
)
2844 memcpy(vport_buff
+ offset
, mp
->virt
, byte_count
);
2845 offset
+= byte_count
;
2847 if (mb
->un
.varDmp
.word_cnt
>
2848 sizeof(struct static_vport_info
) - offset
)
2849 mb
->un
.varDmp
.word_cnt
=
2850 sizeof(struct static_vport_info
)
2852 byte_count
= mb
->un
.varDmp
.word_cnt
;
2853 lpfc_sli_pcimem_bcopy(((uint8_t *)mb
) + DMP_RSP_OFFSET
,
2854 vport_buff
+ offset
,
2857 offset
+= byte_count
;
2860 } while (byte_count
&&
2861 offset
< sizeof(struct static_vport_info
));
2864 if ((le32_to_cpu(vport_info
->signature
) != VPORT_INFO_SIG
) ||
2865 ((le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
)
2866 != VPORT_INFO_REV
)) {
2867 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
2868 "0545 lpfc_create_static_vport bad"
2869 " information header 0x%x 0x%x\n",
2870 le32_to_cpu(vport_info
->signature
),
2871 le32_to_cpu(vport_info
->rev
) & VPORT_INFO_REV_MASK
);
2876 shost
= lpfc_shost_from_vport(phba
->pport
);
2878 for (i
= 0; i
< MAX_STATIC_VPORT_COUNT
; i
++) {
2879 memset(&vport_id
, 0, sizeof(vport_id
));
2880 vport_id
.port_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwpn
);
2881 vport_id
.node_name
= wwn_to_u64(vport_info
->vport_list
[i
].wwnn
);
2882 if (!vport_id
.port_name
|| !vport_id
.node_name
)
2885 vport_id
.roles
= FC_PORT_ROLE_FCP_INITIATOR
;
2886 vport_id
.vport_type
= FC_PORTTYPE_NPIV
;
2887 vport_id
.disable
= false;
2888 new_fc_vport
= fc_vport_create(shost
, 0, &vport_id
);
2890 if (!new_fc_vport
) {
2891 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
2892 "0546 lpfc_create_static_vport failed to"
2897 vport
= *(struct lpfc_vport
**)new_fc_vport
->dd_data
;
2898 vport
->vport_flag
|= STATIC_VPORT
;
2903 if (rc
!= MBX_TIMEOUT
) {
2904 if (pmb
->context2
) {
2905 mp
= (struct lpfc_dmabuf
*) pmb
->context2
;
2906 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2909 mempool_free(pmb
, phba
->mbox_mem_pool
);
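/*
 * Shape of the static vport discovery above (hedged outline, field names as
 * used in the code):
 *
 *	do {
 *		lpfc_dump_static_vport(phba, pmb, offset);	(region 22)
 *		lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *		copy the returned bytes into vport_buff + offset;
 *	} while (byte_count && offset < sizeof(struct static_vport_info));
 *
 *	validate vport_info->signature / rev, then for each non-zero
 *	wwpn/wwnn entry call fc_vport_create() and tag it STATIC_VPORT.
 */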
2916 * This routine handles processing a Fabric REG_LOGIN mailbox
2917 * command upon completion. It is setup in the LPFC_MBOXQ
2918 * as the completion routine when the command is
2919 * handed off to the SLI layer.
2922 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2924 struct lpfc_vport
*vport
= pmb
->vport
;
2925 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2926 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
2927 struct lpfc_nodelist
*ndlp
;
2929 ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
2930 pmb
->context1
= NULL
;
2931 pmb
->context2
= NULL
;
2932 if (mb
->mbxStatus
) {
2933 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
,
2934 "0258 Register Fabric login error: 0x%x\n",
2936 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2938 mempool_free(pmb
, phba
->mbox_mem_pool
);
2940 if (phba
->fc_topology
== TOPOLOGY_LOOP
) {
2941 /* FLOGI failed, use loop map to make discovery list */
2942 lpfc_disc_list_loopmap(vport
);
2944 /* Start discovery */
2945 lpfc_disc_start(vport
);
2946 /* Decrement the reference count to ndlp after the
2947 * reference to the ndlp are done.
2953 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
2954 /* Decrement the reference count to ndlp after the reference
2955 * to the ndlp are done.
2961 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
2962 ndlp
->nlp_flag
|= NLP_RPI_VALID
;
2963 ndlp
->nlp_type
|= NLP_FABRIC
;
2964 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
2966 if (vport
->port_state
== LPFC_FABRIC_CFG_LINK
) {
2967 lpfc_start_fdiscs(phba
);
2968 lpfc_do_scr_ns_plogi(phba
, vport
);
2971 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2973 mempool_free(pmb
, phba
->mbox_mem_pool
);
2975 /* Drop the reference count from the mbox at the end after
2976 * all the current reference to the ndlp have been done.
2983 * This routine handles processing a NameServer REG_LOGIN mailbox
2984 * command upon completion. It is setup in the LPFC_MBOXQ
2985 * as the completion routine when the command is
2986 * handed off to the SLI layer.
2989 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2991 MAILBOX_t
*mb
= &pmb
->u
.mb
;
2992 struct lpfc_dmabuf
*mp
= (struct lpfc_dmabuf
*) (pmb
->context1
);
2993 struct lpfc_nodelist
*ndlp
= (struct lpfc_nodelist
*) pmb
->context2
;
2994 struct lpfc_vport
*vport
= pmb
->vport
;
2996 if (mb
->mbxStatus
) {
2998 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
2999 "0260 Register NameServer error: 0x%x\n",
3001 /* decrement the node reference count held for this
3002 * callback function.
3005 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3007 mempool_free(pmb
, phba
->mbox_mem_pool
);
3009 /* If no other thread is using the ndlp, free it */
3010 lpfc_nlp_not_used(ndlp
);
3012 if (phba
->fc_topology
== TOPOLOGY_LOOP
) {
3014 * RegLogin failed, use loop map to make discovery
3017 lpfc_disc_list_loopmap(vport
);
3019 /* Start discovery */
3020 lpfc_disc_start(vport
);
3023 lpfc_vport_set_state(vport
, FC_VPORT_FAILED
);
3027 pmb
->context1
= NULL
;
3029 ndlp
->nlp_rpi
= mb
->un
.varWords
[0];
3030 ndlp
->nlp_flag
|= NLP_RPI_VALID
;
3031 ndlp
->nlp_type
|= NLP_FABRIC
;
3032 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
3034 if (vport
->port_state
< LPFC_VPORT_READY
) {
3035 /* Link up discovery requires Fabric registration. */
3036 lpfc_ns_cmd(vport
, SLI_CTNS_RFF_ID
, 0, 0); /* Do this first! */
3037 lpfc_ns_cmd(vport
, SLI_CTNS_RNN_ID
, 0, 0);
3038 lpfc_ns_cmd(vport
, SLI_CTNS_RSNN_NN
, 0, 0);
3039 lpfc_ns_cmd(vport
, SLI_CTNS_RSPN_ID
, 0, 0);
3040 lpfc_ns_cmd(vport
, SLI_CTNS_RFT_ID
, 0, 0);
3042 /* Issue SCR just before NameServer GID_FT Query */
3043 lpfc_issue_els_scr(vport
, SCR_DID
, 0);
3046 vport
->fc_ns_retry
= 0;
3047 /* Good status, issue CT Request to NameServer */
3048 if (lpfc_ns_cmd(vport
, SLI_CTNS_GID_FT
, 0, 0)) {
3049 /* Cannot issue NameServer Query, so finish up discovery */
3053 /* decrement the node reference count held for this
3054 * callback function.
3057 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3059 mempool_free(pmb
, phba
->mbox_mem_pool
);
3065 lpfc_register_remote_port(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
3067 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3068 struct fc_rport
*rport
;
3069 struct lpfc_rport_data
*rdata
;
3070 struct fc_rport_identifiers rport_ids
;
3071 struct lpfc_hba
*phba
= vport
->phba
;
3073 /* Remote port has reappeared. Re-register w/ FC transport */
3074 rport_ids
.node_name
= wwn_to_u64(ndlp
->nlp_nodename
.u
.wwn
);
3075 rport_ids
.port_name
= wwn_to_u64(ndlp
->nlp_portname
.u
.wwn
);
3076 rport_ids
.port_id
= ndlp
->nlp_DID
;
3077 rport_ids
.roles
= FC_RPORT_ROLE_UNKNOWN
;
3080 * We leave our node pointer in rport->dd_data when we unregister a
3081 * FCP target port. But fc_remote_port_add zeros the space to which
3082 * rport->dd_data points. So, if we're reusing a previously
3083 * registered port, drop the reference that we took the last time we
3084 * registered the port.
3086 if (ndlp
->rport
&& ndlp
->rport
->dd_data
&&
3087 ((struct lpfc_rport_data
*) ndlp
->rport
->dd_data
)->pnode
== ndlp
)
3090 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_RPORT
,
3091 "rport add: did:x%x flg:x%x type x%x",
3092 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_type
);
3094 ndlp
->rport
= rport
= fc_remote_port_add(shost
, 0, &rport_ids
);
3095 if (!rport
|| !get_device(&rport
->dev
)) {
3096 dev_printk(KERN_WARNING
, &phba
->pcidev
->dev
,
3097 "Warning: fc_remote_port_add failed\n");
3101 /* initialize static port data */
3102 rport
->maxframe_size
= ndlp
->nlp_maxframe
;
3103 rport
->supported_classes
= ndlp
->nlp_class_sup
;
3104 rdata
= rport
->dd_data
;
3105 rdata
->pnode
= lpfc_nlp_get(ndlp
);
3107 if (ndlp
->nlp_type
& NLP_FCP_TARGET
)
3108 rport_ids
.roles
|= FC_RPORT_ROLE_FCP_TARGET
;
3109 if (ndlp
->nlp_type
& NLP_FCP_INITIATOR
)
3110 rport_ids
.roles
|= FC_RPORT_ROLE_FCP_INITIATOR
;
3113 if (rport_ids
.roles
!= FC_RPORT_ROLE_UNKNOWN
)
3114 fc_remote_port_rolechg(rport
, rport_ids
.roles
);
3116 if ((rport
->scsi_target_id
!= -1) &&
3117 (rport
->scsi_target_id
< LPFC_MAX_TARGET
)) {
3118 ndlp
->nlp_sid
= rport
->scsi_target_id
;
3124 lpfc_unregister_remote_port(struct lpfc_nodelist
*ndlp
)
3126 struct fc_rport
*rport
= ndlp
->rport
;
3128 lpfc_debugfs_disc_trc(ndlp
->vport
, LPFC_DISC_TRC_RPORT
,
3129 "rport delete: did:x%x flg:x%x type x%x",
3130 ndlp
->nlp_DID
, ndlp
->nlp_flag
, ndlp
->nlp_type
);
3132 fc_remote_port_delete(rport
);
3138 lpfc_nlp_counters(struct lpfc_vport
*vport
, int state
, int count
)
3140 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3142 spin_lock_irq(shost
->host_lock
);
3144 case NLP_STE_UNUSED_NODE
:
3145 vport
->fc_unused_cnt
+= count
;
3147 case NLP_STE_PLOGI_ISSUE
:
3148 vport
->fc_plogi_cnt
+= count
;
3150 case NLP_STE_ADISC_ISSUE
:
3151 vport
->fc_adisc_cnt
+= count
;
3153 case NLP_STE_REG_LOGIN_ISSUE
:
3154 vport
->fc_reglogin_cnt
+= count
;
3156 case NLP_STE_PRLI_ISSUE
:
3157 vport
->fc_prli_cnt
+= count
;
3159 case NLP_STE_UNMAPPED_NODE
:
3160 vport
->fc_unmap_cnt
+= count
;
3162 case NLP_STE_MAPPED_NODE
:
3163 vport
->fc_map_cnt
+= count
;
3165 case NLP_STE_NPR_NODE
:
3166 vport
->fc_npr_cnt
+= count
;
3169 spin_unlock_irq(shost
->host_lock
);
3173 lpfc_nlp_state_cleanup(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
3174 int old_state
, int new_state
)
3176 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3178 if (new_state
== NLP_STE_UNMAPPED_NODE
) {
3179 ndlp
->nlp_type
&= ~(NLP_FCP_TARGET
| NLP_FCP_INITIATOR
);
3180 ndlp
->nlp_flag
&= ~NLP_NODEV_REMOVE
;
3181 ndlp
->nlp_type
|= NLP_FC_NODE
;
3183 if (new_state
== NLP_STE_MAPPED_NODE
)
3184 ndlp
->nlp_flag
&= ~NLP_NODEV_REMOVE
;
3185 if (new_state
== NLP_STE_NPR_NODE
)
3186 ndlp
->nlp_flag
&= ~NLP_RCV_PLOGI
;
3188 /* Transport interface */
3189 if (ndlp
->rport
&& (old_state
== NLP_STE_MAPPED_NODE
||
3190 old_state
== NLP_STE_UNMAPPED_NODE
)) {
3191 vport
->phba
->nport_event_cnt
++;
3192 lpfc_unregister_remote_port(ndlp
);
3195 if (new_state
== NLP_STE_MAPPED_NODE
||
3196 new_state
== NLP_STE_UNMAPPED_NODE
) {
3197 vport
->phba
->nport_event_cnt
++;
3199 * Tell the fc transport about the port, if we haven't
3200 * already. If we have, and it's a scsi entity, be
3201 * sure to unblock any attached scsi devices
3203 lpfc_register_remote_port(vport
, ndlp
);
3205 if ((new_state
== NLP_STE_MAPPED_NODE
) &&
3206 (vport
->stat_data_enabled
)) {
3208 * A new target is discovered, if there is no buffer for
3209 * statistical data collection allocate buffer.
3211 ndlp
->lat_data
= kcalloc(LPFC_MAX_BUCKET_COUNT
,
3212 sizeof(struct lpfc_scsicmd_bkt
),
3215 if (!ndlp
->lat_data
)
3216 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_NODE
,
3217 "0286 lpfc_nlp_state_cleanup failed to "
3218 "allocate statistical data buffer DID "
3219 "0x%x\n", ndlp
->nlp_DID
);
3222 * if we added to Mapped list, but the remote port
3223 * registration failed or assigned a target id outside
3224 * our presentable range - move the node to the
3227 if (new_state
== NLP_STE_MAPPED_NODE
&&
3229 ndlp
->rport
->scsi_target_id
== -1 ||
3230 ndlp
->rport
->scsi_target_id
>= LPFC_MAX_TARGET
)) {
3231 spin_lock_irq(shost
->host_lock
);
3232 ndlp
->nlp_flag
|= NLP_TGT_NO_SCSIID
;
3233 spin_unlock_irq(shost
->host_lock
);
3234 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNMAPPED_NODE
);
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
	static char *states[] = {
		[NLP_STE_UNUSED_NODE] = "UNUSED",
		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
		[NLP_STE_ADISC_ISSUE] = "ADISC",
		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
		[NLP_STE_PRLI_ISSUE] = "PRLI",
		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
		[NLP_STE_MAPPED_NODE] = "MAPPED",
		[NLP_STE_NPR_NODE] = "NPR",
	};

	if (state < NLP_STE_MAX_STATE && states[state])
		strlcpy(buffer, states[state], size);
	else
		snprintf(buffer, size, "unknown (%d)", state);
	return buffer;
}
3260 lpfc_nlp_set_state(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
3263 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3264 int old_state
= ndlp
->nlp_state
;
3265 char name1
[16], name2
[16];
3267 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3268 "0904 NPort state transition x%06x, %s -> %s\n",
3270 lpfc_nlp_state_name(name1
, sizeof(name1
), old_state
),
3271 lpfc_nlp_state_name(name2
, sizeof(name2
), state
));
3273 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_NODE
,
3274 "node statechg did:x%x old:%d ste:%d",
3275 ndlp
->nlp_DID
, old_state
, state
);
3277 if (old_state
== NLP_STE_NPR_NODE
&&
3278 state
!= NLP_STE_NPR_NODE
)
3279 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
3280 if (old_state
== NLP_STE_UNMAPPED_NODE
) {
3281 ndlp
->nlp_flag
&= ~NLP_TGT_NO_SCSIID
;
3282 ndlp
->nlp_type
&= ~NLP_FC_NODE
;
3285 if (list_empty(&ndlp
->nlp_listp
)) {
3286 spin_lock_irq(shost
->host_lock
);
3287 list_add_tail(&ndlp
->nlp_listp
, &vport
->fc_nodes
);
3288 spin_unlock_irq(shost
->host_lock
);
3289 } else if (old_state
)
3290 lpfc_nlp_counters(vport
, old_state
, -1);
3292 ndlp
->nlp_state
= state
;
3293 lpfc_nlp_counters(vport
, state
, 1);
3294 lpfc_nlp_state_cleanup(vport
, ndlp
, old_state
, state
);
3298 lpfc_enqueue_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
3300 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3302 if (list_empty(&ndlp
->nlp_listp
)) {
3303 spin_lock_irq(shost
->host_lock
);
3304 list_add_tail(&ndlp
->nlp_listp
, &vport
->fc_nodes
);
3305 spin_unlock_irq(shost
->host_lock
);
3310 lpfc_dequeue_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
3312 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3314 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
3315 if (ndlp
->nlp_state
&& !list_empty(&ndlp
->nlp_listp
))
3316 lpfc_nlp_counters(vport
, ndlp
->nlp_state
, -1);
3317 spin_lock_irq(shost
->host_lock
);
3318 list_del_init(&ndlp
->nlp_listp
);
3319 spin_unlock_irq(shost
->host_lock
);
3320 lpfc_nlp_state_cleanup(vport
, ndlp
, ndlp
->nlp_state
,
3321 NLP_STE_UNUSED_NODE
);
3325 lpfc_disable_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
3327 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
3328 if (ndlp
->nlp_state
&& !list_empty(&ndlp
->nlp_listp
))
3329 lpfc_nlp_counters(vport
, ndlp
->nlp_state
, -1);
3330 lpfc_nlp_state_cleanup(vport
, ndlp
, ndlp
->nlp_state
,
3331 NLP_STE_UNUSED_NODE
);
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp because the
 * life-span of @ndlp may extend beyond the existence of @vport: the final
 * release of the ndlp is determined by its reference count, and operations
 * on @ndlp need the reference to phba.
 **/
3348 lpfc_initialize_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
3351 INIT_LIST_HEAD(&ndlp
->els_retry_evt
.evt_listp
);
3352 INIT_LIST_HEAD(&ndlp
->dev_loss_evt
.evt_listp
);
3353 init_timer(&ndlp
->nlp_delayfunc
);
3354 ndlp
->nlp_delayfunc
.function
= lpfc_els_retry_delay
;
3355 ndlp
->nlp_delayfunc
.data
= (unsigned long)ndlp
;
3356 ndlp
->nlp_DID
= did
;
3357 ndlp
->vport
= vport
;
3358 ndlp
->phba
= vport
->phba
;
3359 ndlp
->nlp_sid
= NLP_NO_SID
;
3360 kref_init(&ndlp
->kref
);
3361 NLP_INT_NODE_ACT(ndlp
);
3362 atomic_set(&ndlp
->cmd_pending
, 0);
3363 ndlp
->cmd_qdepth
= LPFC_MAX_TGT_QDEPTH
;
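/*
 * Reference handling note (descriptive only): kref_init() above sets the
 * node reference count to one; later users take and drop references with
 * lpfc_nlp_get(ndlp) / lpfc_nlp_put(ndlp), and the node is freed only when
 * the count reaches zero. This is why @ndlp caches its own phba pointer
 * instead of relying on @vport remaining valid for the node's lifetime.
 */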
3366 struct lpfc_nodelist
*
3367 lpfc_enable_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
3370 struct lpfc_hba
*phba
= vport
->phba
;
3372 unsigned long flags
;
3377 spin_lock_irqsave(&phba
->ndlp_lock
, flags
);
3378 /* The ndlp should not be in memory free mode */
3379 if (NLP_CHK_FREE_REQ(ndlp
)) {
3380 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
3381 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
3382 "0277 lpfc_enable_node: ndlp:x%p "
3383 "usgmap:x%x refcnt:%d\n",
3384 (void *)ndlp
, ndlp
->nlp_usg_map
,
3385 atomic_read(&ndlp
->kref
.refcount
));
3388 /* The ndlp should not already be in active mode */
3389 if (NLP_CHK_NODE_ACT(ndlp
)) {
3390 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
3391 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
3392 "0278 lpfc_enable_node: ndlp:x%p "
3393 "usgmap:x%x refcnt:%d\n",
3394 (void *)ndlp
, ndlp
->nlp_usg_map
,
3395 atomic_read(&ndlp
->kref
.refcount
));
3399 /* Keep the original DID */
3400 did
= ndlp
->nlp_DID
;
3402 /* re-initialize ndlp except of ndlp linked list pointer */
3403 memset((((char *)ndlp
) + sizeof (struct list_head
)), 0,
3404 sizeof (struct lpfc_nodelist
) - sizeof (struct list_head
));
3405 lpfc_initialize_node(vport
, ndlp
, did
);
3407 spin_unlock_irqrestore(&phba
->ndlp_lock
, flags
);
3409 if (state
!= NLP_STE_UNUSED_NODE
)
3410 lpfc_nlp_set_state(vport
, ndlp
, state
);
3412 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_NODE
,
3413 "node enable: did:x%x",
3414 ndlp
->nlp_DID
, 0, 0);
3419 lpfc_drop_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
	 * Use of lpfc_drop_node and the UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp stays marked as UNUSED on the
	 * list until ALL other outstanding threads have completed. We check
	 * that the ndlp is not already in the UNUSED state before we proceed.
3428 if (ndlp
->nlp_state
== NLP_STE_UNUSED_NODE
)
3430 lpfc_nlp_set_state(vport
, ndlp
, NLP_STE_UNUSED_NODE
);
3436 * Start / ReStart rescue timer for Discovery / RSCN handling
3439 lpfc_set_disctmo(struct lpfc_vport
*vport
)
3441 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3442 struct lpfc_hba
*phba
= vport
->phba
;
3445 if (vport
->port_state
== LPFC_LOCAL_CFG_LINK
) {
3446 /* For FAN, timeout should be greater than edtov */
3447 tmo
= (((phba
->fc_edtov
+ 999) / 1000) + 1);
3449 /* Normal discovery timeout should be > than ELS/CT timeout
3450 * FC spec states we need 3 * ratov for CT requests
3452 tmo
= ((phba
->fc_ratov
* 3) + 3);
3456 if (!timer_pending(&vport
->fc_disctmo
)) {
3457 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
3458 "set disc timer: tmo:x%x state:x%x flg:x%x",
3459 tmo
, vport
->port_state
, vport
->fc_flag
);
3462 mod_timer(&vport
->fc_disctmo
, jiffies
+ HZ
* tmo
);
3463 spin_lock_irq(shost
->host_lock
);
3464 vport
->fc_flag
|= FC_DISC_TMO
;
3465 spin_unlock_irq(shost
->host_lock
);
3467 /* Start Discovery Timer state <hba_state> */
3468 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
3469 "0247 Start Discovery Timer state x%x "
3470 "Data: x%x x%lx x%x x%x\n",
3471 vport
->port_state
, tmo
,
3472 (unsigned long)&vport
->fc_disctmo
, vport
->fc_plogi_cnt
,
3473 vport
->fc_adisc_cnt
);
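/*
 * Worked example for the timeout chosen above (illustrative values only):
 * with a typical fc_ratov of 10 seconds the normal discovery timeout is
 * 10 * 3 + 3 = 33 seconds, while the FAN case (LPFC_LOCAL_CFG_LINK) with an
 * fc_edtov of 2000 ms yields (2000 + 999) / 1000 + 1 = 3 seconds.
 */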
3479 * Cancel rescue timer for Discovery / RSCN handling
3482 lpfc_can_disctmo(struct lpfc_vport
*vport
)
3484 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3485 unsigned long iflags
;
3487 lpfc_debugfs_disc_trc(vport
, LPFC_DISC_TRC_ELS_CMD
,
3488 "can disc timer: state:x%x rtry:x%x flg:x%x",
3489 vport
->port_state
, vport
->fc_ns_retry
, vport
->fc_flag
);
3491 /* Turn off discovery timer if its running */
3492 if (vport
->fc_flag
& FC_DISC_TMO
) {
3493 spin_lock_irqsave(shost
->host_lock
, iflags
);
3494 vport
->fc_flag
&= ~FC_DISC_TMO
;
3495 spin_unlock_irqrestore(shost
->host_lock
, iflags
);
3496 del_timer_sync(&vport
->fc_disctmo
);
3497 spin_lock_irqsave(&vport
->work_port_lock
, iflags
);
3498 vport
->work_port_events
&= ~WORKER_DISC_TMO
;
3499 spin_unlock_irqrestore(&vport
->work_port_lock
, iflags
);
3502 /* Cancel Discovery Timer state <hba_state> */
3503 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_DISCOVERY
,
3504 "0248 Cancel Discovery Timer state x%x "
3505 "Data: x%x x%x x%x\n",
3506 vport
->port_state
, vport
->fc_flag
,
3507 vport
->fc_plogi_cnt
, vport
->fc_adisc_cnt
);
3512 * Check specified ring for outstanding IOCB on the SLI queue
3513 * Return true if iocb matches the specified nport
3516 lpfc_check_sli_ndlp(struct lpfc_hba
*phba
,
3517 struct lpfc_sli_ring
*pring
,
3518 struct lpfc_iocbq
*iocb
,
3519 struct lpfc_nodelist
*ndlp
)
3521 struct lpfc_sli
*psli
= &phba
->sli
;
3522 IOCB_t
*icmd
= &iocb
->iocb
;
3523 struct lpfc_vport
*vport
= ndlp
->vport
;
3525 if (iocb
->vport
!= vport
)
3528 if (pring
->ringno
== LPFC_ELS_RING
) {
3529 switch (icmd
->ulpCommand
) {
3530 case CMD_GEN_REQUEST64_CR
:
3531 if (iocb
->context_un
.ndlp
== ndlp
)
3533 case CMD_ELS_REQUEST64_CR
:
3534 if (icmd
->un
.elsreq64
.remoteID
== ndlp
->nlp_DID
)
3536 case CMD_XMIT_ELS_RSP64_CX
:
3537 if (iocb
->context1
== (uint8_t *) ndlp
)
3540 } else if (pring
->ringno
== psli
->extra_ring
) {
3542 } else if (pring
->ringno
== psli
->fcp_ring
) {
3543 /* Skip match check if waiting to relogin to FCP target */
3544 if ((ndlp
->nlp_type
& NLP_FCP_TARGET
) &&
3545 (ndlp
->nlp_flag
& NLP_DELAY_TMO
)) {
3548 if (icmd
->ulpContext
== (volatile ushort
)ndlp
->nlp_rpi
) {
3551 } else if (pring
->ringno
== psli
->next_ring
) {
3558 * Free resources / clean up outstanding I/Os
3559 * associated with nlp_rpi in the LPFC_NODELIST entry.
3562 lpfc_no_rpi(struct lpfc_hba
*phba
, struct lpfc_nodelist
*ndlp
)
3564 LIST_HEAD(completions
);
3565 struct lpfc_sli
*psli
;
3566 struct lpfc_sli_ring
*pring
;
3567 struct lpfc_iocbq
*iocb
, *next_iocb
;
3570 lpfc_fabric_abort_nport(ndlp
);
3573 * Everything that matches on txcmplq will be returned
3574 * by firmware with a no rpi error.
3577 if (ndlp
->nlp_flag
& NLP_RPI_VALID
) {
3578 /* Now process each ring */
3579 for (i
= 0; i
< psli
->num_rings
; i
++) {
3580 pring
= &psli
->ring
[i
];
3582 spin_lock_irq(&phba
->hbalock
);
3583 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txq
,
3586 * Check to see if iocb matches the nport we are
3589 if ((lpfc_check_sli_ndlp(phba
, pring
, iocb
,
3591 /* It matches, so dequeue and call compl
3593 list_move_tail(&iocb
->list
,
3598 spin_unlock_irq(&phba
->hbalock
);
3602 /* Cancel all the IOCBs from the completions list */
3603 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
3610 * Free rpi associated with LPFC_NODELIST entry.
3611 * This routine is called from lpfc_freenode(), when we are removing
3612 * a LPFC_NODELIST entry. It is also called if the driver initiates a
3613 * LOGO that completes successfully, and we are waiting to PLOGI back
3614 * to the remote NPort. In addition, it is called after we receive
3615 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
3616 * we are waiting to PLOGI back to the remote NPort.
3619 lpfc_unreg_rpi(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
3621 struct lpfc_hba
*phba
= vport
->phba
;
3625 if (ndlp
->nlp_flag
& NLP_RPI_VALID
) {
3626 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3628 lpfc_unreg_login(phba
, vport
->vpi
, ndlp
->nlp_rpi
, mbox
);
3629 mbox
->vport
= vport
;
3630 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
3631 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
3632 if (rc
== MBX_NOT_FINISHED
)
3633 mempool_free(mbox
, phba
->mbox_mem_pool
);
3635 lpfc_no_rpi(phba
, ndlp
);
3637 ndlp
->nlp_flag
&= ~NLP_RPI_VALID
;
3638 ndlp
->nlp_flag
&= ~NLP_NPR_ADISC
;
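/*
 * Flow of lpfc_unreg_rpi() above, summarized (no new logic implied):
 *
 *	if (ndlp->nlp_flag & NLP_RPI_VALID) {
 *		issue UNREG_LOGIN mailbox, cmpl = lpfc_sli_def_mbox_cmpl;
 *		lpfc_no_rpi(phba, ndlp);   flush iocbs queued on this rpi
 *		clear NLP_RPI_VALID and NLP_NPR_ADISC;
 *	}
 */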
3645 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
3646 * @phba: pointer to lpfc hba data structure.
3648 * This routine is invoked to unregister all the currently registered RPIs
3652 lpfc_unreg_hba_rpis(struct lpfc_hba
*phba
)
3654 struct lpfc_vport
**vports
;
3655 struct lpfc_nodelist
*ndlp
;
3656 struct Scsi_Host
*shost
;
3659 vports
= lpfc_create_vport_work_array(phba
);
3660 for (i
= 0; i
<= phba
->max_vports
&& vports
[i
] != NULL
; i
++) {
3661 shost
= lpfc_shost_from_vport(vports
[i
]);
3662 spin_lock_irq(shost
->host_lock
);
3663 list_for_each_entry(ndlp
, &vports
[i
]->fc_nodes
, nlp_listp
) {
3664 if (ndlp
->nlp_flag
& NLP_RPI_VALID
) {
3665 /* The mempool_alloc might sleep */
3666 spin_unlock_irq(shost
->host_lock
);
3667 lpfc_unreg_rpi(vports
[i
], ndlp
);
3668 spin_lock_irq(shost
->host_lock
);
3671 spin_unlock_irq(shost
->host_lock
);
3673 lpfc_destroy_vport_work_array(phba
, vports
);
3677 lpfc_unreg_all_rpis(struct lpfc_vport
*vport
)
3679 struct lpfc_hba
*phba
= vport
->phba
;
3683 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3685 lpfc_unreg_login(phba
, vport
->vpi
, 0xffff, mbox
);
3686 mbox
->vport
= vport
;
3687 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
3688 mbox
->context1
= NULL
;
3689 rc
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
3690 if (rc
!= MBX_TIMEOUT
)
3691 mempool_free(mbox
, phba
->mbox_mem_pool
);
3693 if ((rc
== MBX_TIMEOUT
) || (rc
== MBX_NOT_FINISHED
))
3694 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
| LOG_VPORT
,
3695 "1836 Could not issue "
3696 "unreg_login(all_rpis) status %d\n", rc
);
3701 lpfc_unreg_default_rpis(struct lpfc_vport
*vport
)
3703 struct lpfc_hba
*phba
= vport
->phba
;
3707 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
3709 lpfc_unreg_did(phba
, vport
->vpi
, 0xffffffff, mbox
);
3710 mbox
->vport
= vport
;
3711 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
3712 mbox
->context1
= NULL
;
3713 rc
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
3714 if (rc
!= MBX_TIMEOUT
)
3715 mempool_free(mbox
, phba
->mbox_mem_pool
);
3717 if ((rc
== MBX_TIMEOUT
) || (rc
== MBX_NOT_FINISHED
))
3718 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_MBOX
| LOG_VPORT
,
3719 "1815 Could not issue "
3720 "unreg_did (default rpis) status %d\n",
3726 * Free resources associated with LPFC_NODELIST entry
3727 * so it can be freed.
3730 lpfc_cleanup_node(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
3732 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3733 struct lpfc_hba
*phba
= vport
->phba
;
3734 LPFC_MBOXQ_t
*mb
, *nextmb
;
3735 struct lpfc_dmabuf
*mp
;
3737 /* Cleanup node for NPort <nlp_DID> */
3738 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3739 "0900 Cleanup node for NPort x%x "
3740 "Data: x%x x%x x%x\n",
3741 ndlp
->nlp_DID
, ndlp
->nlp_flag
,
3742 ndlp
->nlp_state
, ndlp
->nlp_rpi
);
3743 if (NLP_CHK_FREE_REQ(ndlp
)) {
3744 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
3745 "0280 lpfc_cleanup_node: ndlp:x%p "
3746 "usgmap:x%x refcnt:%d\n",
3747 (void *)ndlp
, ndlp
->nlp_usg_map
,
3748 atomic_read(&ndlp
->kref
.refcount
));
3749 lpfc_dequeue_node(vport
, ndlp
);
3751 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_NODE
,
3752 "0281 lpfc_cleanup_node: ndlp:x%p "
3753 "usgmap:x%x refcnt:%d\n",
3754 (void *)ndlp
, ndlp
->nlp_usg_map
,
3755 atomic_read(&ndlp
->kref
.refcount
));
3756 lpfc_disable_node(vport
, ndlp
);
3759 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
3760 if ((mb
= phba
->sli
.mbox_active
)) {
3761 if ((mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) &&
3762 (ndlp
== (struct lpfc_nodelist
*) mb
->context2
)) {
3763 mb
->context2
= NULL
;
3764 mb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
3768 spin_lock_irq(&phba
->hbalock
);
3769 list_for_each_entry_safe(mb
, nextmb
, &phba
->sli
.mboxq
, list
) {
3770 if ((mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) &&
3771 (ndlp
== (struct lpfc_nodelist
*) mb
->context2
)) {
3772 mp
= (struct lpfc_dmabuf
*) (mb
->context1
);
3774 __lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
3777 list_del(&mb
->list
);
3778 mempool_free(mb
, phba
->mbox_mem_pool
);
3779 /* We shall not invoke the lpfc_nlp_put to decrement
3780 * the ndlp reference count as we are in the process
3781 * of lpfc_nlp_release.
3785 spin_unlock_irq(&phba
->hbalock
);
3787 lpfc_els_abort(phba
, ndlp
);
3789 spin_lock_irq(shost
->host_lock
);
3790 ndlp
->nlp_flag
&= ~NLP_DELAY_TMO
;
3791 spin_unlock_irq(shost
->host_lock
);
3793 ndlp
->nlp_last_elscmd
= 0;
3794 del_timer_sync(&ndlp
->nlp_delayfunc
);
3796 list_del_init(&ndlp
->els_retry_evt
.evt_listp
);
3797 list_del_init(&ndlp
->dev_loss_evt
.evt_listp
);
3799 lpfc_unreg_rpi(vport
, ndlp
);
3805 * Check to see if we can free the nlp back to the freelist.
3806 * If we are in the middle of using the nlp in the discovery state
3807 * machine, defer the free till we reach the end of the state machine.
3810 lpfc_nlp_remove(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
)
3812 struct lpfc_hba
*phba
= vport
->phba
;
3813 struct lpfc_rport_data
*rdata
;
3817 lpfc_cancel_retry_delay_tmo(vport
, ndlp
);
3818 if ((ndlp
->nlp_flag
& NLP_DEFER_RM
) &&
3819 !(ndlp
->nlp_flag
& NLP_RPI_VALID
)) {
3820 /* For this case we need to cleanup the default rpi
3821 * allocated by the firmware.
3823 if ((mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
))
3825 rc
= lpfc_reg_rpi(phba
, vport
->vpi
, ndlp
->nlp_DID
,
3826 (uint8_t *) &vport
->fc_sparam
, mbox
, 0);
3828 mempool_free(mbox
, phba
->mbox_mem_pool
);
3831 mbox
->mbox_flag
|= LPFC_MBX_IMED_UNREG
;
3832 mbox
->mbox_cmpl
= lpfc_mbx_cmpl_dflt_rpi
;
3833 mbox
->vport
= vport
;
3834 mbox
->context2
= NULL
;
3835 rc
=lpfc_sli_issue_mbox(phba
, mbox
, MBX_NOWAIT
);
3836 if (rc
== MBX_NOT_FINISHED
) {
3837 mempool_free(mbox
, phba
->mbox_mem_pool
);
3842 lpfc_cleanup_node(vport
, ndlp
);
3845 * We can get here with a non-NULL ndlp->rport because when we
3846 * unregister a rport we don't break the rport/node linkage. So if we
3847 * do, make sure we don't leaving any dangling pointers behind.
3850 rdata
= ndlp
->rport
->dd_data
;
3851 rdata
->pnode
= NULL
;
3857 lpfc_matchdid(struct lpfc_vport
*vport
, struct lpfc_nodelist
*ndlp
,
3860 D_ID mydid
, ndlpdid
, matchdid
;
3862 if (did
== Bcast_DID
)
3865 /* First check for Direct match */
3866 if (ndlp
->nlp_DID
== did
)
3869 /* Next check for area/domain identically equals 0 match */
3870 mydid
.un
.word
= vport
->fc_myDID
;
3871 if ((mydid
.un
.b
.domain
== 0) && (mydid
.un
.b
.area
== 0)) {
3875 matchdid
.un
.word
= did
;
3876 ndlpdid
.un
.word
= ndlp
->nlp_DID
;
3877 if (matchdid
.un
.b
.id
== ndlpdid
.un
.b
.id
) {
3878 if ((mydid
.un
.b
.domain
== matchdid
.un
.b
.domain
) &&
3879 (mydid
.un
.b
.area
== matchdid
.un
.b
.area
)) {
3880 if ((ndlpdid
.un
.b
.domain
== 0) &&
3881 (ndlpdid
.un
.b
.area
== 0)) {
3882 if (ndlpdid
.un
.b
.id
)
3888 matchdid
.un
.word
= ndlp
->nlp_DID
;
3889 if ((mydid
.un
.b
.domain
== ndlpdid
.un
.b
.domain
) &&
3890 (mydid
.un
.b
.area
== ndlpdid
.un
.b
.area
)) {
3891 if ((matchdid
.un
.b
.domain
== 0) &&
3892 (matchdid
.un
.b
.area
== 0)) {
3893 if (matchdid
.un
.b
.id
)
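/*
 * Illustrative example of the wildcard match above (hypothetical DIDs):
 * with a local DID of 0x010200, a lookup for DID 0x0102EF will match a node
 * whose recorded DID is just the ALPA 0x0000EF, because a stored DID with
 * domain == 0 and area == 0 is compared on the low byte only; the symmetric
 * case (full DID stored, bare ALPA looked up) is treated the same way.
 * A local DID with domain == 0 and area == 0 disables this wildcarding.
 */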
3901 /* Search for a nodelist entry */
3902 static struct lpfc_nodelist
*
3903 __lpfc_findnode_did(struct lpfc_vport
*vport
, uint32_t did
)
3905 struct lpfc_nodelist
*ndlp
;
3908 list_for_each_entry(ndlp
, &vport
->fc_nodes
, nlp_listp
) {
3909 if (lpfc_matchdid(vport
, ndlp
, did
)) {
3910 data1
= (((uint32_t) ndlp
->nlp_state
<< 24) |
3911 ((uint32_t) ndlp
->nlp_xri
<< 16) |
3912 ((uint32_t) ndlp
->nlp_type
<< 8) |
3913 ((uint32_t) ndlp
->nlp_rpi
& 0xff));
3914 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3915 "0929 FIND node DID "
3916 "Data: x%p x%x x%x x%x\n",
3917 ndlp
, ndlp
->nlp_DID
,
3918 ndlp
->nlp_flag
, data1
);
3923 /* FIND node did <did> NOT FOUND */
3924 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_NODE
,
3925 "0932 FIND node did x%x NOT FOUND.\n", did
);
3929 struct lpfc_nodelist
*
3930 lpfc_findnode_did(struct lpfc_vport
*vport
, uint32_t did
)
3932 struct Scsi_Host
*shost
= lpfc_shost_from_vport(vport
);
3933 struct lpfc_nodelist
*ndlp
;
3935 spin_lock_irq(shost
->host_lock
);
3936 ndlp
= __lpfc_findnode_did(vport
, did
);
3937 spin_unlock_irq(shost
->host_lock
);
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}
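/* Issue a CLEAR_LA mailbox command on the physical port so the firmware
 * resumes normal IOCB processing after link-up discovery. Not applicable
 * to SLI-4 ports.
 */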
static void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}
/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
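/* Abort the outstanding ELS commands of every node on this vport that
 * still has a PLOGI or ADISC in progress.
 */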
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}
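/* Release per-vport discovery resources: pending RSCN state, outstanding
 * ELS commands, and the discovery node list.
 */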
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
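/* Worker-thread handler for the discovery timeout. Based on the current
 * vport port_state it either restarts discovery, retries the NameServer
 * query, or flags the link in error and cleans up the discovery list.
 */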
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
		/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);

			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			lpfc_initial_flogi(vport);
			return;
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
			lpfc_els_abort(phba, ndlp);

		/* ReStart discovery */
		goto restart_disc;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

restart_disc:
		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}
/*
 * This routine handles processing an FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
	 */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}
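/* node_filter callbacks used with __lpfc_find_node() to match a nodelist
 * entry by RPI or by WWPN.
 */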
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}
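/* Walk the vport's node list and return the first entry accepted by the
 * supplied filter; callers normally hold the host lock (hence the __ prefix).
 */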
static struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * This routine looks up the ndlp lists for the given RPI. If the RPI is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}
/*
 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
 * found, it returns the node list element pointer; otherwise it returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}
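/* Initialize a newly allocated nodelist entry for the given DID before it
 * is used by the discovery state machine.
 */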
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));

	lpfc_initialize_node(vport, ndlp, did);
	INIT_LIST_HEAD(&ndlp->nlp_listp);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_hba *phba;
	unsigned long flags;
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			"0279 lpfc_nlp_release: ndlp:x%p "
			"usgmap:x%x refcnt:%d\n",
			(void *)ndlp, ndlp->nlp_usg_map,
			atomic_read(&ndlp->kref.refcount));

	/* remove ndlp from action. */
	lpfc_nlp_remove(ndlp->vport, ndlp);

	/* clear the ndlp active flag for all release cases */
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	NLP_CLR_NODE_ACT(ndlp);
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	/* free ndlp memory for final ndlp release */
	if (NLP_CHK_FREE_REQ(ndlp)) {
		kfree(ndlp->lat_data);
		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
	}
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (ndlp) {
		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
			"node get:        did:x%x flg:x%x refcnt:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag,
			atomic_read(&ndlp->kref.refcount));
		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */
		phba = ndlp->phba;
		spin_lock_irqsave(&phba->ndlp_lock, flags);
		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0276 lpfc_nlp_get: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
			return NULL;
		} else
			kref_get(&ndlp->kref);
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	}
	return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed. Returning 1 indicates the ndlp resource has been released; on the
 * other hand, returning 0 indicates the ndlp resource has not been released
 * yet.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba;
	unsigned long flags;

	if (!ndlp)
		return 1;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node put:        did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	phba = ndlp->phba;
	spin_lock_irqsave(&phba->ndlp_lock, flags);
	/* Check the ndlp memory free acknowledge flag to avoid the
	 * possible race condition that kref_put got invoked again
	 * after previous one has done ndlp memory free.
	 */
	if (NLP_CHK_FREE_ACK(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0274 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* Check the ndlp inactivate log flag to avoid the possible
	 * race condition that kref_put got invoked again after ndlp
	 * is already in inactivating state.
	 */
	if (NLP_CHK_IACT_REQ(ndlp)) {
		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
				"0275 lpfc_nlp_put: ndlp:x%p "
				"usgmap:x%x refcnt:%d\n",
				(void *)ndlp, ndlp->nlp_usg_map,
				atomic_read(&ndlp->kref.refcount));
		return 1;
	}
	/* For last put, mark the ndlp usage flags to make sure no
	 * other kref_get and kref_put on the same ndlp shall get
	 * in between the process when the final kref_put has been
	 * invoked on this ndlp.
	 */
	if (atomic_read(&ndlp->kref.refcount) == 1) {
		/* Indicate ndlp is put to inactive state. */
		NLP_SET_IACT_REQ(ndlp);
		/* Acknowledge ndlp memory free has been seen. */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note, kref_put() returns 1 when it decrements a reference count
	 * that was 1: it invokes the release callback function, but leaves
	 * the reference count at 1 (it does not perform the final decrement
	 * itself). Otherwise, it decrements the reference count and
	 * returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the
 * ndlp has been freed. A return value of 0 indicates the ndlp has
 * not yet been released.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used:   did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;

	vports = lpfc_create_vport_work_array(phba);

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			} else {
				lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
					"2624 RPI %x DID %x flg %x still "
					"logged in\n",
					ndlp->nlp_rpi, ndlp->nlp_DID,
					ndlp->nlp_flag);
				if (ndlp->nlp_flag & NLP_RPI_VALID)
					ret = 1;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs the unregistering, in order, of RPIs, VPIs,
 * and VFIs.
 */
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i, rc;

	/* Unregister RPIs */
	if (lpfc_fcf_inuse(phba))
		lpfc_unreg_hba_rpis(phba);

	/* At this point, all discovery is aborted */
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Stop FLOGI/FDISC retries */
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_mbx_unreg_vpi(vports[i]);
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mbox, phba->pport);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(phba->pport);
	spin_lock_irq(shost->host_lock);
	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	return 0;
}
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues synchronous unregister FCF mailbox command to HBA to
 * unregister the currently registered FCF record. The driver does not reset
 * the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2551 UNREG_FCFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		return -EINVAL;
	}
	return 0;
}
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset HBA FCF states after successful unregister FCF */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * If driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered as the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function just unregisters the currently registered FCF. It does not
 * try to find another FCF for discovery.
 */
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering fcf */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister FCF record and reset HBA FCF state */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Set proper HBA FCF states after successful unregister FCF */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks if there are any connected remote ports for the FCF
 * and, if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If HBA is not running in FIP mode or if HBA does not support
	 * FCoE or if FCF is not registered, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structure for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has one word header and number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structure with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		uint8_t *buff,
		uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If data size is less than 2 words signature and version cannot be
	 * verified.
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}