[SCSI] lpfc 8.3.23: Miscellaneous fixes
[linux-2.6/btrfs-unstable.git] drivers/scsi/lpfc/lpfc_init.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31 #include <linux/aer.h>
32 #include <linux/slab.h>
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_transport_fc.h>
39 #include "lpfc_hw4.h"
40 #include "lpfc_hw.h"
41 #include "lpfc_sli.h"
42 #include "lpfc_sli4.h"
43 #include "lpfc_nl.h"
44 #include "lpfc_disc.h"
45 #include "lpfc_scsi.h"
46 #include "lpfc.h"
47 #include "lpfc_logmsg.h"
48 #include "lpfc_crtn.h"
49 #include "lpfc_vport.h"
50 #include "lpfc_version.h"
52 char *_dump_buf_data;
53 unsigned long _dump_buf_data_order;
54 char *_dump_buf_dif;
55 unsigned long _dump_buf_dif_order;
56 spinlock_t _dump_buf_lock;
58 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
59 static int lpfc_post_rcv_buf(struct lpfc_hba *);
60 static int lpfc_sli4_queue_create(struct lpfc_hba *);
61 static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
62 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
63 static int lpfc_setup_endian_order(struct lpfc_hba *);
64 static int lpfc_sli4_read_config(struct lpfc_hba *);
65 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
66 static void lpfc_free_sgl_list(struct lpfc_hba *);
67 static int lpfc_init_sgl_list(struct lpfc_hba *);
68 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
69 static void lpfc_free_active_sgl(struct lpfc_hba *);
70 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
71 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
72 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
73 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
74 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
76 static struct scsi_transport_template *lpfc_transport_template = NULL;
77 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
78 static DEFINE_IDR(lpfc_hba_index);
80 /**
81 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
82 * @phba: pointer to lpfc hba data structure.
84 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
85 * mailbox command. It retrieves the revision information from the HBA and
86 * collects the Vital Product Data (VPD) about the HBA for preparing the
87 * configuration of the HBA.
89 * Return codes:
90 * 0 - success.
91 * -ERESTART - requests the SLI layer to reset the HBA and try again.
92 * Any other value - indicates an error.
93 **/
94 int
95 lpfc_config_port_prep(struct lpfc_hba *phba)
97 lpfc_vpd_t *vp = &phba->vpd;
98 int i = 0, rc;
99 LPFC_MBOXQ_t *pmb;
100 MAILBOX_t *mb;
101 char *lpfc_vpd_data = NULL;
102 uint16_t offset = 0;
103 static char licensed[56] =
104 "key unlock for use with gnu public licensed code only\0";
105 static int init_key = 1;
107 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
108 if (!pmb) {
109 phba->link_state = LPFC_HBA_ERROR;
110 return -ENOMEM;
113 mb = &pmb->u.mb;
114 phba->link_state = LPFC_INIT_MBX_CMDS;
116 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
117 if (init_key) {
118 uint32_t *ptext = (uint32_t *) licensed;
120 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
121 *ptext = cpu_to_be32(*ptext);
122 init_key = 0;
125 lpfc_read_nv(phba, pmb);
126 memset((char*)mb->un.varRDnvp.rsvd3, 0,
127 sizeof (mb->un.varRDnvp.rsvd3));
128 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
129 sizeof (licensed));
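/*
 * For LightPulse (LC) HBAs the ASCII license string above is converted to
 * big-endian 32-bit words once (guarded by init_key) and copied into the
 * READ_NVPARM reserved words before the mailbox is issued; the text of the
 * string suggests it acts as an unlock key for GPL-licensed driver code.
 */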
131 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
133 if (rc != MBX_SUCCESS) {
134 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
135 "0324 Config Port initialization "
136 "error, mbxCmd x%x READ_NVPARM, "
137 "mbxStatus x%x\n",
138 mb->mbxCommand, mb->mbxStatus);
139 mempool_free(pmb, phba->mbox_mem_pool);
140 return -ERESTART;
142 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
143 sizeof(phba->wwnn));
144 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
145 sizeof(phba->wwpn));
148 phba->sli3_options = 0x0;
150 /* Setup and issue mailbox READ REV command */
151 lpfc_read_rev(phba, pmb);
152 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
153 if (rc != MBX_SUCCESS) {
154 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
155 "0439 Adapter failed to init, mbxCmd x%x "
156 "READ_REV, mbxStatus x%x\n",
157 mb->mbxCommand, mb->mbxStatus);
158 mempool_free( pmb, phba->mbox_mem_pool);
159 return -ERESTART;
164 * The value of rr must be 1 since the driver set the cv field to 1.
165 * This setting requires the FW to set all revision fields.
167 if (mb->un.varRdRev.rr == 0) {
168 vp->rev.rBit = 0;
169 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
170 "0440 Adapter failed to init, READ_REV has "
171 "missing revision information.\n");
172 mempool_free(pmb, phba->mbox_mem_pool);
173 return -ERESTART;
176 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
177 mempool_free(pmb, phba->mbox_mem_pool);
178 return -EINVAL;
181 /* Save information as VPD data */
182 vp->rev.rBit = 1;
183 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
184 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
185 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
186 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
187 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
188 vp->rev.biuRev = mb->un.varRdRev.biuRev;
189 vp->rev.smRev = mb->un.varRdRev.smRev;
190 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
191 vp->rev.endecRev = mb->un.varRdRev.endecRev;
192 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
193 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
194 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
195 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
196 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
197 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
199 /* If the sli feature level is less than 9, we must
200 * tear down all RPIs and VPIs on link down if NPIV
201 * is enabled.
203 if (vp->rev.feaLevelHigh < 9)
204 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
206 if (lpfc_is_LC_HBA(phba->pcidev->device))
207 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
208 sizeof (phba->RandomData));
210 /* Get adapter VPD information */
211 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
212 if (!lpfc_vpd_data)
213 goto out_free_mbox;
215 do {
216 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
217 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
219 if (rc != MBX_SUCCESS) {
220 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
221 "0441 VPD not present on adapter, "
222 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
223 mb->mbxCommand, mb->mbxStatus);
224 mb->un.varDmp.word_cnt = 0;
226 /* dump mem may return a zero when finished or we got a
227 * mailbox error, either way we are done.
229 if (mb->un.varDmp.word_cnt == 0)
230 break;
231 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
232 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
233 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
234 lpfc_vpd_data + offset,
235 mb->un.varDmp.word_cnt);
236 offset += mb->un.varDmp.word_cnt;
237 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
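/*
 * Summary of the loop above: the adapter's VPD region is pulled in chunks
 * with repeated DUMP_MEMORY mailbox commands.  Each pass copies word_cnt's
 * worth of response data from DMP_RSP_OFFSET into lpfc_vpd_data at the
 * running offset, and the loop stops when the adapter reports a zero
 * word_cnt or DMP_VPD_SIZE has been accumulated.  The assembled buffer is
 * then handed to lpfc_parse_vpd() below.
 */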
238 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
240 kfree(lpfc_vpd_data);
241 out_free_mbox:
242 mempool_free(pmb, phba->mbox_mem_pool);
243 return 0;
247 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
248 * @phba: pointer to lpfc hba data structure.
249 * @pmboxq: pointer to the driver internal queue element for mailbox command.
251 * This is the completion handler for driver's configuring asynchronous event
252 * mailbox command to the device. If the mailbox command returns successfully,
253 * it will set internal async event support flag to 1; otherwise, it will
254 * set internal async event support flag to 0.
256 static void
257 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
259 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
260 phba->temp_sensor_support = 1;
261 else
262 phba->temp_sensor_support = 0;
263 mempool_free(pmboxq, phba->mbox_mem_pool);
264 return;
268 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
269 * @phba: pointer to lpfc hba data structure.
270 * @pmboxq: pointer to the driver internal queue element for mailbox command.
272 * This is the completion handler for the dump mailbox command used to get
273 * wake up parameters. When this command completes, the response contains the
274 * Option ROM version of the HBA. This function translates the version number
275 * into a human readable string and stores it in OptionROMVersion.
277 static void
278 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
280 struct prog_id *prg;
281 uint32_t prog_id_word;
282 char dist = ' ';
283 /* character array used for decoding dist type. */
284 char dist_char[] = "nabx";
286 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
287 mempool_free(pmboxq, phba->mbox_mem_pool);
288 return;
291 prg = (struct prog_id *) &prog_id_word;
293 /* word 7 contains the option rom version */
294 prog_id_word = pmboxq->u.mb.un.varWords[7];
296 /* Decode the Option rom version word to a readable string */
297 if (prg->dist < 4)
298 dist = dist_char[prg->dist];
300 if ((prg->dist == 3) && (prg->num == 0))
301 sprintf(phba->OptionROMVersion, "%d.%d%d",
302 prg->ver, prg->rev, prg->lev);
303 else
304 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
305 prg->ver, prg->rev, prg->lev,
306 dist, prg->num);
307 mempool_free(pmboxq, phba->mbox_mem_pool);
308 return;
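/*
 * Decoding sketch for the option ROM version word handled above
 * (illustrative values only): the prog_id overlay splits the word into
 * ver, rev, lev, dist and num fields, with dist indexing the dist_char[]
 * table "nabx".  A word decoding to ver=5, rev=0, lev=2, dist=1, num=3
 * would be rendered by the sprintf above as "5.02a3"; the special case
 * dist==3 && num==0 drops the trailing distribution suffix and prints
 * only "5.02".
 */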
312 * lpfc_config_port_post - Perform lpfc initialization after config port
313 * @phba: pointer to lpfc hba data structure.
315 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
316 * command call. It performs all internal resource and state setups on the
317 * port: post IOCB buffers, enable appropriate host interrupt attentions,
318 * ELS ring timers, etc.
320 * Return codes
321 * 0 - success.
322 * Any other value - error.
325 lpfc_config_port_post(struct lpfc_hba *phba)
327 struct lpfc_vport *vport = phba->pport;
328 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
329 LPFC_MBOXQ_t *pmb;
330 MAILBOX_t *mb;
331 struct lpfc_dmabuf *mp;
332 struct lpfc_sli *psli = &phba->sli;
333 uint32_t status, timeout;
334 int i, j;
335 int rc;
337 spin_lock_irq(&phba->hbalock);
339 * If the Config port completed correctly the HBA is no longer
340 * overheated.
342 if (phba->over_temp_state == HBA_OVER_TEMP)
343 phba->over_temp_state = HBA_NORMAL_TEMP;
344 spin_unlock_irq(&phba->hbalock);
346 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
347 if (!pmb) {
348 phba->link_state = LPFC_HBA_ERROR;
349 return -ENOMEM;
351 mb = &pmb->u.mb;
353 /* Get login parameters for NID. */
354 rc = lpfc_read_sparam(phba, pmb, 0);
355 if (rc) {
356 mempool_free(pmb, phba->mbox_mem_pool);
357 return -ENOMEM;
360 pmb->vport = vport;
361 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
363 "0448 Adapter failed init, mbxCmd x%x "
364 "READ_SPARM mbxStatus x%x\n",
365 mb->mbxCommand, mb->mbxStatus);
366 phba->link_state = LPFC_HBA_ERROR;
367 mp = (struct lpfc_dmabuf *) pmb->context1;
368 mempool_free(pmb, phba->mbox_mem_pool);
369 lpfc_mbuf_free(phba, mp->virt, mp->phys);
370 kfree(mp);
371 return -EIO;
374 mp = (struct lpfc_dmabuf *) pmb->context1;
376 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
377 lpfc_mbuf_free(phba, mp->virt, mp->phys);
378 kfree(mp);
379 pmb->context1 = NULL;
381 if (phba->cfg_soft_wwnn)
382 u64_to_wwn(phba->cfg_soft_wwnn,
383 vport->fc_sparam.nodeName.u.wwn);
384 if (phba->cfg_soft_wwpn)
385 u64_to_wwn(phba->cfg_soft_wwpn,
386 vport->fc_sparam.portName.u.wwn);
387 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
388 sizeof (struct lpfc_name));
389 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
390 sizeof (struct lpfc_name));
392 /* Update the fc_host data structures with new wwn. */
393 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
394 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
395 fc_host_max_npiv_vports(shost) = phba->max_vpi;
397 /* If no serial number in VPD data, use low 6 bytes of WWNN */
398 /* This should be consolidated into parse_vpd ? - mr */
399 if (phba->SerialNumber[0] == 0) {
400 uint8_t *outptr;
402 outptr = &vport->fc_nodename.u.s.IEEE[0];
403 for (i = 0; i < 12; i++) {
404 status = *outptr++;
405 j = ((status & 0xf0) >> 4);
406 if (j <= 9)
407 phba->SerialNumber[i] =
408 (char)((uint8_t) 0x30 + (uint8_t) j);
409 else
410 phba->SerialNumber[i] =
411 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
412 i++;
413 j = (status & 0xf);
414 if (j <= 9)
415 phba->SerialNumber[i] =
416 (char)((uint8_t) 0x30 + (uint8_t) j);
417 else
418 phba->SerialNumber[i] =
419 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
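/*
 * The loop above synthesizes a 12-character serial number from the six
 * IEEE address bytes of the WWNN when the VPD supplied none: each byte is
 * split into its high and low nibble, and each nibble is mapped to ASCII
 * (0x30 + n for 0-9, 0x61 + n - 10 for 0xa-0xf).  An IEEE byte of 0x3A,
 * for example, contributes the characters '3' and 'a' (illustrative value).
 */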
423 lpfc_read_config(phba, pmb);
424 pmb->vport = vport;
425 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
427 "0453 Adapter failed to init, mbxCmd x%x "
428 "READ_CONFIG, mbxStatus x%x\n",
429 mb->mbxCommand, mb->mbxStatus);
430 phba->link_state = LPFC_HBA_ERROR;
431 mempool_free( pmb, phba->mbox_mem_pool);
432 return -EIO;
435 /* Check if the port is disabled */
436 lpfc_sli_read_link_ste(phba);
438 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
439 if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
440 phba->cfg_hba_queue_depth =
441 (mb->un.varRdConfig.max_xri + 1) -
442 lpfc_sli4_get_els_iocb_cnt(phba);
444 phba->lmt = mb->un.varRdConfig.lmt;
446 /* Get the default values for Model Name and Description */
447 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
449 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
450 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
451 && !(phba->lmt & LMT_1Gb))
452 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
453 && !(phba->lmt & LMT_2Gb))
454 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
455 && !(phba->lmt & LMT_4Gb))
456 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
457 && !(phba->lmt & LMT_8Gb))
458 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
459 && !(phba->lmt & LMT_10Gb))
460 || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
461 && !(phba->lmt & LMT_16Gb))) {
462 /* Reset link speed to auto */
463 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
464 "1302 Invalid speed for this board: "
465 "Reset link speed to auto: x%x\n",
466 phba->cfg_link_speed);
467 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
470 phba->link_state = LPFC_LINK_DOWN;
472 /* Only process IOCBs on ELS ring till hba_state is READY */
473 if (psli->ring[psli->extra_ring].cmdringaddr)
474 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
475 if (psli->ring[psli->fcp_ring].cmdringaddr)
476 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
477 if (psli->ring[psli->next_ring].cmdringaddr)
478 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
480 /* Post receive buffers for desired rings */
481 if (phba->sli_rev != 3)
482 lpfc_post_rcv_buf(phba);
485 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
487 if (phba->intr_type == MSIX) {
488 rc = lpfc_config_msi(phba, pmb);
489 if (rc) {
490 mempool_free(pmb, phba->mbox_mem_pool);
491 return -EIO;
493 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
494 if (rc != MBX_SUCCESS) {
495 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
496 "0352 Config MSI mailbox command "
497 "failed, mbxCmd x%x, mbxStatus x%x\n",
498 pmb->u.mb.mbxCommand,
499 pmb->u.mb.mbxStatus);
500 mempool_free(pmb, phba->mbox_mem_pool);
501 return -EIO;
505 spin_lock_irq(&phba->hbalock);
506 /* Initialize ERATT handling flag */
507 phba->hba_flag &= ~HBA_ERATT_HANDLED;
509 /* Enable appropriate host interrupts */
510 if (lpfc_readl(phba->HCregaddr, &status)) {
511 spin_unlock_irq(&phba->hbalock);
512 return -EIO;
514 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
515 if (psli->num_rings > 0)
516 status |= HC_R0INT_ENA;
517 if (psli->num_rings > 1)
518 status |= HC_R1INT_ENA;
519 if (psli->num_rings > 2)
520 status |= HC_R2INT_ENA;
521 if (psli->num_rings > 3)
522 status |= HC_R3INT_ENA;
524 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
525 (phba->cfg_poll & DISABLE_FCP_RING_INT))
526 status &= ~(HC_R0INT_ENA);
528 writel(status, phba->HCregaddr);
529 readl(phba->HCregaddr); /* flush */
530 spin_unlock_irq(&phba->hbalock);
532 /* Set up ring-0 (ELS) timer */
533 timeout = phba->fc_ratov * 2;
534 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
535 /* Set up heart beat (HB) timer */
536 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
537 phba->hb_outstanding = 0;
538 phba->last_completion_time = jiffies;
539 /* Set up error attention (ERATT) polling timer */
540 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
542 if (phba->hba_flag & LINK_DISABLED) {
543 lpfc_printf_log(phba,
544 KERN_ERR, LOG_INIT,
545 "2598 Adapter Link is disabled.\n");
546 lpfc_down_link(phba, pmb);
547 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
548 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
549 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
550 lpfc_printf_log(phba,
551 KERN_ERR, LOG_INIT,
552 "2599 Adapter failed to issue DOWN_LINK"
553 " mbox command rc 0x%x\n", rc);
555 mempool_free(pmb, phba->mbox_mem_pool);
556 return -EIO;
558 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
559 lpfc_init_link(phba, pmb, phba->cfg_topology,
560 phba->cfg_link_speed);
561 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
562 lpfc_set_loopback_flag(phba);
563 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
564 if (rc != MBX_SUCCESS) {
565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
566 "0454 Adapter failed to init, mbxCmd x%x "
567 "INIT_LINK, mbxStatus x%x\n",
568 mb->mbxCommand, mb->mbxStatus);
570 /* Clear all interrupt enable conditions */
571 writel(0, phba->HCregaddr);
572 readl(phba->HCregaddr); /* flush */
573 /* Clear all pending interrupts */
574 writel(0xffffffff, phba->HAregaddr);
575 readl(phba->HAregaddr); /* flush */
577 phba->link_state = LPFC_HBA_ERROR;
578 if (rc != MBX_BUSY)
579 mempool_free(pmb, phba->mbox_mem_pool);
580 return -EIO;
583 /* MBOX buffer will be freed in mbox compl */
584 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
585 if (!pmb) {
586 phba->link_state = LPFC_HBA_ERROR;
587 return -ENOMEM;
590 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
591 pmb->mbox_cmpl = lpfc_config_async_cmpl;
592 pmb->vport = phba->pport;
593 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
595 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
596 lpfc_printf_log(phba,
597 KERN_ERR,
598 LOG_INIT,
599 "0456 Adapter failed to issue "
600 "ASYNCEVT_ENABLE mbox status x%x\n",
601 rc);
602 mempool_free(pmb, phba->mbox_mem_pool);
605 /* Get Option rom version */
606 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
607 if (!pmb) {
608 phba->link_state = LPFC_HBA_ERROR;
609 return -ENOMEM;
612 lpfc_dump_wakeup_param(phba, pmb);
613 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
614 pmb->vport = phba->pport;
615 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
617 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
619 "to get Option ROM version status x%x\n", rc);
620 mempool_free(pmb, phba->mbox_mem_pool);
623 return 0;
627 * lpfc_hba_init_link - Initialize the FC link
628 * @phba: pointer to lpfc hba data structure.
629 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
631 * This routine will issue the INIT_LINK mailbox command call.
632 * It is available to other drivers through the lpfc_hba data
633 * structure for use as a delayed link up mechanism with the
634 * module parameter lpfc_suppress_link_up.
636 * Return code
637 * 0 - success
638 * Any other value - error
641 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
643 struct lpfc_vport *vport = phba->pport;
644 LPFC_MBOXQ_t *pmb;
645 MAILBOX_t *mb;
646 int rc;
648 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
649 if (!pmb) {
650 phba->link_state = LPFC_HBA_ERROR;
651 return -ENOMEM;
653 mb = &pmb->u.mb;
654 pmb->vport = vport;
656 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
657 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
658 lpfc_set_loopback_flag(phba);
659 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
660 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
662 "0498 Adapter failed to init, mbxCmd x%x "
663 "INIT_LINK, mbxStatus x%x\n",
664 mb->mbxCommand, mb->mbxStatus);
665 if (phba->sli_rev <= LPFC_SLI_REV3) {
666 /* Clear all interrupt enable conditions */
667 writel(0, phba->HCregaddr);
668 readl(phba->HCregaddr); /* flush */
669 /* Clear all pending interrupts */
670 writel(0xffffffff, phba->HAregaddr);
671 readl(phba->HAregaddr); /* flush */
673 phba->link_state = LPFC_HBA_ERROR;
674 if (rc != MBX_BUSY || flag == MBX_POLL)
675 mempool_free(pmb, phba->mbox_mem_pool);
676 return -EIO;
678 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
679 if (flag == MBX_POLL)
680 mempool_free(pmb, phba->mbox_mem_pool);
682 return 0;
686 * lpfc_hba_down_link - this routine downs the FC link
687 * @phba: pointer to lpfc hba data structure.
688 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
690 * This routine will issue the DOWN_LINK mailbox command call.
691 * It is available to other drivers through the lpfc_hba data
692 * structure for use to stop the link.
694 * Return code
695 * 0 - success
696 * Any other value - error
699 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
701 LPFC_MBOXQ_t *pmb;
702 int rc;
704 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
705 if (!pmb) {
706 phba->link_state = LPFC_HBA_ERROR;
707 return -ENOMEM;
710 lpfc_printf_log(phba,
711 KERN_ERR, LOG_INIT,
712 "0491 Adapter Link is disabled.\n");
713 lpfc_down_link(phba, pmb);
714 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
715 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
716 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
717 lpfc_printf_log(phba,
718 KERN_ERR, LOG_INIT,
719 "2522 Adapter failed to issue DOWN_LINK"
720 " mbox command rc 0x%x\n", rc);
722 mempool_free(pmb, phba->mbox_mem_pool);
723 return -EIO;
725 if (flag == MBX_POLL)
726 mempool_free(pmb, phba->mbox_mem_pool);
728 return 0;
732 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
733 * @phba: pointer to lpfc HBA data structure.
735 * This routine will do LPFC uninitialization before the HBA is reset when
736 * bringing down the SLI Layer.
738 * Return codes
739 * 0 - success.
740 * Any other value - error.
743 lpfc_hba_down_prep(struct lpfc_hba *phba)
745 struct lpfc_vport **vports;
746 int i;
748 if (phba->sli_rev <= LPFC_SLI_REV3) {
749 /* Disable interrupts */
750 writel(0, phba->HCregaddr);
751 readl(phba->HCregaddr); /* flush */
754 if (phba->pport->load_flag & FC_UNLOADING)
755 lpfc_cleanup_discovery_resources(phba->pport);
756 else {
757 vports = lpfc_create_vport_work_array(phba);
758 if (vports != NULL)
759 for (i = 0; i <= phba->max_vports &&
760 vports[i] != NULL; i++)
761 lpfc_cleanup_discovery_resources(vports[i]);
762 lpfc_destroy_vport_work_array(phba, vports);
764 return 0;
768 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
769 * @phba: pointer to lpfc HBA data structure.
771 * This routine will do uninitialization after the HBA is reset when bringing
772 * down the SLI Layer.
774 * Return codes
775 * 0 - success.
776 * Any other value - error.
778 static int
779 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
781 struct lpfc_sli *psli = &phba->sli;
782 struct lpfc_sli_ring *pring;
783 struct lpfc_dmabuf *mp, *next_mp;
784 LIST_HEAD(completions);
785 int i;
787 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
788 lpfc_sli_hbqbuf_free_all(phba);
789 else {
790 /* Cleanup preposted buffers on the ELS ring */
791 pring = &psli->ring[LPFC_ELS_RING];
792 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
793 list_del(&mp->list);
794 pring->postbufq_cnt--;
795 lpfc_mbuf_free(phba, mp->virt, mp->phys);
796 kfree(mp);
800 spin_lock_irq(&phba->hbalock);
801 for (i = 0; i < psli->num_rings; i++) {
802 pring = &psli->ring[i];
804 /* At this point in time the HBA is either reset or DOA. Either
805 * way, nothing should be on txcmplq as it will NEVER complete.
807 list_splice_init(&pring->txcmplq, &completions);
808 pring->txcmplq_cnt = 0;
809 spin_unlock_irq(&phba->hbalock);
811 /* Cancel all the IOCBs from the completions list */
812 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
813 IOERR_SLI_ABORTED);
815 lpfc_sli_abort_iocb_ring(phba, pring);
816 spin_lock_irq(&phba->hbalock);
818 spin_unlock_irq(&phba->hbalock);
820 return 0;
824 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
825 * @phba: pointer to lpfc HBA data structure.
827 * This routine will do uninitialization after the HBA is reset when bringing
828 * down the SLI Layer.
830 * Return codes
831 * 0 - success.
832 * Any other value - error.
834 static int
835 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
837 struct lpfc_scsi_buf *psb, *psb_next;
838 LIST_HEAD(aborts);
839 int ret;
840 unsigned long iflag = 0;
841 struct lpfc_sglq *sglq_entry = NULL;
843 ret = lpfc_hba_down_post_s3(phba);
844 if (ret)
845 return ret;
846 /* At this point in time the HBA is either reset or DOA. Either
847 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
848 * on the lpfc_sgl_list so that it can either be freed if the
849 * driver is unloading or reposted if the driver is restarting
850 * the port.
852 spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
853 /* scsi_buf_list */
854 /* abts_sgl_list_lock required because worker thread uses this
855 * list.
857 spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
858 list_for_each_entry(sglq_entry,
859 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
860 sglq_entry->state = SGL_FREED;
862 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
863 &phba->sli4_hba.lpfc_sgl_list);
864 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
865 /* abts_scsi_buf_list_lock required because worker thread uses this
866 * list.
868 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
869 list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
870 &aborts);
871 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
872 spin_unlock_irq(&phba->hbalock);
874 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
875 psb->pCmd = NULL;
876 psb->status = IOSTAT_SUCCESS;
878 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
879 list_splice(&aborts, &phba->lpfc_scsi_buf_list);
880 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
881 return 0;
885 * lpfc_hba_down_post - Wrapper func for hba down post routine
886 * @phba: pointer to lpfc HBA data structure.
888 * This routine wraps the actual SLI3 or SLI4 routine for performing
889 * uninitialization after the HBA is reset when bringing down the SLI Layer.
891 * Return codes
892 * 0 - success.
893 * Any other value - error.
896 lpfc_hba_down_post(struct lpfc_hba *phba)
898 return (*phba->lpfc_hba_down_post)(phba);
902 * lpfc_hb_timeout - The HBA-timer timeout handler
903 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
905 * This is the HBA-timer timeout handler registered to the lpfc driver. When
906 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
907 * work-port-events bitmap and the worker thread is notified. This timeout
908 * event will be used by the worker thread to invoke the actual timeout
909 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
910 * be performed in the timeout handler and the HBA timeout event bit shall
911 * be cleared by the worker thread after it has taken the event bitmap out.
913 static void
914 lpfc_hb_timeout(unsigned long ptr)
916 struct lpfc_hba *phba;
917 uint32_t tmo_posted;
918 unsigned long iflag;
920 phba = (struct lpfc_hba *)ptr;
922 /* Check for heart beat timeout conditions */
923 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
924 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
925 if (!tmo_posted)
926 phba->pport->work_port_events |= WORKER_HB_TMO;
927 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
929 /* Tell the worker thread there is work to do */
930 if (!tmo_posted)
931 lpfc_worker_wake_up(phba);
932 return;
936 * lpfc_rrq_timeout - The RRQ-timer timeout handler
937 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
939 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
940 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
941 * work-port-events bitmap and the worker thread is notified. This timeout
942 * event will be used by the worker thread to invoke the actual timeout
943 * handler routine, lpfc_rrq_handler. Any periodical operations will
944 * be performed in the timeout handler and the RRQ timeout event bit shall
945 * be cleared by the worker thread after it has taken the event bitmap out.
947 static void
948 lpfc_rrq_timeout(unsigned long ptr)
950 struct lpfc_hba *phba;
951 unsigned long iflag;
953 phba = (struct lpfc_hba *)ptr;
954 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
955 phba->hba_flag |= HBA_RRQ_ACTIVE;
956 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
957 lpfc_worker_wake_up(phba);
961 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
962 * @phba: pointer to lpfc hba data structure.
963 * @pmboxq: pointer to the driver internal queue element for mailbox command.
965 * This is the callback function to the lpfc heart-beat mailbox command.
966 * If configured, the lpfc driver issues the heart-beat mailbox command to
967 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
968 * heart-beat mailbox command is issued, the driver shall set up heart-beat
969 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
970 * heart-beat outstanding state. Once the mailbox command comes back and
971 * no error conditions detected, the heart-beat mailbox command timer is
972 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
973 * state is cleared for the next heart-beat. If the timer expired with the
974 * heart-beat outstanding state set, the driver will put the HBA offline.
976 static void
977 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
979 unsigned long drvr_flag;
981 spin_lock_irqsave(&phba->hbalock, drvr_flag);
982 phba->hb_outstanding = 0;
983 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
985 /* Check and reset heart-beat timer if necessary */
986 mempool_free(pmboxq, phba->mbox_mem_pool);
987 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
988 !(phba->link_state == LPFC_HBA_ERROR) &&
989 !(phba->pport->load_flag & FC_UNLOADING))
990 mod_timer(&phba->hb_tmofunc,
991 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
992 return;
996 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
997 * @phba: pointer to lpfc hba data structure.
999 * This is the actual HBA-timer timeout handler to be invoked by the worker
1000 * thread whenever the HBA timer fired and HBA-timeout event posted. This
1001 * handler performs any periodic operations needed for the device. If such
1002 * periodic event has already been attended to either in the interrupt handler
1003 * or by processing slow-ring or fast-ring events within the HBA-timer
1004 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
1005 * the timer for the next timeout period. If lpfc heart-beat mailbox command
1006 * is configured and there is no heart-beat mailbox command outstanding, a
1007 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
1008 * has been a heart-beat mailbox command outstanding, the HBA shall be put
1009 * to offline.
1011 void
1012 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1014 struct lpfc_vport **vports;
1015 LPFC_MBOXQ_t *pmboxq;
1016 struct lpfc_dmabuf *buf_ptr;
1017 int retval, i;
1018 struct lpfc_sli *psli = &phba->sli;
1019 LIST_HEAD(completions);
1021 vports = lpfc_create_vport_work_array(phba);
1022 if (vports != NULL)
1023 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1024 lpfc_rcv_seq_check_edtov(vports[i]);
1025 lpfc_destroy_vport_work_array(phba, vports);
1027 if ((phba->link_state == LPFC_HBA_ERROR) ||
1028 (phba->pport->load_flag & FC_UNLOADING) ||
1029 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1030 return;
1032 spin_lock_irq(&phba->pport->work_port_lock);
1034 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
1035 jiffies)) {
1036 spin_unlock_irq(&phba->pport->work_port_lock);
1037 if (!phba->hb_outstanding)
1038 mod_timer(&phba->hb_tmofunc,
1039 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
1040 else
1041 mod_timer(&phba->hb_tmofunc,
1042 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1043 return;
1045 spin_unlock_irq(&phba->pport->work_port_lock);
1047 if (phba->elsbuf_cnt &&
1048 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1049 spin_lock_irq(&phba->hbalock);
1050 list_splice_init(&phba->elsbuf, &completions);
1051 phba->elsbuf_cnt = 0;
1052 phba->elsbuf_prev_cnt = 0;
1053 spin_unlock_irq(&phba->hbalock);
1055 while (!list_empty(&completions)) {
1056 list_remove_head(&completions, buf_ptr,
1057 struct lpfc_dmabuf, list);
1058 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1059 kfree(buf_ptr);
1062 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1064 /* If there is no heart beat outstanding, issue a heartbeat command */
1065 if (phba->cfg_enable_hba_heartbeat) {
1066 if (!phba->hb_outstanding) {
1067 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1068 (list_empty(&psli->mboxq))) {
1069 pmboxq = mempool_alloc(phba->mbox_mem_pool,
1070 GFP_KERNEL);
1071 if (!pmboxq) {
1072 mod_timer(&phba->hb_tmofunc,
1073 jiffies +
1074 HZ * LPFC_HB_MBOX_INTERVAL);
1075 return;
1078 lpfc_heart_beat(phba, pmboxq);
1079 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1080 pmboxq->vport = phba->pport;
1081 retval = lpfc_sli_issue_mbox(phba, pmboxq,
1082 MBX_NOWAIT);
1084 if (retval != MBX_BUSY &&
1085 retval != MBX_SUCCESS) {
1086 mempool_free(pmboxq,
1087 phba->mbox_mem_pool);
1088 mod_timer(&phba->hb_tmofunc,
1089 jiffies +
1090 HZ * LPFC_HB_MBOX_INTERVAL);
1091 return;
1093 phba->skipped_hb = 0;
1094 phba->hb_outstanding = 1;
1095 } else if (time_before_eq(phba->last_completion_time,
1096 phba->skipped_hb)) {
1097 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1098 "2857 Last completion time not "
1099 " updated in %d ms\n",
1100 jiffies_to_msecs(jiffies
1101 - phba->last_completion_time));
1102 } else
1103 phba->skipped_hb = jiffies;
1105 mod_timer(&phba->hb_tmofunc,
1106 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
1107 return;
1108 } else {
1110 * If heart beat timeout called with hb_outstanding set
1111 * we need to give the hb mailbox cmd a chance to
1112 * complete or TMO.
1114 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1115 "0459 Adapter heartbeat still out"
1116 "standing:last compl time was %d ms.\n",
1117 jiffies_to_msecs(jiffies
1118 - phba->last_completion_time));
1119 mod_timer(&phba->hb_tmofunc,
1120 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
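/*
 * Heart-beat timing recap for the handler above: while I/O completions keep
 * last_completion_time fresh the timer is simply re-armed at
 * LPFC_HB_MBOX_INTERVAL (currently 5 seconds).  Once an interval passes
 * with no completion, a heart-beat mailbox is issued and the timer is
 * stretched to LPFC_HB_MBOX_TIMEOUT (currently 30 seconds) so the command
 * can complete; if it is still outstanding when the timer fires again, the
 * 0459 warning above is logged and another timeout period is granted.
 */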
1126 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1127 * @phba: pointer to lpfc hba data structure.
1129 * This routine is called to bring the HBA offline when HBA hardware error
1130 * other than Port Error 6 has been detected.
1132 static void
1133 lpfc_offline_eratt(struct lpfc_hba *phba)
1135 struct lpfc_sli *psli = &phba->sli;
1137 spin_lock_irq(&phba->hbalock);
1138 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1139 spin_unlock_irq(&phba->hbalock);
1140 lpfc_offline_prep(phba);
1142 lpfc_offline(phba);
1143 lpfc_reset_barrier(phba);
1144 spin_lock_irq(&phba->hbalock);
1145 lpfc_sli_brdreset(phba);
1146 spin_unlock_irq(&phba->hbalock);
1147 lpfc_hba_down_post(phba);
1148 lpfc_sli_brdready(phba, HS_MBRDY);
1149 lpfc_unblock_mgmt_io(phba);
1150 phba->link_state = LPFC_HBA_ERROR;
1151 return;
1155 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1156 * @phba: pointer to lpfc hba data structure.
1158 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1159 * other than Port Error 6 has been detected.
1161 static void
1162 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1164 lpfc_offline_prep(phba);
1165 lpfc_offline(phba);
1166 lpfc_sli4_brdreset(phba);
1167 lpfc_hba_down_post(phba);
1168 lpfc_sli4_post_status_check(phba);
1169 lpfc_unblock_mgmt_io(phba);
1170 phba->link_state = LPFC_HBA_ERROR;
1174 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1175 * @phba: pointer to lpfc hba data structure.
1177 * This routine is invoked to handle the deferred HBA hardware error
1178 * conditions. This type of error is indicated by the HBA by setting ER1
1179 * and another ER bit in the host status register. The driver will
1180 * wait until the ER1 bit clears before handling the error condition.
1182 static void
1183 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1185 uint32_t old_host_status = phba->work_hs;
1186 struct lpfc_sli_ring *pring;
1187 struct lpfc_sli *psli = &phba->sli;
1189 /* If the pci channel is offline, ignore possible errors,
1190 * since we cannot communicate with the pci card anyway.
1192 if (pci_channel_offline(phba->pcidev)) {
1193 spin_lock_irq(&phba->hbalock);
1194 phba->hba_flag &= ~DEFER_ERATT;
1195 spin_unlock_irq(&phba->hbalock);
1196 return;
1199 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1200 "0479 Deferred Adapter Hardware Error "
1201 "Data: x%x x%x x%x\n",
1202 phba->work_hs,
1203 phba->work_status[0], phba->work_status[1]);
1205 spin_lock_irq(&phba->hbalock);
1206 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1207 spin_unlock_irq(&phba->hbalock);
1211 * Firmware stops when it triggered the erratt. That could cause the I/Os
1212 * to be dropped by the firmware. Error the iocbs (I/O) on the txcmplq and
1213 * let the SCSI layer retry them after re-establishing link.
1215 pring = &psli->ring[psli->fcp_ring];
1216 lpfc_sli_abort_iocb_ring(phba, pring);
1219 * There was a firmware error. Take the hba offline and then
1220 * attempt to restart it.
1222 lpfc_offline_prep(phba);
1223 lpfc_offline(phba);
1225 /* Wait for the ER1 bit to clear.*/
1226 while (phba->work_hs & HS_FFER1) {
1227 msleep(100);
1228 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1229 phba->work_hs = UNPLUG_ERR ;
1230 break;
1232 /* If driver is unloading let the worker thread continue */
1233 if (phba->pport->load_flag & FC_UNLOADING) {
1234 phba->work_hs = 0;
1235 break;
1240 * This is to protect against a race condition in which the
1241 * first write to the host attention register clears the
1242 * host status register.
1244 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1245 phba->work_hs = old_host_status & ~HS_FFER1;
1247 spin_lock_irq(&phba->hbalock);
1248 phba->hba_flag &= ~DEFER_ERATT;
1249 spin_unlock_irq(&phba->hbalock);
1250 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1251 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1254 static void
1255 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1257 struct lpfc_board_event_header board_event;
1258 struct Scsi_Host *shost;
1260 board_event.event_type = FC_REG_BOARD_EVENT;
1261 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1262 shost = lpfc_shost_from_vport(phba->pport);
1263 fc_host_post_vendor_event(shost, fc_get_event_number(),
1264 sizeof(board_event),
1265 (char *) &board_event,
1266 LPFC_NL_VENDOR_ID);
1270 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1271 * @phba: pointer to lpfc hba data structure.
1273 * This routine is invoked to handle the following HBA hardware error
1274 * conditions:
1275 * 1 - HBA error attention interrupt
1276 * 2 - DMA ring index out of range
1277 * 3 - Mailbox command came back as unknown
1279 static void
1280 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1282 struct lpfc_vport *vport = phba->pport;
1283 struct lpfc_sli *psli = &phba->sli;
1284 struct lpfc_sli_ring *pring;
1285 uint32_t event_data;
1286 unsigned long temperature;
1287 struct temp_event temp_event_data;
1288 struct Scsi_Host *shost;
1290 /* If the pci channel is offline, ignore possible errors,
1291 * since we cannot communicate with the pci card anyway.
1293 if (pci_channel_offline(phba->pcidev)) {
1294 spin_lock_irq(&phba->hbalock);
1295 phba->hba_flag &= ~DEFER_ERATT;
1296 spin_unlock_irq(&phba->hbalock);
1297 return;
1300 /* If resets are disabled then leave the HBA alone and return */
1301 if (!phba->cfg_enable_hba_reset)
1302 return;
1304 /* Send an internal error event to mgmt application */
1305 lpfc_board_errevt_to_mgmt(phba);
1307 if (phba->hba_flag & DEFER_ERATT)
1308 lpfc_handle_deferred_eratt(phba);
1310 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1311 if (phba->work_hs & HS_FFER6)
1312 /* Re-establishing Link */
1313 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1314 "1301 Re-establishing Link "
1315 "Data: x%x x%x x%x\n",
1316 phba->work_hs, phba->work_status[0],
1317 phba->work_status[1]);
1318 if (phba->work_hs & HS_FFER8)
1319 /* Device Zeroization */
1320 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1321 "2861 Host Authentication device "
1322 "zeroization Data:x%x x%x x%x\n",
1323 phba->work_hs, phba->work_status[0],
1324 phba->work_status[1]);
1326 spin_lock_irq(&phba->hbalock);
1327 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1328 spin_unlock_irq(&phba->hbalock);
1331 * Firmware stops when it triggered the erratt with HS_FFER6.
1332 * That could cause the I/Os to be dropped by the firmware.
1333 * Error the iocbs (I/O) on the txcmplq and let the SCSI layer
1334 * retry them after re-establishing link.
1336 pring = &psli->ring[psli->fcp_ring];
1337 lpfc_sli_abort_iocb_ring(phba, pring);
1340 * There was a firmware error. Take the hba offline and then
1341 * attempt to restart it.
1343 lpfc_offline_prep(phba);
1344 lpfc_offline(phba);
1345 lpfc_sli_brdrestart(phba);
1346 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1347 lpfc_unblock_mgmt_io(phba);
1348 return;
1350 lpfc_unblock_mgmt_io(phba);
1351 } else if (phba->work_hs & HS_CRIT_TEMP) {
1352 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1353 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1354 temp_event_data.event_code = LPFC_CRIT_TEMP;
1355 temp_event_data.data = (uint32_t)temperature;
1357 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1358 "0406 Adapter maximum temperature exceeded "
1359 "(%ld), taking this port offline "
1360 "Data: x%x x%x x%x\n",
1361 temperature, phba->work_hs,
1362 phba->work_status[0], phba->work_status[1]);
1364 shost = lpfc_shost_from_vport(phba->pport);
1365 fc_host_post_vendor_event(shost, fc_get_event_number(),
1366 sizeof(temp_event_data),
1367 (char *) &temp_event_data,
1368 SCSI_NL_VID_TYPE_PCI
1369 | PCI_VENDOR_ID_EMULEX);
1371 spin_lock_irq(&phba->hbalock);
1372 phba->over_temp_state = HBA_OVER_TEMP;
1373 spin_unlock_irq(&phba->hbalock);
1374 lpfc_offline_eratt(phba);
1376 } else {
1377 /* The if clause above forces this code path when the status
1378 * failure is a value other than FFER6. Do not call the offline
1379 * twice. This is the adapter hardware error path.
1381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1382 "0457 Adapter Hardware Error "
1383 "Data: x%x x%x x%x\n",
1384 phba->work_hs,
1385 phba->work_status[0], phba->work_status[1]);
1387 event_data = FC_REG_DUMP_EVENT;
1388 shost = lpfc_shost_from_vport(vport);
1389 fc_host_post_vendor_event(shost, fc_get_event_number(),
1390 sizeof(event_data), (char *) &event_data,
1391 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1393 lpfc_offline_eratt(phba);
1395 return;
1399 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1400 * @phba: pointer to lpfc hba data structure.
1402 * This routine is invoked to handle the SLI4 HBA hardware error attention
1403 * conditions.
1405 static void
1406 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1408 struct lpfc_vport *vport = phba->pport;
1409 uint32_t event_data;
1410 struct Scsi_Host *shost;
1411 uint32_t if_type;
1412 struct lpfc_register portstat_reg;
1414 /* If the pci channel is offline, ignore possible errors, since
1415 * we cannot communicate with the pci card anyway.
1417 if (pci_channel_offline(phba->pcidev))
1418 return;
1419 /* If resets are disabled then leave the HBA alone and return */
1420 if (!phba->cfg_enable_hba_reset)
1421 return;
1423 /* Send an internal error event to mgmt application */
1424 lpfc_board_errevt_to_mgmt(phba);
1426 /* For now, the actual action for SLI4 device handling is not
1427 * specified yet; just treat it as an adapter hardware failure
1429 event_data = FC_REG_DUMP_EVENT;
1430 shost = lpfc_shost_from_vport(vport);
1431 fc_host_post_vendor_event(shost, fc_get_event_number(),
1432 sizeof(event_data), (char *) &event_data,
1433 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1435 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
1436 switch (if_type) {
1437 case LPFC_SLI_INTF_IF_TYPE_0:
1438 lpfc_sli4_offline_eratt(phba);
1439 break;
1440 case LPFC_SLI_INTF_IF_TYPE_2:
1441 portstat_reg.word0 =
1442 readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
1444 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
1445 /* TODO: Register for Overtemp async events. */
1446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1447 "2889 Port Overtemperature event, "
1448 "taking port\n");
1449 spin_lock_irq(&phba->hbalock);
1450 phba->over_temp_state = HBA_OVER_TEMP;
1451 spin_unlock_irq(&phba->hbalock);
1452 lpfc_sli4_offline_eratt(phba);
1453 return;
1455 if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) {
1457 * TODO: Attempt port recovery via a port reset.
1458 * When fully implemented, the driver should
1459 * attempt to recover the port here and return.
1460 * For now, log an error and take the port offline.
1462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1463 "2887 Port Error: Attempting "
1464 "Port Recovery\n");
1466 lpfc_sli4_offline_eratt(phba);
1467 break;
1468 case LPFC_SLI_INTF_IF_TYPE_1:
1469 default:
1470 break;
1475 * lpfc_handle_eratt - Wrapper func for handling hba error attention
1476 * @phba: pointer to lpfc HBA data structure.
1478 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
1479 * routine from the API jump table function pointer from the lpfc_hba struct.
1481 * Return codes
1482 * 0 - success.
1483 * Any other value - error.
1485 void
1486 lpfc_handle_eratt(struct lpfc_hba *phba)
1488 (*phba->lpfc_handle_eratt)(phba);
1492 * lpfc_handle_latt - The HBA link event handler
1493 * @phba: pointer to lpfc hba data structure.
1495 * This routine is invoked from the worker thread to handle a HBA host
1496 * attention link event.
1498 void
1499 lpfc_handle_latt(struct lpfc_hba *phba)
1501 struct lpfc_vport *vport = phba->pport;
1502 struct lpfc_sli *psli = &phba->sli;
1503 LPFC_MBOXQ_t *pmb;
1504 volatile uint32_t control;
1505 struct lpfc_dmabuf *mp;
1506 int rc = 0;
1508 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1509 if (!pmb) {
1510 rc = 1;
1511 goto lpfc_handle_latt_err_exit;
1514 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1515 if (!mp) {
1516 rc = 2;
1517 goto lpfc_handle_latt_free_pmb;
1520 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1521 if (!mp->virt) {
1522 rc = 3;
1523 goto lpfc_handle_latt_free_mp;
1526 /* Cleanup any outstanding ELS commands */
1527 lpfc_els_flush_all_cmd(phba);
1529 psli->slistat.link_event++;
1530 lpfc_read_topology(phba, pmb, mp);
1531 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
1532 pmb->vport = vport;
1533 /* Block ELS IOCBs until we have processed this mbox command */
1534 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1535 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1536 if (rc == MBX_NOT_FINISHED) {
1537 rc = 4;
1538 goto lpfc_handle_latt_free_mbuf;
1541 /* Clear Link Attention in HA REG */
1542 spin_lock_irq(&phba->hbalock);
1543 writel(HA_LATT, phba->HAregaddr);
1544 readl(phba->HAregaddr); /* flush */
1545 spin_unlock_irq(&phba->hbalock);
1547 return;
1549 lpfc_handle_latt_free_mbuf:
1550 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1551 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1552 lpfc_handle_latt_free_mp:
1553 kfree(mp);
1554 lpfc_handle_latt_free_pmb:
1555 mempool_free(pmb, phba->mbox_mem_pool);
1556 lpfc_handle_latt_err_exit:
1557 /* Enable Link attention interrupts */
1558 spin_lock_irq(&phba->hbalock);
1559 psli->sli_flag |= LPFC_PROCESS_LA;
1560 control = readl(phba->HCregaddr);
1561 control |= HC_LAINT_ENA;
1562 writel(control, phba->HCregaddr);
1563 readl(phba->HCregaddr); /* flush */
1565 /* Clear Link Attention in HA REG */
1566 writel(HA_LATT, phba->HAregaddr);
1567 readl(phba->HAregaddr); /* flush */
1568 spin_unlock_irq(&phba->hbalock);
1569 lpfc_linkdown(phba);
1570 phba->link_state = LPFC_HBA_ERROR;
1572 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1573 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1575 return;
1579 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1580 * @phba: pointer to lpfc hba data structure.
1581 * @vpd: pointer to the vital product data.
1582 * @len: length of the vital product data in bytes.
1584 * This routine parses the Vital Product Data (VPD). The VPD is treated as
1585 * an array of characters. In this routine, the ModelName, ProgramType, and
1586 * ModelDesc, etc. fields of the phba data structure will be populated.
1588 * Return codes
1589 * 0 - pointer to the VPD passed in is NULL
1590 * 1 - success
1593 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1595 uint8_t lenlo, lenhi;
1596 int Length;
1597 int i, j;
1598 int finished = 0;
1599 int index = 0;
1601 if (!vpd)
1602 return 0;
1604 /* Vital Product */
1605 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1606 "0455 Vital Product Data: x%x x%x x%x x%x\n",
1607 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1608 (uint32_t) vpd[3]);
1609 while (!finished && (index < (len - 4))) {
1610 switch (vpd[index]) {
1611 case 0x82:
1612 case 0x91:
1613 index += 1;
1614 lenlo = vpd[index];
1615 index += 1;
1616 lenhi = vpd[index];
1617 index += 1;
1618 i = ((((unsigned short)lenhi) << 8) + lenlo);
1619 index += i;
1620 break;
1621 case 0x90:
1622 index += 1;
1623 lenlo = vpd[index];
1624 index += 1;
1625 lenhi = vpd[index];
1626 index += 1;
1627 Length = ((((unsigned short)lenhi) << 8) + lenlo);
1628 if (Length > len - index)
1629 Length = len - index;
1630 while (Length > 0) {
1631 /* Look for Serial Number */
1632 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1633 index += 2;
1634 i = vpd[index];
1635 index += 1;
1636 j = 0;
1637 Length -= (3+i);
1638 while(i--) {
1639 phba->SerialNumber[j++] = vpd[index++];
1640 if (j == 31)
1641 break;
1643 phba->SerialNumber[j] = 0;
1644 continue;
1646 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1647 phba->vpd_flag |= VPD_MODEL_DESC;
1648 index += 2;
1649 i = vpd[index];
1650 index += 1;
1651 j = 0;
1652 Length -= (3+i);
1653 while(i--) {
1654 phba->ModelDesc[j++] = vpd[index++];
1655 if (j == 255)
1656 break;
1658 phba->ModelDesc[j] = 0;
1659 continue;
1661 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1662 phba->vpd_flag |= VPD_MODEL_NAME;
1663 index += 2;
1664 i = vpd[index];
1665 index += 1;
1666 j = 0;
1667 Length -= (3+i);
1668 while(i--) {
1669 phba->ModelName[j++] = vpd[index++];
1670 if (j == 79)
1671 break;
1673 phba->ModelName[j] = 0;
1674 continue;
1676 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1677 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1678 index += 2;
1679 i = vpd[index];
1680 index += 1;
1681 j = 0;
1682 Length -= (3+i);
1683 while(i--) {
1684 phba->ProgramType[j++] = vpd[index++];
1685 if (j == 255)
1686 break;
1688 phba->ProgramType[j] = 0;
1689 continue;
1691 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1692 phba->vpd_flag |= VPD_PORT;
1693 index += 2;
1694 i = vpd[index];
1695 index += 1;
1696 j = 0;
1697 Length -= (3+i);
1698 while(i--) {
1699 phba->Port[j++] = vpd[index++];
1700 if (j == 19)
1701 break;
1703 phba->Port[j] = 0;
1704 continue;
1706 else {
1707 index += 2;
1708 i = vpd[index];
1709 index += 1;
1710 index += i;
1711 Length -= (3 + i);
1714 finished = 0;
1715 break;
1716 case 0x78:
1717 finished = 1;
1718 break;
1719 default:
1720 index ++;
1721 break;
1725 return(1);
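/*
 * VPD layout assumed by the parser above (standard PCI VPD resource tags):
 * 0x82 and 0x91 large-resource tags carry a two-byte little-endian length
 * and are skipped, 0x90 opens the read-only data section, and 0x78 is the
 * end tag.  Within the 0x90 section each record is a two-character keyword,
 * a one-byte length and the data; the keywords consumed above are
 * "SN" (SerialNumber), "V1" (ModelDesc), "V2" (ModelName),
 * "V3" (ProgramType) and "V4" (Port).
 */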
1729 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1730 * @phba: pointer to lpfc hba data structure.
1731 * @mdp: pointer to the data structure to hold the derived model name.
1732 * @descp: pointer to the data structure to hold the derived description.
1734 * This routine retrieves HBA's description based on its registered PCI device
1735 * ID. The @descp passed into this function points to an array of 256 chars. It
1736 * shall be returned with the model name, maximum speed, and the host bus type.
1737 * The @mdp passed into this function points to an array of 80 chars. When the
1738 * function returns, the @mdp will be filled with the model name.
1740 static void
1741 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1743 lpfc_vpd_t *vp;
1744 uint16_t dev_id = phba->pcidev->device;
1745 int max_speed;
1746 int GE = 0;
1747 int oneConnect = 0; /* default is not a oneConnect */
1748 struct {
1749 char *name;
1750 char *bus;
1751 char *function;
1752 } m = {"<Unknown>", "", ""};
1754 if (mdp && mdp[0] != '\0'
1755 && descp && descp[0] != '\0')
1756 return;
1758 if (phba->lmt & LMT_10Gb)
1759 max_speed = 10;
1760 else if (phba->lmt & LMT_8Gb)
1761 max_speed = 8;
1762 else if (phba->lmt & LMT_4Gb)
1763 max_speed = 4;
1764 else if (phba->lmt & LMT_2Gb)
1765 max_speed = 2;
1766 else
1767 max_speed = 1;
1769 vp = &phba->vpd;
1771 switch (dev_id) {
1772 case PCI_DEVICE_ID_FIREFLY:
1773 m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
1774 break;
1775 case PCI_DEVICE_ID_SUPERFLY:
1776 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1777 m = (typeof(m)){"LP7000", "PCI",
1778 "Fibre Channel Adapter"};
1779 else
1780 m = (typeof(m)){"LP7000E", "PCI",
1781 "Fibre Channel Adapter"};
1782 break;
1783 case PCI_DEVICE_ID_DRAGONFLY:
1784 m = (typeof(m)){"LP8000", "PCI",
1785 "Fibre Channel Adapter"};
1786 break;
1787 case PCI_DEVICE_ID_CENTAUR:
1788 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1789 m = (typeof(m)){"LP9002", "PCI",
1790 "Fibre Channel Adapter"};
1791 else
1792 m = (typeof(m)){"LP9000", "PCI",
1793 "Fibre Channel Adapter"};
1794 break;
1795 case PCI_DEVICE_ID_RFLY:
1796 m = (typeof(m)){"LP952", "PCI",
1797 "Fibre Channel Adapter"};
1798 break;
1799 case PCI_DEVICE_ID_PEGASUS:
1800 m = (typeof(m)){"LP9802", "PCI-X",
1801 "Fibre Channel Adapter"};
1802 break;
1803 case PCI_DEVICE_ID_THOR:
1804 m = (typeof(m)){"LP10000", "PCI-X",
1805 "Fibre Channel Adapter"};
1806 break;
1807 case PCI_DEVICE_ID_VIPER:
1808 m = (typeof(m)){"LPX1000", "PCI-X",
1809 "Fibre Channel Adapter"};
1810 break;
1811 case PCI_DEVICE_ID_PFLY:
1812 m = (typeof(m)){"LP982", "PCI-X",
1813 "Fibre Channel Adapter"};
1814 break;
1815 case PCI_DEVICE_ID_TFLY:
1816 m = (typeof(m)){"LP1050", "PCI-X",
1817 "Fibre Channel Adapter"};
1818 break;
1819 case PCI_DEVICE_ID_HELIOS:
1820 m = (typeof(m)){"LP11000", "PCI-X2",
1821 "Fibre Channel Adapter"};
1822 break;
1823 case PCI_DEVICE_ID_HELIOS_SCSP:
1824 m = (typeof(m)){"LP11000-SP", "PCI-X2",
1825 "Fibre Channel Adapter"};
1826 break;
1827 case PCI_DEVICE_ID_HELIOS_DCSP:
1828 m = (typeof(m)){"LP11002-SP", "PCI-X2",
1829 "Fibre Channel Adapter"};
1830 break;
1831 case PCI_DEVICE_ID_NEPTUNE:
1832 m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
1833 break;
1834 case PCI_DEVICE_ID_NEPTUNE_SCSP:
1835 m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
1836 break;
1837 case PCI_DEVICE_ID_NEPTUNE_DCSP:
1838 m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
1839 break;
1840 case PCI_DEVICE_ID_BMID:
1841 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
1842 break;
1843 case PCI_DEVICE_ID_BSMB:
1844 m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
1845 break;
1846 case PCI_DEVICE_ID_ZEPHYR:
1847 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1848 break;
1849 case PCI_DEVICE_ID_ZEPHYR_SCSP:
1850 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
1851 break;
1852 case PCI_DEVICE_ID_ZEPHYR_DCSP:
1853 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
1854 GE = 1;
1855 break;
1856 case PCI_DEVICE_ID_ZMID:
1857 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
1858 break;
1859 case PCI_DEVICE_ID_ZSMB:
1860 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
1861 break;
1862 case PCI_DEVICE_ID_LP101:
1863 m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
1864 break;
1865 case PCI_DEVICE_ID_LP10000S:
1866 m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
1867 break;
1868 case PCI_DEVICE_ID_LP11000S:
1869 m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
1870 break;
1871 case PCI_DEVICE_ID_LPE11000S:
1872 m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
1873 break;
1874 case PCI_DEVICE_ID_SAT:
1875 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
1876 break;
1877 case PCI_DEVICE_ID_SAT_MID:
1878 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
1879 break;
1880 case PCI_DEVICE_ID_SAT_SMB:
1881 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
1882 break;
1883 case PCI_DEVICE_ID_SAT_DCSP:
1884 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
1885 break;
1886 case PCI_DEVICE_ID_SAT_SCSP:
1887 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
1888 break;
1889 case PCI_DEVICE_ID_SAT_S:
1890 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
1891 break;
1892 case PCI_DEVICE_ID_HORNET:
1893 m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
1894 GE = 1;
1895 break;
1896 case PCI_DEVICE_ID_PROTEUS_VF:
1897 m = (typeof(m)){"LPev12000", "PCIe IOV",
1898 "Fibre Channel Adapter"};
1899 break;
1900 case PCI_DEVICE_ID_PROTEUS_PF:
1901 m = (typeof(m)){"LPev12000", "PCIe IOV",
1902 "Fibre Channel Adapter"};
1903 break;
1904 case PCI_DEVICE_ID_PROTEUS_S:
1905 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
1906 "Fibre Channel Adapter"};
1907 break;
1908 case PCI_DEVICE_ID_TIGERSHARK:
1909 oneConnect = 1;
1910 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
1911 break;
1912 case PCI_DEVICE_ID_TOMCAT:
1913 oneConnect = 1;
1914 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
1915 break;
1916 case PCI_DEVICE_ID_FALCON:
1917 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
1918 "EmulexSecure Fibre"};
1919 break;
1920 case PCI_DEVICE_ID_BALIUS:
1921 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
1922 "Fibre Channel Adapter"};
1923 break;
1924 case PCI_DEVICE_ID_LANCER_FC:
1925 oneConnect = 1;
1926 m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"};
1927 break;
1928 case PCI_DEVICE_ID_LANCER_FCOE:
1929 oneConnect = 1;
1930 m = (typeof(m)){"Undefined", "PCIe", "FCoE"};
1931 break;
1932 default:
1933 m = (typeof(m)){"Unknown", "", ""};
1934 break;
1937 if (mdp && mdp[0] == '\0')
1938 snprintf(mdp, 79,"%s", m.name);
1939 /* oneConnect HBAs require special processing; they are all initiators
1940 * and we put the port number on the end
1942 if (descp && descp[0] == '\0') {
1943 if (oneConnect)
1944 snprintf(descp, 255,
1945 "Emulex OneConnect %s, %s Initiator, Port %s",
1946 m.name, m.function,
1947 phba->Port);
1948 else
1949 snprintf(descp, 255,
1950 "Emulex %s %d%s %s %s",
1951 m.name, max_speed, (GE) ? "GE" : "Gb",
1952 m.bus, m.function);
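/*
 * Illustrative output (hypothetical values, not read from hardware): for
 * an LPe12000 with LMT_8Gb set in phba->lmt the snprintf() calls above
 * would yield
 *
 *	mdp:   "LPe12000"
 *	descp: "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter"
 *
 * while a OneConnect FCoE function whose VPD gave phba->Port = "0" would
 * instead get
 *
 *	descp: "Emulex OneConnect OCe10100, FCoE Initiator, Port 0"
 */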
1957 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
1958 * @phba: pointer to lpfc hba data structure.
1959 * @pring: pointer to an IOCB ring.
1960 * @cnt: the number of IOCBs to be posted to the IOCB ring.
1962 * This routine posts a given number of IOCBs with the associated DMA buffer
1963 * descriptors specified by the cnt argument to the given IOCB ring.
1965 * Return codes
1966 * The number of IOCBs NOT able to be posted to the IOCB ring.
1969 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1971 IOCB_t *icmd;
1972 struct lpfc_iocbq *iocb;
1973 struct lpfc_dmabuf *mp1, *mp2;
1975 cnt += pring->missbufcnt;
1977 /* While there are buffers to post */
1978 while (cnt > 0) {
1979 /* Allocate buffer for command iocb */
1980 iocb = lpfc_sli_get_iocbq(phba);
1981 if (iocb == NULL) {
1982 pring->missbufcnt = cnt;
1983 return cnt;
1985 icmd = &iocb->iocb;
1987 /* 2 buffers can be posted per command */
1988 /* Allocate buffer to post */
1989 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1990 if (mp1)
1991 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1992 if (!mp1 || !mp1->virt) {
1993 kfree(mp1);
1994 lpfc_sli_release_iocbq(phba, iocb);
1995 pring->missbufcnt = cnt;
1996 return cnt;
1999 INIT_LIST_HEAD(&mp1->list);
2000 /* Allocate buffer to post */
2001 if (cnt > 1) {
2002 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2003 if (mp2)
2004 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2005 &mp2->phys);
2006 if (!mp2 || !mp2->virt) {
2007 kfree(mp2);
2008 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2009 kfree(mp1);
2010 lpfc_sli_release_iocbq(phba, iocb);
2011 pring->missbufcnt = cnt;
2012 return cnt;
2015 INIT_LIST_HEAD(&mp2->list);
2016 } else {
2017 mp2 = NULL;
2020 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2021 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2022 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2023 icmd->ulpBdeCount = 1;
2024 cnt--;
2025 if (mp2) {
2026 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2027 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2028 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2029 cnt--;
2030 icmd->ulpBdeCount = 2;
2033 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2034 icmd->ulpLe = 1;
2036 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2037 IOCB_ERROR) {
2038 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2039 kfree(mp1);
2040 cnt++;
2041 if (mp2) {
2042 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2043 kfree(mp2);
2044 cnt++;
2046 lpfc_sli_release_iocbq(phba, iocb);
2047 pring->missbufcnt = cnt;
2048 return cnt;
2050 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2051 if (mp2)
2052 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2054 pring->missbufcnt = 0;
2055 return 0;
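/*
 * Usage sketch (hypothetical caller): the return value is the number of
 * buffers that could NOT be posted; the same count is kept in
 * pring->missbufcnt so a later call retries the backlog automatically:
 *
 *	shortfall = lpfc_post_buffer(phba, pring, wanted);
 *	if (shortfall)
 *		pr_debug("deferred %d ELS buffers\n", shortfall);
 */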
2059 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2060 * @phba: pointer to lpfc hba data structure.
2062 * This routine posts initial receive IOCB buffers to the ELS ring. The
2063 * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2064 * set to 64 IOCBs.
2066 * Return codes
2067 * 0 - success (currently always success)
2069 static int
2070 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2072 struct lpfc_sli *psli = &phba->sli;
2074 /* Ring 0, ELS / CT buffers */
2075 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2076 /* Ring 2 - FCP no buffers needed */
2078 return 0;
2081 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2084 * lpfc_sha_init - Set up initial array of hash table entries
2085 * @HashResultPointer: pointer to an array as hash table.
2087 * This routine sets up the initial values to the array of hash table entries
2088 * for the LC HBAs.
2090 static void
2091 lpfc_sha_init(uint32_t * HashResultPointer)
2093 HashResultPointer[0] = 0x67452301;
2094 HashResultPointer[1] = 0xEFCDAB89;
2095 HashResultPointer[2] = 0x98BADCFE;
2096 HashResultPointer[3] = 0x10325476;
2097 HashResultPointer[4] = 0xC3D2E1F0;
2101 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2102 * @HashResultPointer: pointer to an initial/result hash table.
2103 * @HashWorkingPointer: pointer to a working hash table.
2105 * This routine iterates an initial hash table pointed to by @HashResultPointer
2106 * with the values from the working hash table pointed to by @HashWorkingPointer.
2107 * The results are put back into the initial hash table and returned through
2108 * @HashResultPointer as the result hash table.
2110 static void
2111 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2113 int t;
2114 uint32_t TEMP;
2115 uint32_t A, B, C, D, E;
2116 t = 16;
2117 do {
2118 HashWorkingPointer[t] =
2119 S(1,
2120 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2121 8] ^
2122 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2123 } while (++t <= 79);
2124 t = 0;
2125 A = HashResultPointer[0];
2126 B = HashResultPointer[1];
2127 C = HashResultPointer[2];
2128 D = HashResultPointer[3];
2129 E = HashResultPointer[4];
2131 do {
2132 if (t < 20) {
2133 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2134 } else if (t < 40) {
2135 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2136 } else if (t < 60) {
2137 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2138 } else {
2139 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2141 TEMP += S(5, A) + E + HashWorkingPointer[t];
2142 E = D;
2143 D = C;
2144 C = S(30, B);
2145 B = A;
2146 A = TEMP;
2147 } while (++t <= 79);
2149 HashResultPointer[0] += A;
2150 HashResultPointer[1] += B;
2151 HashResultPointer[2] += C;
2152 HashResultPointer[3] += D;
2153 HashResultPointer[4] += E;
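/*
 * The two loops above are one standard SHA-1 compression step: the first
 * expands the 16 input words into an 80-entry schedule with
 * W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]), and the second runs
 * the 80 rounds with the usual round functions and constants (0x5A827999,
 * 0x6ED9EBA1, 0x8F1BBCDC, 0xCA62C1D6), where S(n, v) is a 32-bit
 * rotate-left of v by n bits.
 */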
2158 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2159 * @RandomChallenge: pointer to the entry of host challenge random number array.
2160 * @HashWorking: pointer to the entry of the working hash array.
2162 * This routine calculates the working hash array referred by @HashWorking
2163 * from the challenge random numbers associated with the host, referred by
2164 * @RandomChallenge. The result is put into the entry of the working hash
2165 * array and returned by reference through @HashWorking.
2167 static void
2168 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2170 *HashWorking = (*RandomChallenge ^ *HashWorking);
2174 * lpfc_hba_init - Perform special handling for LC HBA initialization
2175 * @phba: pointer to lpfc hba data structure.
2176 * @hbainit: pointer to an array of unsigned 32-bit integers.
2178 * This routine performs the special handling for LC HBA initialization.
2180 void
2181 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2183 int t;
2184 uint32_t *HashWorking;
2185 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
2187 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
2188 if (!HashWorking)
2189 return;
2191 HashWorking[0] = HashWorking[78] = *pwwnn++;
2192 HashWorking[1] = HashWorking[79] = *pwwnn;
2194 for (t = 0; t < 7; t++)
2195 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
2197 lpfc_sha_init(hbainit);
2198 lpfc_sha_iterate(hbainit, HashWorking);
2199 kfree(HashWorking);
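/*
 * Data-flow sketch for the block hashed above: the WWNN words are seeded
 * into HashWorking[0..1] and mirrored into HashWorking[78..79], the first
 * seven entries are XORed with the RandomData challenge through
 * lpfc_challenge_key(), and the 80-word block is then fed to the SHA-1
 * routines, leaving the five 32-bit digest words in hbainit[0..4].
 */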
2203 * lpfc_cleanup - Performs vport cleanups before deleting a vport
2204 * @vport: pointer to a virtual N_Port data structure.
2206 * This routine performs the necessary cleanups before deleting the @vport.
2207 * It invokes the discovery state machine to perform necessary state
2208 * transitions and to release the ndlps associated with the @vport. Note,
2209 * the physical port is treated as @vport 0.
2211 void
2212 lpfc_cleanup(struct lpfc_vport *vport)
2214 struct lpfc_hba *phba = vport->phba;
2215 struct lpfc_nodelist *ndlp, *next_ndlp;
2216 int i = 0;
2218 if (phba->link_state > LPFC_LINK_DOWN)
2219 lpfc_port_link_failure(vport);
2221 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2222 if (!NLP_CHK_NODE_ACT(ndlp)) {
2223 ndlp = lpfc_enable_node(vport, ndlp,
2224 NLP_STE_UNUSED_NODE);
2225 if (!ndlp)
2226 continue;
2227 spin_lock_irq(&phba->ndlp_lock);
2228 NLP_SET_FREE_REQ(ndlp);
2229 spin_unlock_irq(&phba->ndlp_lock);
2230 /* Trigger the release of the ndlp memory */
2231 lpfc_nlp_put(ndlp);
2232 continue;
2234 spin_lock_irq(&phba->ndlp_lock);
2235 if (NLP_CHK_FREE_REQ(ndlp)) {
2236 /* The ndlp should not be in memory free mode already */
2237 spin_unlock_irq(&phba->ndlp_lock);
2238 continue;
2239 } else
2240 /* Indicate request for freeing ndlp memory */
2241 NLP_SET_FREE_REQ(ndlp);
2242 spin_unlock_irq(&phba->ndlp_lock);
2244 if (vport->port_type != LPFC_PHYSICAL_PORT &&
2245 ndlp->nlp_DID == Fabric_DID) {
2246 /* Just free up ndlp with Fabric_DID for vports */
2247 lpfc_nlp_put(ndlp);
2248 continue;
2251 if (ndlp->nlp_type & NLP_FABRIC)
2252 lpfc_disc_state_machine(vport, ndlp, NULL,
2253 NLP_EVT_DEVICE_RECOVERY);
2255 lpfc_disc_state_machine(vport, ndlp, NULL,
2256 NLP_EVT_DEVICE_RM);
2260 /* At this point, ALL ndlp's should be gone
2261 * because of the previous NLP_EVT_DEVICE_RM.
2262 * Let's wait for this to happen, if needed.
2264 while (!list_empty(&vport->fc_nodes)) {
2265 if (i++ > 3000) {
2266 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
2267 "0233 Nodelist not empty\n");
2268 list_for_each_entry_safe(ndlp, next_ndlp,
2269 &vport->fc_nodes, nlp_listp) {
2270 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
2271 LOG_NODE,
2272 "0282 did:x%x ndlp:x%p "
2273 "usgmap:x%x refcnt:%d\n",
2274 ndlp->nlp_DID, (void *)ndlp,
2275 ndlp->nlp_usg_map,
2276 atomic_read(
2277 &ndlp->kref.refcount));
2279 break;
2282 /* Wait for any activity on ndlps to settle */
2283 msleep(10);
2285 lpfc_cleanup_vports_rrqs(vport, NULL);
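/*
 * Note on the wait above: each pass sleeps 10 ms and the loop gives up
 * after roughly 3000 passes (logging "0233 Nodelist not empty"), so the
 * cleanup waits at most about 30 seconds for lingering ndlp references to
 * drop before continuing.
 */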
2289 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
2290 * @vport: pointer to a virtual N_Port data structure.
2292 * This routine stops all the timers associated with a @vport. This function
2293 * is invoked before disabling or deleting a @vport. Note that the physical
2294 * port is treated as @vport 0.
2296 void
2297 lpfc_stop_vport_timers(struct lpfc_vport *vport)
2299 del_timer_sync(&vport->els_tmofunc);
2300 del_timer_sync(&vport->fc_fdmitmo);
2301 del_timer_sync(&vport->delayed_disc_tmo);
2302 lpfc_can_disctmo(vport);
2303 return;
2307 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2308 * @phba: pointer to lpfc hba data structure.
2310 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
2311 * caller of this routine should already hold the host lock.
2313 void
2314 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2316 /* Clear pending FCF rediscovery wait flag */
2317 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
2319 /* Now, try to stop the timer */
2320 del_timer(&phba->fcf.redisc_wait);
2324 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
2325 * @phba: pointer to lpfc hba data structure.
2327 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
2328 * checks whether the FCF rediscovery wait timer is pending with the host
2329 * lock held before proceeding with disabling the timer and clearing the
2330 * wait timer pending flag.
2332 void
2333 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
2335 spin_lock_irq(&phba->hbalock);
2336 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
2337 /* FCF rediscovery timer already fired or stopped */
2338 spin_unlock_irq(&phba->hbalock);
2339 return;
2341 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2342 /* Clear failover in progress flags */
2343 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
2344 spin_unlock_irq(&phba->hbalock);
2348 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
2349 * @phba: pointer to lpfc hba data structure.
2351 * This routine stops all the timers associated with a HBA. This function is
2352 * invoked before either putting a HBA offline or unloading the driver.
2354 void
2355 lpfc_stop_hba_timers(struct lpfc_hba *phba)
2357 lpfc_stop_vport_timers(phba->pport);
2358 del_timer_sync(&phba->sli.mbox_tmo);
2359 del_timer_sync(&phba->fabric_block_timer);
2360 del_timer_sync(&phba->eratt_poll);
2361 del_timer_sync(&phba->hb_tmofunc);
2362 if (phba->sli_rev == LPFC_SLI_REV4) {
2363 del_timer_sync(&phba->rrq_tmr);
2364 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
2366 phba->hb_outstanding = 0;
2368 switch (phba->pci_dev_grp) {
2369 case LPFC_PCI_DEV_LP:
2370 /* Stop any LightPulse device specific driver timers */
2371 del_timer_sync(&phba->fcp_poll_timer);
2372 break;
2373 case LPFC_PCI_DEV_OC:
2374 /* Stop any OneConnect device specific driver timers */
2375 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
2376 break;
2377 default:
2378 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2379 "0297 Invalid device group (x%x)\n",
2380 phba->pci_dev_grp);
2381 break;
2383 return;
2387 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
2388 * @phba: pointer to lpfc hba data structure.
2390 * This routine marks a HBA's management interface as blocked. Once the HBA's
2391 * management interface is marked as blocked, all user space access to
2392 * the HBA, whether from the sysfs interface or the libdfc interface, will
2393 * be blocked. The HBA is set to block the management interface when the
2394 * driver prepares the HBA interface for online or offline.
2396 static void
2397 lpfc_block_mgmt_io(struct lpfc_hba * phba)
2399 unsigned long iflag;
2400 uint8_t actcmd = MBX_HEARTBEAT;
2401 unsigned long timeout;
2404 spin_lock_irqsave(&phba->hbalock, iflag);
2405 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2406 if (phba->sli.mbox_active)
2407 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2408 spin_unlock_irqrestore(&phba->hbalock, iflag);
2409 /* Determine how long we might wait for the active mailbox
2410 * command to be gracefully completed by firmware.
2412 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2413 jiffies;
2414 /* Wait for the outstanding mailbox command to complete */
2415 while (phba->sli.mbox_active) {
2416 /* Check active mailbox complete status every 2ms */
2417 msleep(2);
2418 if (time_after(jiffies, timeout)) {
2419 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2420 "2813 Mgmt IO is Blocked %x "
2421 "- mbox cmd %x still active\n",
2422 phba->sli.sli_flag, actcmd);
2423 break;
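/*
 * Usage sketch: the block/unblock calls bracket an online or offline
 * transition, as lpfc_online() below does:
 *
 *	lpfc_block_mgmt_io(phba);
 *	... bring the HBA up or down ...
 *	lpfc_unblock_mgmt_io(phba);
 */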
2429 * lpfc_online - Initialize and bring a HBA online
2430 * @phba: pointer to lpfc hba data structure.
2432 * This routine initializes the HBA and brings a HBA online. During this
2433 * process, the management interface is blocked to prevent user space access
2434 * to the HBA interfering with the driver initialization.
2436 * Return codes
2437 * 0 - successful
2438 * 1 - failed
2441 lpfc_online(struct lpfc_hba *phba)
2443 struct lpfc_vport *vport;
2444 struct lpfc_vport **vports;
2445 int i;
2447 if (!phba)
2448 return 0;
2449 vport = phba->pport;
2451 if (!(vport->fc_flag & FC_OFFLINE_MODE))
2452 return 0;
2454 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2455 "0458 Bring Adapter online\n");
2457 lpfc_block_mgmt_io(phba);
2459 if (!lpfc_sli_queue_setup(phba)) {
2460 lpfc_unblock_mgmt_io(phba);
2461 return 1;
2464 if (phba->sli_rev == LPFC_SLI_REV4) {
2465 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
2466 lpfc_unblock_mgmt_io(phba);
2467 return 1;
2469 } else {
2470 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
2471 lpfc_unblock_mgmt_io(phba);
2472 return 1;
2476 vports = lpfc_create_vport_work_array(phba);
2477 if (vports != NULL)
2478 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2479 struct Scsi_Host *shost;
2480 shost = lpfc_shost_from_vport(vports[i]);
2481 spin_lock_irq(shost->host_lock);
2482 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
2483 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2484 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2485 if (phba->sli_rev == LPFC_SLI_REV4)
2486 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
2487 spin_unlock_irq(shost->host_lock);
2489 lpfc_destroy_vport_work_array(phba, vports);
2491 lpfc_unblock_mgmt_io(phba);
2492 return 0;
2496 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
2497 * @phba: pointer to lpfc hba data structure.
2499 * This routine marks a HBA's management interface as not blocked. Once the
2500 * HBA's management interface is marked as not blocked, all user space
2501 * access to the HBA, whether from the sysfs interface or the libdfc
2502 * interface, will be allowed. The HBA is set to block the management interface
2503 * when the driver prepares the HBA interface for online or offline and then
2504 * set to unblock the management interface afterwards.
2506 void
2507 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
2509 unsigned long iflag;
2511 spin_lock_irqsave(&phba->hbalock, iflag);
2512 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
2513 spin_unlock_irqrestore(&phba->hbalock, iflag);
2517 * lpfc_offline_prep - Prepare a HBA to be brought offline
2518 * @phba: pointer to lpfc hba data structure.
2520 * This routine is invoked to prepare a HBA to be brought offline. It performs
2521 * unregistration login to all the nodes on all vports and flushes the mailbox
2522 * queue to make it ready to be brought offline.
2524 void
2525 lpfc_offline_prep(struct lpfc_hba * phba)
2527 struct lpfc_vport *vport = phba->pport;
2528 struct lpfc_nodelist *ndlp, *next_ndlp;
2529 struct lpfc_vport **vports;
2530 struct Scsi_Host *shost;
2531 int i;
2533 if (vport->fc_flag & FC_OFFLINE_MODE)
2534 return;
2536 lpfc_block_mgmt_io(phba);
2538 lpfc_linkdown(phba);
2540 /* Issue an unreg_login to all nodes on all vports */
2541 vports = lpfc_create_vport_work_array(phba);
2542 if (vports != NULL) {
2543 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2544 if (vports[i]->load_flag & FC_UNLOADING)
2545 continue;
2546 shost = lpfc_shost_from_vport(vports[i]);
2547 spin_lock_irq(shost->host_lock);
2548 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
2549 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2550 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
2551 spin_unlock_irq(shost->host_lock);
2553 shost = lpfc_shost_from_vport(vports[i]);
2554 list_for_each_entry_safe(ndlp, next_ndlp,
2555 &vports[i]->fc_nodes,
2556 nlp_listp) {
2557 if (!NLP_CHK_NODE_ACT(ndlp))
2558 continue;
2559 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
2560 continue;
2561 if (ndlp->nlp_type & NLP_FABRIC) {
2562 lpfc_disc_state_machine(vports[i], ndlp,
2563 NULL, NLP_EVT_DEVICE_RECOVERY);
2564 lpfc_disc_state_machine(vports[i], ndlp,
2565 NULL, NLP_EVT_DEVICE_RM);
2567 spin_lock_irq(shost->host_lock);
2568 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2569 spin_unlock_irq(shost->host_lock);
2570 lpfc_unreg_rpi(vports[i], ndlp);
2574 lpfc_destroy_vport_work_array(phba, vports);
2576 lpfc_sli_mbox_sys_shutdown(phba);
2580 * lpfc_offline - Bring a HBA offline
2581 * @phba: pointer to lpfc hba data structure.
2583 * This routine actually brings a HBA offline. It stops all the timers
2584 * associated with the HBA, brings down the SLI layer, and eventually
2585 * marks the HBA as in offline state for the upper layer protocol.
2587 void
2588 lpfc_offline(struct lpfc_hba *phba)
2590 struct Scsi_Host *shost;
2591 struct lpfc_vport **vports;
2592 int i;
2594 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2595 return;
2597 /* stop port and all timers associated with this hba */
2598 lpfc_stop_port(phba);
2599 vports = lpfc_create_vport_work_array(phba);
2600 if (vports != NULL)
2601 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
2602 lpfc_stop_vport_timers(vports[i]);
2603 lpfc_destroy_vport_work_array(phba, vports);
2604 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2605 "0460 Bring Adapter offline\n");
2606 /* Bring down the SLI Layer and cleanup. The HBA is offline
2607 now. */
2608 lpfc_sli_hba_down(phba);
2609 spin_lock_irq(&phba->hbalock);
2610 phba->work_ha = 0;
2611 spin_unlock_irq(&phba->hbalock);
2612 vports = lpfc_create_vport_work_array(phba);
2613 if (vports != NULL)
2614 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2615 shost = lpfc_shost_from_vport(vports[i]);
2616 spin_lock_irq(shost->host_lock);
2617 vports[i]->work_port_events = 0;
2618 vports[i]->fc_flag |= FC_OFFLINE_MODE;
2619 spin_unlock_irq(shost->host_lock);
2621 lpfc_destroy_vport_work_array(phba, vports);
2625 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2626 * @phba: pointer to lpfc hba data structure.
2628 * This routine is to free all the SCSI buffers and IOCBs from the driver
2629 * list back to the kernel. It is called from lpfc_pci_remove_one to free
2630 * the internal resources before the device is removed from the system.
2632 * Return codes
2633 * 0 - successful (for now, it always returns 0)
2635 static int
2636 lpfc_scsi_free(struct lpfc_hba *phba)
2638 struct lpfc_scsi_buf *sb, *sb_next;
2639 struct lpfc_iocbq *io, *io_next;
2641 spin_lock_irq(&phba->hbalock);
2642 /* Release all the lpfc_scsi_bufs maintained by this host. */
2643 spin_lock(&phba->scsi_buf_list_lock);
2644 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2645 list_del(&sb->list);
2646 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2647 sb->dma_handle);
2648 kfree(sb);
2649 phba->total_scsi_bufs--;
2651 spin_unlock(&phba->scsi_buf_list_lock);
2653 /* Release all the lpfc_iocbq entries maintained by this host. */
2654 list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2655 list_del(&io->list);
2656 kfree(io);
2657 phba->total_iocbq_bufs--;
2659 spin_unlock_irq(&phba->hbalock);
2660 return 0;
2664 * lpfc_create_port - Create an FC port
2665 * @phba: pointer to lpfc hba data structure.
2666 * @instance: a unique integer ID to this FC port.
2667 * @dev: pointer to the device data structure.
2669 * This routine creates a FC port for the upper layer protocol. The FC port
2670 * can be created on top of either a physical port or a virtual port provided
2671 * by the HBA. This routine also allocates a SCSI host data structure (shost)
2672 * and associates it with the FC port created, before adding the shost to the
2673 * SCSI layer.
2675 * Return codes
2676 * @vport - pointer to the virtual N_Port data structure.
2677 * NULL - port create failed.
2679 struct lpfc_vport *
2680 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2682 struct lpfc_vport *vport;
2683 struct Scsi_Host *shost;
2684 int error = 0;
2686 if (dev != &phba->pcidev->dev)
2687 shost = scsi_host_alloc(&lpfc_vport_template,
2688 sizeof(struct lpfc_vport));
2689 else
2690 shost = scsi_host_alloc(&lpfc_template,
2691 sizeof(struct lpfc_vport));
2692 if (!shost)
2693 goto out;
2695 vport = (struct lpfc_vport *) shost->hostdata;
2696 vport->phba = phba;
2697 vport->load_flag |= FC_LOADING;
2698 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2699 vport->fc_rscn_flush = 0;
2701 lpfc_get_vport_cfgparam(vport);
2702 shost->unique_id = instance;
2703 shost->max_id = LPFC_MAX_TARGET;
2704 shost->max_lun = vport->cfg_max_luns;
2705 shost->this_id = -1;
2706 shost->max_cmd_len = 16;
2707 if (phba->sli_rev == LPFC_SLI_REV4) {
2708 shost->dma_boundary =
2709 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
2710 shost->sg_tablesize = phba->cfg_sg_seg_cnt;
2714 * Set initial can_queue value since 0 is no longer supported and
2715 * scsi_add_host will fail. This will be adjusted later based on the
2716 * max xri value determined in hba setup.
2718 shost->can_queue = phba->cfg_hba_queue_depth - 10;
2719 if (dev != &phba->pcidev->dev) {
2720 shost->transportt = lpfc_vport_transport_template;
2721 vport->port_type = LPFC_NPIV_PORT;
2722 } else {
2723 shost->transportt = lpfc_transport_template;
2724 vport->port_type = LPFC_PHYSICAL_PORT;
2727 /* Initialize all internally managed lists. */
2728 INIT_LIST_HEAD(&vport->fc_nodes);
2729 INIT_LIST_HEAD(&vport->rcv_buffer_list);
2730 spin_lock_init(&vport->work_port_lock);
2732 init_timer(&vport->fc_disctmo);
2733 vport->fc_disctmo.function = lpfc_disc_timeout;
2734 vport->fc_disctmo.data = (unsigned long)vport;
2736 init_timer(&vport->fc_fdmitmo);
2737 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2738 vport->fc_fdmitmo.data = (unsigned long)vport;
2740 init_timer(&vport->els_tmofunc);
2741 vport->els_tmofunc.function = lpfc_els_timeout;
2742 vport->els_tmofunc.data = (unsigned long)vport;
2744 init_timer(&vport->delayed_disc_tmo);
2745 vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
2746 vport->delayed_disc_tmo.data = (unsigned long)vport;
2748 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
2749 if (error)
2750 goto out_put_shost;
2752 spin_lock_irq(&phba->hbalock);
2753 list_add_tail(&vport->listentry, &phba->port_list);
2754 spin_unlock_irq(&phba->hbalock);
2755 return vport;
2757 out_put_shost:
2758 scsi_host_put(shost);
2759 out:
2760 return NULL;
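/*
 * Caller sketch (the error label is hypothetical): passing the PCI device
 * itself selects lpfc_template and LPFC_PHYSICAL_PORT, while an NPIV vport
 * passes its own device and gets lpfc_vport_template / LPFC_NPIV_PORT:
 *
 *	vport = lpfc_create_port(phba, instance, &phba->pcidev->dev);
 *	if (!vport)
 *		goto out_free;	// hypothetical error label
 */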
2764 * destroy_port - destroy an FC port
2765 * @vport: pointer to an lpfc virtual N_Port data structure.
2767 * This routine destroys a FC port from the upper layer protocol. All the
2768 * resources associated with the port are released.
2770 void
2771 destroy_port(struct lpfc_vport *vport)
2773 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2774 struct lpfc_hba *phba = vport->phba;
2776 lpfc_debugfs_terminate(vport);
2777 fc_remove_host(shost);
2778 scsi_remove_host(shost);
2780 spin_lock_irq(&phba->hbalock);
2781 list_del_init(&vport->listentry);
2782 spin_unlock_irq(&phba->hbalock);
2784 lpfc_cleanup(vport);
2785 return;
2789 * lpfc_get_instance - Get a unique integer ID
2791 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
2792 * uses the kernel idr facility to perform the task.
2794 * Return codes:
2795 * instance - a unique integer ID allocated as the new instance.
2796 * -1 - lpfc get instance failed.
2799 lpfc_get_instance(void)
2801 int instance = 0;
2803 /* Assign an unused number */
2804 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2805 return -1;
2806 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2807 return -1;
2808 return instance;
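/*
 * Usage sketch (hypothetical caller): the idr_pre_get()/idr_get_new() pair
 * above can fail at either step, so callers must treat -1 as failure:
 *
 *	instance = lpfc_get_instance();
 *	if (instance == -1)
 *		return -ENOMEM;	// error code chosen for illustration
 */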
2812 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2813 * @shost: pointer to SCSI host data structure.
2814 * @time: elapsed time of the scan in jiffies.
2816 * This routine is called by the SCSI layer with a SCSI host to determine
2817 * whether the host scan is finished.
2819 * Note: there is no scan_start function as adapter initialization will have
2820 * asynchronously kicked off the link initialization.
2822 * Return codes
2823 * 0 - SCSI host scan is not over yet.
2824 * 1 - SCSI host scan is over.
2826 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2828 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2829 struct lpfc_hba *phba = vport->phba;
2830 int stat = 0;
2832 spin_lock_irq(shost->host_lock);
2834 if (vport->load_flag & FC_UNLOADING) {
2835 stat = 1;
2836 goto finished;
2838 if (time >= 30 * HZ) {
2839 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2840 "0461 Scanning longer than 30 "
2841 "seconds. Continuing initialization\n");
2842 stat = 1;
2843 goto finished;
2845 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2846 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2847 "0465 Link down longer than 15 "
2848 "seconds. Continuing initialization\n");
2849 stat = 1;
2850 goto finished;
2853 if (vport->port_state != LPFC_VPORT_READY)
2854 goto finished;
2855 if (vport->num_disc_nodes || vport->fc_prli_sent)
2856 goto finished;
2857 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2858 goto finished;
2859 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2860 goto finished;
2862 stat = 1;
2864 finished:
2865 spin_unlock_irq(shost->host_lock);
2866 return stat;
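/*
 * Summary of the checks above (sketch): the scan is reported complete
 * (stat = 1) when the driver is unloading, after 30 seconds regardless of
 * state, or after 15 seconds with the link still down; otherwise it is
 * complete only once the vport is READY, discovery and PRLI traffic have
 * drained, at least 2 seconds have passed when no targets are mapped yet,
 * and no mailbox command is active.
 */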
2870 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2871 * @shost: pointer to SCSI host data structure.
2873 * This routine initializes a given SCSI host attributes on a FC port. The
2874 * SCSI host can be either on top of a physical port or a virtual port.
2876 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2878 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2879 struct lpfc_hba *phba = vport->phba;
2881 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
2884 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2885 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2886 fc_host_supported_classes(shost) = FC_COS_CLASS3;
2888 memset(fc_host_supported_fc4s(shost), 0,
2889 sizeof(fc_host_supported_fc4s(shost)));
2890 fc_host_supported_fc4s(shost)[2] = 1;
2891 fc_host_supported_fc4s(shost)[7] = 1;
2893 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2894 sizeof fc_host_symbolic_name(shost));
2896 fc_host_supported_speeds(shost) = 0;
2897 if (phba->lmt & LMT_10Gb)
2898 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2899 if (phba->lmt & LMT_8Gb)
2900 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2901 if (phba->lmt & LMT_4Gb)
2902 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2903 if (phba->lmt & LMT_2Gb)
2904 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2905 if (phba->lmt & LMT_1Gb)
2906 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2908 fc_host_maxframe_size(shost) =
2909 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2910 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2912 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
2914 /* This value is also unchanging */
2915 memset(fc_host_active_fc4s(shost), 0,
2916 sizeof(fc_host_active_fc4s(shost)));
2917 fc_host_active_fc4s(shost)[2] = 1;
2918 fc_host_active_fc4s(shost)[7] = 1;
2920 fc_host_max_npiv_vports(shost) = phba->max_vpi;
2921 spin_lock_irq(shost->host_lock);
2922 vport->load_flag &= ~FC_LOADING;
2923 spin_unlock_irq(shost->host_lock);
2927 * lpfc_stop_port_s3 - Stop SLI3 device port
2928 * @phba: pointer to lpfc hba data structure.
2930 * This routine is invoked to stop an SLI3 device port, it stops the device
2931 * from generating interrupts and stops the device driver's timers for the
2932 * device.
2934 static void
2935 lpfc_stop_port_s3(struct lpfc_hba *phba)
2937 /* Clear all interrupt enable conditions */
2938 writel(0, phba->HCregaddr);
2939 readl(phba->HCregaddr); /* flush */
2940 /* Clear all pending interrupts */
2941 writel(0xffffffff, phba->HAregaddr);
2942 readl(phba->HAregaddr); /* flush */
2944 /* Reset some HBA SLI setup states */
2945 lpfc_stop_hba_timers(phba);
2946 phba->pport->work_port_events = 0;
2950 * lpfc_stop_port_s4 - Stop SLI4 device port
2951 * @phba: pointer to lpfc hba data structure.
2953 * This routine is invoked to stop an SLI4 device port, it stops the device
2954 * from generating interrupts and stops the device driver's timers for the
2955 * device.
2957 static void
2958 lpfc_stop_port_s4(struct lpfc_hba *phba)
2960 /* Reset some HBA SLI4 setup states */
2961 lpfc_stop_hba_timers(phba);
2962 phba->pport->work_port_events = 0;
2963 phba->sli4_hba.intr_enable = 0;
2967 * lpfc_stop_port - Wrapper function for stopping hba port
2968 * @phba: Pointer to HBA context object.
2970 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
2971 * the API jump table function pointer from the lpfc_hba struct.
2973 void
2974 lpfc_stop_port(struct lpfc_hba *phba)
2976 phba->lpfc_stop_port(phba);
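/*
 * Dispatch sketch: phba->lpfc_stop_port is the per-device-group entry in
 * the API jump table, so this wrapper resolves to lpfc_stop_port_s3() for
 * SLI-3 LightPulse HBAs and lpfc_stop_port_s4() for SLI-4 devices.
 */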
2980 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
2981 * @phba: Pointer to hba for which this call is being executed.
2983 * This routine starts the timer waiting for the FCF rediscovery to complete.
2985 void
2986 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
2988 unsigned long fcf_redisc_wait_tmo =
2989 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
2990 /* Start fcf rediscovery wait period timer */
2991 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
2992 spin_lock_irq(&phba->hbalock);
2993 /* Allow action to new fcf asynchronous event */
2994 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2995 /* Mark the FCF rediscovery pending state */
2996 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
2997 spin_unlock_irq(&phba->hbalock);
3001 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
3002 * @ptr: Map to lpfc_hba data structure pointer.
3004 * This routine is invoked when the wait for FCF table rediscovery has
3005 * timed out. If new FCF record(s) have been discovered during the
3006 * wait period, a new FCF event shall be added to the FCOE async event
3007 * list, and the worker thread shall be woken up to process it from the
3008 * worker thread context.
3010 void
3011 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3013 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
3015 /* Don't send FCF rediscovery event if timer cancelled */
3016 spin_lock_irq(&phba->hbalock);
3017 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3018 spin_unlock_irq(&phba->hbalock);
3019 return;
3021 /* Clear FCF rediscovery timer pending flag */
3022 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3023 /* FCF rediscovery event to worker thread */
3024 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
3025 spin_unlock_irq(&phba->hbalock);
3026 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3027 "2776 FCF rediscover quiescent timer expired\n");
3028 /* wake up worker thread */
3029 lpfc_worker_wake_up(phba);
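/*
 * Flag handshake sketch: FCF_REDISC_PEND is set when the wait timer is
 * started in lpfc_fcf_redisc_wait_start_timer() and is cleared on exactly
 * one of two paths: by the stop routines above when the timer is
 * cancelled, or here when the timer fires, in which case FCF_REDISC_EVT is
 * raised and the worker thread is woken to run the rediscovery.
 */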
3033 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
3034 * @phba: pointer to lpfc hba data structure.
3035 * @acqe_link: pointer to the async link completion queue entry.
3037 * This routine is to parse the SLI4 link-attention link fault code and
3038 * translate it into the base driver's read link attention mailbox command
3039 * status.
3041 * Return: Link-attention status in terms of base driver's coding.
3043 static uint16_t
3044 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
3045 struct lpfc_acqe_link *acqe_link)
3047 uint16_t latt_fault;
3049 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
3050 case LPFC_ASYNC_LINK_FAULT_NONE:
3051 case LPFC_ASYNC_LINK_FAULT_LOCAL:
3052 case LPFC_ASYNC_LINK_FAULT_REMOTE:
3053 latt_fault = 0;
3054 break;
3055 default:
3056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3057 "0398 Invalid link fault code: x%x\n",
3058 bf_get(lpfc_acqe_link_fault, acqe_link));
3059 latt_fault = MBXERR_ERROR;
3060 break;
3062 return latt_fault;
3066 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
3067 * @phba: pointer to lpfc hba data structure.
3068 * @acqe_link: pointer to the async link completion queue entry.
3070 * This routine is to parse the SLI4 link attention type and translate it
3071 * into the base driver's link attention type coding.
3073 * Return: Link attention type in terms of base driver's coding.
3075 static uint8_t
3076 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
3077 struct lpfc_acqe_link *acqe_link)
3079 uint8_t att_type;
3081 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
3082 case LPFC_ASYNC_LINK_STATUS_DOWN:
3083 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
3084 att_type = LPFC_ATT_LINK_DOWN;
3085 break;
3086 case LPFC_ASYNC_LINK_STATUS_UP:
3087 /* Ignore physical link up events - wait for logical link up */
3088 att_type = LPFC_ATT_RESERVED;
3089 break;
3090 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
3091 att_type = LPFC_ATT_LINK_UP;
3092 break;
3093 default:
3094 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3095 "0399 Invalid link attention type: x%x\n",
3096 bf_get(lpfc_acqe_link_status, acqe_link));
3097 att_type = LPFC_ATT_RESERVED;
3098 break;
3100 return att_type;
3104 * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
3105 * @phba: pointer to lpfc hba data structure.
3106 * @acqe_link: pointer to the async link completion queue entry.
3108 * This routine is to parse the SLI4 link-attention link speed and translate
3109 * it into the base driver's link-attention link speed coding.
3111 * Return: Link-attention link speed in terms of base driver's coding.
3113 static uint8_t
3114 lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
3115 struct lpfc_acqe_link *acqe_link)
3117 uint8_t link_speed;
3119 switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
3120 case LPFC_ASYNC_LINK_SPEED_ZERO:
3121 case LPFC_ASYNC_LINK_SPEED_10MBPS:
3122 case LPFC_ASYNC_LINK_SPEED_100MBPS:
3123 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3124 break;
3125 case LPFC_ASYNC_LINK_SPEED_1GBPS:
3126 link_speed = LPFC_LINK_SPEED_1GHZ;
3127 break;
3128 case LPFC_ASYNC_LINK_SPEED_10GBPS:
3129 link_speed = LPFC_LINK_SPEED_10GHZ;
3130 break;
3131 default:
3132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3133 "0483 Invalid link-attention link speed: x%x\n",
3134 bf_get(lpfc_acqe_link_speed, acqe_link));
3135 link_speed = LPFC_LINK_SPEED_UNKNOWN;
3136 break;
3138 return link_speed;
3142 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
3143 * @phba: pointer to lpfc hba data structure.
3144 * @acqe_link: pointer to the async link completion queue entry.
3146 * This routine is to handle the SLI4 asynchronous FCoE link event.
3148 static void
3149 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
3150 struct lpfc_acqe_link *acqe_link)
3152 struct lpfc_dmabuf *mp;
3153 LPFC_MBOXQ_t *pmb;
3154 MAILBOX_t *mb;
3155 struct lpfc_mbx_read_top *la;
3156 uint8_t att_type;
3157 int rc;
3159 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
3160 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
3161 return;
3162 phba->fcoe_eventtag = acqe_link->event_tag;
3163 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3164 if (!pmb) {
3165 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3166 "0395 The mboxq allocation failed\n");
3167 return;
3169 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3170 if (!mp) {
3171 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3172 "0396 The lpfc_dmabuf allocation failed\n");
3173 goto out_free_pmb;
3175 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3176 if (!mp->virt) {
3177 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3178 "0397 The mbuf allocation failed\n");
3179 goto out_free_dmabuf;
3182 /* Cleanup any outstanding ELS commands */
3183 lpfc_els_flush_all_cmd(phba);
3185 /* Block ELS IOCBs until we have done process link event */
3186 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3188 /* Update link event statistics */
3189 phba->sli.slistat.link_event++;
3191 /* Create lpfc_handle_latt mailbox command from link ACQE */
3192 lpfc_read_topology(phba, pmb, mp);
3193 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3194 pmb->vport = phba->pport;
3196 /* Keep the link status for extra SLI4 state machine reference */
3197 phba->sli4_hba.link_state.speed =
3198 bf_get(lpfc_acqe_link_speed, acqe_link);
3199 phba->sli4_hba.link_state.duplex =
3200 bf_get(lpfc_acqe_link_duplex, acqe_link);
3201 phba->sli4_hba.link_state.status =
3202 bf_get(lpfc_acqe_link_status, acqe_link);
3203 phba->sli4_hba.link_state.type =
3204 bf_get(lpfc_acqe_link_type, acqe_link);
3205 phba->sli4_hba.link_state.number =
3206 bf_get(lpfc_acqe_link_number, acqe_link);
3207 phba->sli4_hba.link_state.fault =
3208 bf_get(lpfc_acqe_link_fault, acqe_link);
3209 phba->sli4_hba.link_state.logical_speed =
3210 bf_get(lpfc_acqe_logical_link_speed, acqe_link);
3211 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3212 "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x "
3213 "LA Type:x%x Port Type:%d Port Number:%d Logical "
3214 "speed:%dMbps Fault:%d\n",
3215 phba->sli4_hba.link_state.speed,
3216 phba->sli4_hba.link_state.topology,
3217 phba->sli4_hba.link_state.status,
3218 phba->sli4_hba.link_state.type,
3219 phba->sli4_hba.link_state.number,
3220 phba->sli4_hba.link_state.logical_speed * 10,
3221 phba->sli4_hba.link_state.fault);
3223 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
3224 * topology info. Note: Optional for non FC-AL ports.
3226 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3227 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3228 if (rc == MBX_NOT_FINISHED)
3229 goto out_free_dmabuf;
3230 return;
3233 * For FCoE Mode: fill in all the topology information we need and call
3234 * the READ_TOPOLOGY completion routine to continue without actually
3235 * sending the READ_TOPOLOGY mailbox command to the port.
3237 /* Parse and translate status field */
3238 mb = &pmb->u.mb;
3239 mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
3241 /* Parse and translate link attention fields */
3242 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3243 la->eventTag = acqe_link->event_tag;
3244 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
3245 bf_set(lpfc_mbx_read_top_link_spd, la,
3246 lpfc_sli4_parse_latt_link_speed(phba, acqe_link));
3248 /* Fake the following irrelevant fields */
3249 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
3250 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
3251 bf_set(lpfc_mbx_read_top_il, la, 0);
3252 bf_set(lpfc_mbx_read_top_pb, la, 0);
3253 bf_set(lpfc_mbx_read_top_fa, la, 0);
3254 bf_set(lpfc_mbx_read_top_mm, la, 0);
3256 /* Invoke the lpfc_handle_latt mailbox command callback function */
3257 lpfc_mbx_cmpl_read_topology(phba, pmb);
3259 return;
3261 out_free_dmabuf:
3262 kfree(mp);
3263 out_free_pmb:
3264 mempool_free(pmb, phba->mbox_mem_pool);
3268 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
3269 * @phba: pointer to lpfc hba data structure.
3270 * @acqe_fc: pointer to the async fc completion queue entry.
3272 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
3273 * that the event was received and then issue a read_topology mailbox command so
3274 * that the rest of the driver will treat it the same as SLI3.
3276 static void
3277 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
3279 struct lpfc_dmabuf *mp;
3280 LPFC_MBOXQ_t *pmb;
3281 int rc;
3283 if (bf_get(lpfc_trailer_type, acqe_fc) !=
3284 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
3285 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3286 "2895 Non FC link Event detected.(%d)\n",
3287 bf_get(lpfc_trailer_type, acqe_fc));
3288 return;
3290 /* Keep the link status for extra SLI4 state machine reference */
3291 phba->sli4_hba.link_state.speed =
3292 bf_get(lpfc_acqe_fc_la_speed, acqe_fc);
3293 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
3294 phba->sli4_hba.link_state.topology =
3295 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
3296 phba->sli4_hba.link_state.status =
3297 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
3298 phba->sli4_hba.link_state.type =
3299 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
3300 phba->sli4_hba.link_state.number =
3301 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
3302 phba->sli4_hba.link_state.fault =
3303 bf_get(lpfc_acqe_link_fault, acqe_fc);
3304 phba->sli4_hba.link_state.logical_speed =
3305 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
3306 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3307 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
3308 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
3309 "%dMbps Fault:%d\n",
3310 phba->sli4_hba.link_state.speed,
3311 phba->sli4_hba.link_state.topology,
3312 phba->sli4_hba.link_state.status,
3313 phba->sli4_hba.link_state.type,
3314 phba->sli4_hba.link_state.number,
3315 phba->sli4_hba.link_state.logical_speed * 10,
3316 phba->sli4_hba.link_state.fault);
3317 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3318 if (!pmb) {
3319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3320 "2897 The mboxq allocation failed\n");
3321 return;
3323 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3324 if (!mp) {
3325 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3326 "2898 The lpfc_dmabuf allocation failed\n");
3327 goto out_free_pmb;
3329 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3330 if (!mp->virt) {
3331 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3332 "2899 The mbuf allocation failed\n");
3333 goto out_free_dmabuf;
3336 /* Cleanup any outstanding ELS commands */
3337 lpfc_els_flush_all_cmd(phba);
3339 /* Block ELS IOCBs until we have done process link event */
3340 phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
3342 /* Update link event statistics */
3343 phba->sli.slistat.link_event++;
3345 /* Create lpfc_handle_latt mailbox command from link ACQE */
3346 lpfc_read_topology(phba, pmb, mp);
3347 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
3348 pmb->vport = phba->pport;
3350 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3351 if (rc == MBX_NOT_FINISHED)
3352 goto out_free_dmabuf;
3353 return;
3355 out_free_dmabuf:
3356 kfree(mp);
3357 out_free_pmb:
3358 mempool_free(pmb, phba->mbox_mem_pool);
3362 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
3363 * @phba: pointer to lpfc hba data structure.
3364 * @acqe_sli: pointer to the async SLI completion queue entry.
3366 * This routine is to handle the SLI4 asynchronous SLI events.
3368 static void
3369 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
3371 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3372 "2901 Async SLI event - Event Data1:x%08x Event Data2:"
3373 "x%08x SLI Event Type:%d",
3374 acqe_sli->event_data1, acqe_sli->event_data2,
3375 bf_get(lpfc_trailer_type, acqe_sli));
3376 return;
3380 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
3381 * @vport: pointer to vport data structure.
3383 * This routine is to perform Clear Virtual Link (CVL) on a vport in
3384 * response to a CVL event.
3386 * Return the pointer to the ndlp with the vport if successful, otherwise
3387 * return NULL.
3389 static struct lpfc_nodelist *
3390 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
3392 struct lpfc_nodelist *ndlp;
3393 struct Scsi_Host *shost;
3394 struct lpfc_hba *phba;
3396 if (!vport)
3397 return NULL;
3398 phba = vport->phba;
3399 if (!phba)
3400 return NULL;
3401 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3402 if (!ndlp) {
3403 /* Cannot find existing Fabric ndlp, so allocate a new one */
3404 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3405 if (!ndlp)
3406 return 0;
3407 lpfc_nlp_init(vport, ndlp, Fabric_DID);
3408 /* Set the node type */
3409 ndlp->nlp_type |= NLP_FABRIC;
3410 /* Put ndlp onto node list */
3411 lpfc_enqueue_node(vport, ndlp);
3412 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3413 /* re-setup ndlp without removing from node list */
3414 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3415 if (!ndlp)
3416 return 0;
3418 if ((phba->pport->port_state < LPFC_FLOGI) &&
3419 (phba->pport->port_state != LPFC_VPORT_FAILED))
3420 return NULL;
3421 /* If virtual link is not yet instantiated ignore CVL */
3422 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
3423 && (vport->port_state != LPFC_VPORT_FAILED))
3424 return NULL;
3425 shost = lpfc_shost_from_vport(vport);
3426 if (!shost)
3427 return NULL;
3428 lpfc_linkdown_port(vport);
3429 lpfc_cleanup_pending_mbox(vport);
3430 spin_lock_irq(shost->host_lock);
3431 vport->fc_flag |= FC_VPORT_CVL_RCVD;
3432 spin_unlock_irq(shost->host_lock);
3434 return ndlp;
3438 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
3439 * @phba: pointer to lpfc hba data structure.
3441 * This routine is to perform Clear Virtual Link (CVL) on all vports in
3442 * response to a FCF dead event.
3444 static void
3445 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
3447 struct lpfc_vport **vports;
3448 int i;
3450 vports = lpfc_create_vport_work_array(phba);
3451 if (vports)
3452 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3453 lpfc_sli4_perform_vport_cvl(vports[i]);
3454 lpfc_destroy_vport_work_array(phba, vports);
3458 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
3459 * @phba: pointer to lpfc hba data structure.
3460 * @acqe_fip: pointer to the async fcoe completion queue entry.
3462 * This routine is to handle the SLI4 asynchronous fcoe event.
3464 static void
3465 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
3466 struct lpfc_acqe_fip *acqe_fip)
3468 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
3469 int rc;
3470 struct lpfc_vport *vport;
3471 struct lpfc_nodelist *ndlp;
3472 struct Scsi_Host *shost;
3473 int active_vlink_present;
3474 struct lpfc_vport **vports;
3475 int i;
3477 phba->fc_eventTag = acqe_fip->event_tag;
3478 phba->fcoe_eventtag = acqe_fip->event_tag;
3479 switch (event_type) {
3480 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
3481 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
3482 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
3483 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3484 LOG_DISCOVERY,
3485 "2546 New FCF event, evt_tag:x%x, "
3486 "index:x%x\n",
3487 acqe_fip->event_tag,
3488 acqe_fip->index);
3489 else
3490 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
3491 LOG_DISCOVERY,
3492 "2788 FCF param modified event, "
3493 "evt_tag:x%x, index:x%x\n",
3494 acqe_fip->event_tag,
3495 acqe_fip->index);
3496 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3498 * During period of FCF discovery, read the FCF
3499 * table record indexed by the event to update
3500 * FCF roundrobin failover eligible FCF bmask.
3502 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3503 LOG_DISCOVERY,
3504 "2779 Read FCF (x%x) for updating "
3505 "roundrobin FCF failover bmask\n",
3506 acqe_fip->index);
3507 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
3510 /* If the FCF discovery is in progress, do nothing. */
3511 spin_lock_irq(&phba->hbalock);
3512 if (phba->hba_flag & FCF_TS_INPROG) {
3513 spin_unlock_irq(&phba->hbalock);
3514 break;
3516 /* If fast FCF failover rescan event is pending, do nothing */
3517 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
3518 spin_unlock_irq(&phba->hbalock);
3519 break;
3522 /* If the FCF has been in discovered state, do nothing. */
3523 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
3524 spin_unlock_irq(&phba->hbalock);
3525 break;
3527 spin_unlock_irq(&phba->hbalock);
3529 /* Otherwise, scan the entire FCF table and re-discover SAN */
3530 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3531 "2770 Start FCF table scan per async FCF "
3532 "event, evt_tag:x%x, index:x%x\n",
3533 acqe_fip->event_tag, acqe_fip->index);
3534 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3535 LPFC_FCOE_FCF_GET_FIRST);
3536 if (rc)
3537 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3538 "2547 Issue FCF scan read FCF mailbox "
3539 "command failed (x%x)\n", rc);
3540 break;
3542 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
3543 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3544 "2548 FCF Table full count 0x%x tag 0x%x\n",
3545 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
3546 acqe_fip->event_tag);
3547 break;
3549 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
3550 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3551 "2549 FCF (x%x) disconnected from network, "
3552 "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
3554 * If we are in the middle of FCF failover process, clear
3555 * the corresponding FCF bit in the roundrobin bitmap.
3557 spin_lock_irq(&phba->hbalock);
3558 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3559 spin_unlock_irq(&phba->hbalock);
3560 /* Update FLOGI FCF failover eligible FCF bmask */
3561 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
3562 break;
3564 spin_unlock_irq(&phba->hbalock);
3566 /* If the event is not for currently used fcf do nothing */
3567 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
3568 break;
3571 * Otherwise, request the port to rediscover the entire FCF
3572 * table for a fast recovery from case that the current FCF
3573 * is no longer valid as we are not in the middle of FCF
3574 * failover process already.
3576 spin_lock_irq(&phba->hbalock);
3577 /* Mark the fast failover process in progress */
3578 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
3579 spin_unlock_irq(&phba->hbalock);
3581 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3582 "2771 Start FCF fast failover process due to "
3583 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
3584 "\n", acqe_fip->event_tag, acqe_fip->index);
3585 rc = lpfc_sli4_redisc_fcf_table(phba);
3586 if (rc) {
3587 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3588 LOG_DISCOVERY,
3589 "2772 Issue FCF rediscover mabilbox "
3590 "command failed, fail through to FCF "
3591 "dead event\n");
3592 spin_lock_irq(&phba->hbalock);
3593 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
3594 spin_unlock_irq(&phba->hbalock);
3596 * Last resort will fail over by treating this
3597 * as a link down to FCF registration.
3599 lpfc_sli4_fcf_dead_failthrough(phba);
3600 } else {
3601 /* Reset FCF roundrobin bmask for new discovery */
3602 memset(phba->fcf.fcf_rr_bmask, 0,
3603 sizeof(*phba->fcf.fcf_rr_bmask));
3605 * Handling fast FCF failover to a DEAD FCF event is
3606 * considered equivalent to receiving CVL on all vports.
3608 lpfc_sli4_perform_all_vport_cvl(phba);
3610 break;
3611 case LPFC_FIP_EVENT_TYPE_CVL:
3612 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3613 "2718 Clear Virtual Link Received for VPI 0x%x"
3614 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
3615 vport = lpfc_find_vport_by_vpid(phba,
3616 acqe_fip->index - phba->vpi_base);
3617 ndlp = lpfc_sli4_perform_vport_cvl(vport);
3618 if (!ndlp)
3619 break;
3620 active_vlink_present = 0;
3622 vports = lpfc_create_vport_work_array(phba);
3623 if (vports) {
3624 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
3625 i++) {
3626 if ((!(vports[i]->fc_flag &
3627 FC_VPORT_CVL_RCVD)) &&
3628 (vports[i]->port_state > LPFC_FDISC)) {
3629 active_vlink_present = 1;
3630 break;
3633 lpfc_destroy_vport_work_array(phba, vports);
3636 if (active_vlink_present) {
3638 * If there are other active VLinks present,
3639 * re-instantiate the Vlink using FDISC.
3641 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
3642 shost = lpfc_shost_from_vport(vport);
3643 spin_lock_irq(shost->host_lock);
3644 ndlp->nlp_flag |= NLP_DELAY_TMO;
3645 spin_unlock_irq(shost->host_lock);
3646 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
3647 vport->port_state = LPFC_FDISC;
3648 } else {
3650 * Otherwise, request the port to rediscover
3651 * the entire FCF table for a fast recovery
3652 * from the possible case that the current FCF
3653 * is no longer valid, if we are not already
3654 * in the FCF failover process.
3656 spin_lock_irq(&phba->hbalock);
3657 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3658 spin_unlock_irq(&phba->hbalock);
3659 break;
3661 /* Mark the fast failover process in progress */
3662 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
3663 spin_unlock_irq(&phba->hbalock);
3664 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
3665 LOG_DISCOVERY,
3666 "2773 Start FCF failover per CVL, "
3667 "evt_tag:x%x\n", acqe_fip->event_tag);
3668 rc = lpfc_sli4_redisc_fcf_table(phba);
3669 if (rc) {
3670 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
3671 LOG_DISCOVERY,
3672 "2774 Issue FCF rediscover "
3673 "mabilbox command failed, "
3674 "through to CVL event\n");
3675 spin_lock_irq(&phba->hbalock);
3676 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
3677 spin_unlock_irq(&phba->hbalock);
3679 * Last resort will be to retry on the
3680 * currently registered FCF entry.
3682 lpfc_retry_pport_discovery(phba);
3683 } else
3685 * Reset FCF roundrobin bmask for new
3686 * discovery.
3688 memset(phba->fcf.fcf_rr_bmask, 0,
3689 sizeof(*phba->fcf.fcf_rr_bmask));
3691 break;
3692 default:
3693 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3694 "0288 Unknown FCoE event type 0x%x event tag "
3695 "0x%x\n", event_type, acqe_fip->event_tag);
3696 break;
3701 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
3702 * @phba: pointer to lpfc hba data structure.
3703 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
3705 * This routine is to handle the SLI4 asynchronous dcbx event.
3707 static void
3708 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
3709 struct lpfc_acqe_dcbx *acqe_dcbx)
3711 phba->fc_eventTag = acqe_dcbx->event_tag;
3712 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3713 "0290 The SLI4 DCBX asynchronous event is not "
3714 "handled yet\n");
3718 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
3719 * @phba: pointer to lpfc hba data structure.
3720 * @acqe_grp5: pointer to the async grp5 completion queue entry.
3722 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
3723 * is an asynchronous notification of a logical link speed change. The Port
3724 * reports the logical link speed in units of 10Mbps.
3726 static void
3727 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
3728 struct lpfc_acqe_grp5 *acqe_grp5)
3730 uint16_t prev_ll_spd;
3732 phba->fc_eventTag = acqe_grp5->event_tag;
3733 phba->fcoe_eventtag = acqe_grp5->event_tag;
3734 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
3735 phba->sli4_hba.link_state.logical_speed =
3736 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5));
3737 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3738 "2789 GRP5 Async Event: Updating logical link speed "
3739 "from %dMbps to %dMbps\n", (prev_ll_spd * 10),
3740 (phba->sli4_hba.link_state.logical_speed*10));
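/*
 * Editor's note (illustrative, not part of the driver): the GRP5 ACQE carries
 * the logical link speed in 10 Mbps units, which is why the handler above
 * multiplies by 10 before logging. A minimal sketch of that conversion; the
 * helper name and parameter are hypothetical:
 */
#if 0
static uint32_t lpfc_sketch_llink_speed_to_mbps(uint16_t llink_spd_units)
{
	/* e.g. a reported value of 1000 corresponds to 10000 Mbps (10 Gb/s) */
	return (uint32_t)llink_spd_units * 10;
}
#endif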
3744 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
3745 * @phba: pointer to lpfc hba data structure.
3747 * This routine is invoked by the worker thread to process all the pending
3748 * SLI4 asynchronous events.
3750 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
3752 struct lpfc_cq_event *cq_event;
3754 /* First, declare the async event has been handled */
3755 spin_lock_irq(&phba->hbalock);
3756 phba->hba_flag &= ~ASYNC_EVENT;
3757 spin_unlock_irq(&phba->hbalock);
3758 /* Now, handle all the async events */
3759 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
3760 /* Get the first event from the head of the event queue */
3761 spin_lock_irq(&phba->hbalock);
3762 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
3763 cq_event, struct lpfc_cq_event, list);
3764 spin_unlock_irq(&phba->hbalock);
3765 /* Process the asynchronous event */
3766 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
3767 case LPFC_TRAILER_CODE_LINK:
3768 lpfc_sli4_async_link_evt(phba,
3769 &cq_event->cqe.acqe_link);
3770 break;
3771 case LPFC_TRAILER_CODE_FCOE:
3772 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
3773 break;
3774 case LPFC_TRAILER_CODE_DCBX:
3775 lpfc_sli4_async_dcbx_evt(phba,
3776 &cq_event->cqe.acqe_dcbx);
3777 break;
3778 case LPFC_TRAILER_CODE_GRP5:
3779 lpfc_sli4_async_grp5_evt(phba,
3780 &cq_event->cqe.acqe_grp5);
3781 break;
3782 case LPFC_TRAILER_CODE_FC:
3783 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
3784 break;
3785 case LPFC_TRAILER_CODE_SLI:
3786 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
3787 break;
3788 default:
3789 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3790 "1804 Invalid asynchrous event code: "
3791 "x%x\n", bf_get(lpfc_trailer_code,
3792 &cq_event->cqe.mcqe_cmpl));
3793 break;
3795 /* Free the completion event processed to the free pool */
3796 lpfc_sli4_cq_event_release(phba, cq_event);
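/*
 * Editor's sketch (hypothetical, not taken from this file): the consumer loop
 * above drains sp_asynce_work_queue under hbalock and dispatches on the MCQE
 * trailer code. A producer (for example, the interrupt path) would typically
 * queue the event and flag the worker in the mirror-image order; the helper
 * name below is illustrative only.
 */
#if 0
static void lpfc_sketch_queue_async_event(struct lpfc_hba *phba,
					  struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Make the event visible to lpfc_sli4_async_event_proc() */
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Tell the worker thread there is asynchronous work pending */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
#endif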
3801 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
3802 * @phba: pointer to lpfc hba data structure.
3804 * This routine is invoked by the worker thread to process FCF table
3805 * rediscovery pending completion event.
3807 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
3809 int rc;
3811 spin_lock_irq(&phba->hbalock);
3812 /* Clear FCF rediscovery timeout event */
3813 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
3814 /* Clear driver fast failover FCF record flag */
3815 phba->fcf.failover_rec.flag = 0;
3816 /* Set state for FCF fast failover */
3817 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
3818 spin_unlock_irq(&phba->hbalock);
3820 /* Scan FCF table from the first entry to re-discover SAN */
3821 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3822 "2777 Start post-quiescent FCF table scan\n");
3823 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
3824 if (rc)
3825 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
3826 "2747 Issue FCF scan read FCF mailbox "
3827 "command failed 0x%x\n", rc);
3831 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
3832 * @phba: pointer to lpfc hba data structure.
3833 * @dev_grp: The HBA PCI-Device group number.
3835 * This routine is invoked to set up the per HBA PCI-Device group function
3836 * API jump table entries.
3838 * Return: 0 if success, otherwise -ENODEV
3841 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
3843 int rc;
3845 /* Set up lpfc PCI-device group */
3846 phba->pci_dev_grp = dev_grp;
3848 /* The LPFC_PCI_DEV_OC uses SLI4 */
3849 if (dev_grp == LPFC_PCI_DEV_OC)
3850 phba->sli_rev = LPFC_SLI_REV4;
3852 /* Set up device INIT API function jump table */
3853 rc = lpfc_init_api_table_setup(phba, dev_grp);
3854 if (rc)
3855 return -ENODEV;
3856 /* Set up SCSI API function jump table */
3857 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
3858 if (rc)
3859 return -ENODEV;
3860 /* Set up SLI API function jump table */
3861 rc = lpfc_sli_api_table_setup(phba, dev_grp);
3862 if (rc)
3863 return -ENODEV;
3864 /* Set up MBOX API function jump table */
3865 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
3866 if (rc)
3867 return -ENODEV;
3869 return 0;
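/*
 * Editor's sketch (illustrative): once lpfc_api_table_setup() has run, SLI
 * revision specific behavior is reached through the per-phba function
 * pointers rather than explicit SLI3/SLI4 tests. A hypothetical caller would
 * simply dispatch through the table:
 */
#if 0
static void lpfc_sketch_post_hba_down(struct lpfc_hba *phba)
{
	/* Resolves to lpfc_hba_down_post_s3() or lpfc_hba_down_post_s4(),
	 * depending on the dev_grp passed to lpfc_api_table_setup(). */
	phba->lpfc_hba_down_post(phba);
}
#endif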
3873 * lpfc_log_intr_mode - Log the active interrupt mode
3874 * @phba: pointer to lpfc hba data structure.
3875 * @intr_mode: active interrupt mode adopted.
3877 * This routine is invoked to log the active interrupt mode currently
3878 * used by the device.
3880 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
3882 switch (intr_mode) {
3883 case 0:
3884 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3885 "0470 Enable INTx interrupt mode.\n");
3886 break;
3887 case 1:
3888 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3889 "0481 Enabled MSI interrupt mode.\n");
3890 break;
3891 case 2:
3892 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3893 "0480 Enabled MSI-X interrupt mode.\n");
3894 break;
3895 default:
3896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3897 "0482 Illegal interrupt mode.\n");
3898 break;
3900 return;
3904 * lpfc_enable_pci_dev - Enable a generic PCI device.
3905 * @phba: pointer to lpfc hba data structure.
3907 * This routine is invoked to enable the PCI device that is common to all
3908 * PCI devices.
3910 * Return codes
3911 * 0 - successful
3912 * other values - error
3914 static int
3915 lpfc_enable_pci_dev(struct lpfc_hba *phba)
3917 struct pci_dev *pdev;
3918 int bars;
3920 /* Obtain PCI device reference */
3921 if (!phba->pcidev)
3922 goto out_error;
3923 else
3924 pdev = phba->pcidev;
3925 /* Select PCI BARs */
3926 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3927 /* Enable PCI device */
3928 if (pci_enable_device_mem(pdev))
3929 goto out_error;
3930 /* Request PCI resource for the device */
3931 if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
3932 goto out_disable_device;
3933 /* Set up device as PCI master and save state for EEH */
3934 pci_set_master(pdev);
3935 pci_try_set_mwi(pdev);
3936 pci_save_state(pdev);
3938 return 0;
3940 out_disable_device:
3941 pci_disable_device(pdev);
3942 out_error:
3943 return -ENODEV;
3947 * lpfc_disable_pci_dev - Disable a generic PCI device.
3948 * @phba: pointer to lpfc hba data structure.
3950 * This routine is invoked to disable the PCI device that is common to all
3951 * PCI devices.
3953 static void
3954 lpfc_disable_pci_dev(struct lpfc_hba *phba)
3956 struct pci_dev *pdev;
3957 int bars;
3959 /* Obtain PCI device reference */
3960 if (!phba->pcidev)
3961 return;
3962 else
3963 pdev = phba->pcidev;
3964 /* Select PCI BARs */
3965 bars = pci_select_bars(pdev, IORESOURCE_MEM);
3966 /* Release PCI resource and disable PCI device */
3967 pci_release_selected_regions(pdev, bars);
3968 pci_disable_device(pdev);
3969 /* Null out PCI private reference to driver */
3970 pci_set_drvdata(pdev, NULL);
3972 return;
3976 * lpfc_reset_hba - Reset a hba
3977 * @phba: pointer to lpfc hba data structure.
3979 * This routine is invoked to reset a hba device. It brings the HBA
3980 * offline, performs a board restart, and then brings the board back
3981 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
3982 * outstanding mailbox commands.
3984 void
3985 lpfc_reset_hba(struct lpfc_hba *phba)
3987 /* If resets are disabled then set error state and return. */
3988 if (!phba->cfg_enable_hba_reset) {
3989 phba->link_state = LPFC_HBA_ERROR;
3990 return;
3992 lpfc_offline_prep(phba);
3993 lpfc_offline(phba);
3994 lpfc_sli_brdrestart(phba);
3995 lpfc_online(phba);
3996 lpfc_unblock_mgmt_io(phba);
4000 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
4001 * @phba: pointer to lpfc hba data structure.
4003 * This routine is invoked to set up the driver internal resources specific to
4004 * support the SLI-3 HBA device it is attached to.
4006 * Return codes
4007 * 0 - successful
4008 * other values - error
4010 static int
4011 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
4013 struct lpfc_sli *psli;
4016 * Initialize timers used by driver
4019 /* Heartbeat timer */
4020 init_timer(&phba->hb_tmofunc);
4021 phba->hb_tmofunc.function = lpfc_hb_timeout;
4022 phba->hb_tmofunc.data = (unsigned long)phba;
4024 psli = &phba->sli;
4025 /* MBOX heartbeat timer */
4026 init_timer(&psli->mbox_tmo);
4027 psli->mbox_tmo.function = lpfc_mbox_timeout;
4028 psli->mbox_tmo.data = (unsigned long) phba;
4029 /* FCP polling mode timer */
4030 init_timer(&phba->fcp_poll_timer);
4031 phba->fcp_poll_timer.function = lpfc_poll_timeout;
4032 phba->fcp_poll_timer.data = (unsigned long) phba;
4033 /* Fabric block timer */
4034 init_timer(&phba->fabric_block_timer);
4035 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4036 phba->fabric_block_timer.data = (unsigned long) phba;
4037 /* EA polling mode timer */
4038 init_timer(&phba->eratt_poll);
4039 phba->eratt_poll.function = lpfc_poll_eratt;
4040 phba->eratt_poll.data = (unsigned long) phba;
4042 /* Host attention work mask setup */
4043 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
4044 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
4046 /* Get all the module params for configuring this host */
4047 lpfc_get_cfgparam(phba);
4048 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
4049 phba->menlo_flag |= HBA_MENLO_SUPPORT;
4050 /* check for menlo minimum sg count */
4051 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
4052 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
4056 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4057 * used to create the sg_dma_buf_pool must be dynamically calculated.
4058 * 2 segments are added since the IOCB needs a command and a response bde.
4060 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4061 sizeof(struct fcp_rsp) +
4062 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
4064 if (phba->cfg_enable_bg) {
4065 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
4066 phba->cfg_sg_dma_buf_size +=
4067 phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
4070 /* Also reinitialize the host templates with new values. */
4071 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4072 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4074 phba->max_vpi = LPFC_MAX_VPI;
4075 /* This will be set to correct value after config_port mbox */
4076 phba->max_vports = 0;
4079 * Initialize the SLI Layer to run with lpfc HBAs.
4081 lpfc_sli_setup(phba);
4082 lpfc_sli_queue_setup(phba);
4084 /* Allocate device driver memory */
4085 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
4086 return -ENOMEM;
4088 return 0;
4092 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
4093 * @phba: pointer to lpfc hba data structure.
4095 * This routine is invoked to unset the driver internal resources set up
4096 * specific for supporting the SLI-3 HBA device it is attached to.
4098 static void
4099 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
4101 /* Free device driver memory allocated */
4102 lpfc_mem_free_all(phba);
4104 return;
4108 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
4109 * @phba: pointer to lpfc hba data structure.
4111 * This routine is invoked to set up the driver internal resources specific to
4112 * support the SLI-4 HBA device it is attached to.
4114 * Return codes
4115 * 0 - successful
4116 * other values - error
4118 static int
4119 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4121 struct lpfc_sli *psli;
4122 LPFC_MBOXQ_t *mboxq;
4123 int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
4124 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4125 struct lpfc_mqe *mqe;
4126 int longs, sli_family;
4128 /* Before proceeding, wait for POST to complete and the device to be ready */
4129 rc = lpfc_sli4_post_status_check(phba);
4130 if (rc)
4131 return -ENODEV;
4134 * Initialize timers used by driver
4137 /* Heartbeat timer */
4138 init_timer(&phba->hb_tmofunc);
4139 phba->hb_tmofunc.function = lpfc_hb_timeout;
4140 phba->hb_tmofunc.data = (unsigned long)phba;
4141 init_timer(&phba->rrq_tmr);
4142 phba->rrq_tmr.function = lpfc_rrq_timeout;
4143 phba->rrq_tmr.data = (unsigned long)phba;
4145 psli = &phba->sli;
4146 /* MBOX heartbeat timer */
4147 init_timer(&psli->mbox_tmo);
4148 psli->mbox_tmo.function = lpfc_mbox_timeout;
4149 psli->mbox_tmo.data = (unsigned long) phba;
4150 /* Fabric block timer */
4151 init_timer(&phba->fabric_block_timer);
4152 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4153 phba->fabric_block_timer.data = (unsigned long) phba;
4154 /* EA polling mode timer */
4155 init_timer(&phba->eratt_poll);
4156 phba->eratt_poll.function = lpfc_poll_eratt;
4157 phba->eratt_poll.data = (unsigned long) phba;
4158 /* FCF rediscover timer */
4159 init_timer(&phba->fcf.redisc_wait);
4160 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4161 phba->fcf.redisc_wait.data = (unsigned long)phba;
4164 * We need to do a READ_CONFIG mailbox command here before
4165 * calling lpfc_get_cfgparam. For VFs this will report the
4166 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
4167 * All of the resources allocated
4168 * for this Port are tied to these values.
4170 /* Get all the module params for configuring this host */
4171 lpfc_get_cfgparam(phba);
4172 phba->max_vpi = LPFC_MAX_VPI;
4173 /* This will be set to correct value after the read_config mbox */
4174 phba->max_vports = 0;
4176 /* Program the default value of vlan_id and fc_map */
4177 phba->valid_vlan = 0;
4178 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4179 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4180 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4183 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4184 * used to create the sg_dma_buf_pool must be dynamically calculated.
4185 * 2 segments are added since the IOCB needs a command and a response bde.
4186 * To ensure that the scsi sgl does not cross a 4k page boundary, only
4187 * sgl sizes that are a power of 2 are used.
4189 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4190 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)));
4192 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4193 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
4194 switch (sli_family) {
4195 case LPFC_SLI_INTF_FAMILY_BE2:
4196 case LPFC_SLI_INTF_FAMILY_BE3:
4197 /* There is a single hint for BE - 2 pages per BPL. */
4198 if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) ==
4199 LPFC_SLI_INTF_SLI_HINT1_1)
4200 max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE;
4201 break;
4202 case LPFC_SLI_INTF_FAMILY_LNCR_A0:
4203 case LPFC_SLI_INTF_FAMILY_LNCR_B0:
4204 default:
4205 break;
4207 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4208 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4209 dma_buf_size = dma_buf_size << 1)
4211 if (dma_buf_size == max_buf_size)
4212 phba->cfg_sg_seg_cnt = (dma_buf_size -
4213 sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) -
4214 (2 * sizeof(struct sli4_sge))) /
4215 sizeof(struct sli4_sge);
4216 phba->cfg_sg_dma_buf_size = dma_buf_size;
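	/*
	 * Editor's note (illustrative): the loop above simply rounds the
	 * required buffer size up to a power of two, bounded by max_buf_size,
	 * so that a SCSI SGL never straddles a 4K boundary. A sketch of the
	 * equivalent computation, assuming buf_size and max_buf_size as
	 * computed above:
	 */
#if 0
	{
		int sketch_size = LPFC_SLI4_MIN_BUF_SIZE;

		/* e.g. a 3000-byte requirement settles on 4096, assuming the
		 * minimum buffer size is not larger than that */
		while (sketch_size < max_buf_size && sketch_size < buf_size)
			sketch_size <<= 1;
		/* sketch_size now matches dma_buf_size from the loop above */
	}
#endif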
4218 /* Initialize buffer queue management fields */
4219 hbq_count = lpfc_sli_hbq_count();
4220 for (i = 0; i < hbq_count; ++i)
4221 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4222 INIT_LIST_HEAD(&phba->rb_pend_list);
4223 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4224 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4227 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
4229 /* Initialize the Abort scsi buffer list used by driver */
4230 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
4231 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
4232 /* This abort list used by worker thread */
4233 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
4236 * Initialize driver internal slow-path work queues
4239 /* Driver internal slow-path CQ Event pool */
4240 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
4241 /* Response IOCB work queue list */
4242 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
4243 /* Asynchronous event CQ Event work queue list */
4244 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
4245 /* Fast-path XRI aborted CQ Event work queue list */
4246 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
4247 /* Slow-path XRI aborted CQ Event work queue list */
4248 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
4249 /* Receive queue CQ Event work queue list */
4250 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
4252 /* Initialize the driver internal SLI layer lists. */
4253 lpfc_sli_setup(phba);
4254 lpfc_sli_queue_setup(phba);
4256 /* Allocate device driver memory */
4257 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
4258 if (rc)
4259 return -ENOMEM;
4261 /* IF Type 2 ports get initialized now. */
4262 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4263 LPFC_SLI_INTF_IF_TYPE_2) {
4264 rc = lpfc_pci_function_reset(phba);
4265 if (unlikely(rc))
4266 return -ENODEV;
4269 /* Create the bootstrap mailbox command */
4270 rc = lpfc_create_bootstrap_mbox(phba);
4271 if (unlikely(rc))
4272 goto out_free_mem;
4274 /* Set up the host's endian order with the device. */
4275 rc = lpfc_setup_endian_order(phba);
4276 if (unlikely(rc))
4277 goto out_free_bsmbx;
4279 /* Set up the hba's configuration parameters. */
4280 rc = lpfc_sli4_read_config(phba);
4281 if (unlikely(rc))
4282 goto out_free_bsmbx;
4284 /* IF Type 0 ports get initialized now. */
4285 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
4286 LPFC_SLI_INTF_IF_TYPE_0) {
4287 rc = lpfc_pci_function_reset(phba);
4288 if (unlikely(rc))
4289 goto out_free_bsmbx;
4292 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4293 GFP_KERNEL);
4294 if (!mboxq) {
4295 rc = -ENOMEM;
4296 goto out_free_bsmbx;
4299 /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
4300 lpfc_supported_pages(mboxq);
4301 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4302 if (!rc) {
4303 mqe = &mboxq->u.mqe;
4304 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
4305 LPFC_MAX_SUPPORTED_PAGES);
4306 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
4307 switch (pn_page[i]) {
4308 case LPFC_SLI4_PARAMETERS:
4309 phba->sli4_hba.pc_sli4_params.supported = 1;
4310 break;
4311 default:
4312 break;
4315 /* Read the port's SLI4 Parameters capabilities if supported. */
4316 if (phba->sli4_hba.pc_sli4_params.supported)
4317 rc = lpfc_pc_sli4_params_get(phba, mboxq);
4318 if (rc) {
4319 mempool_free(mboxq, phba->mbox_mem_pool);
4320 rc = -EIO;
4321 goto out_free_bsmbx;
4325 * Get sli4 parameters that override parameters from Port capabilities.
4326 * If this call fails it is not a critical error so continue loading.
4328 lpfc_get_sli4_parameters(phba, mboxq);
4329 mempool_free(mboxq, phba->mbox_mem_pool);
4330 /* Create all the SLI4 queues */
4331 rc = lpfc_sli4_queue_create(phba);
4332 if (rc)
4333 goto out_free_bsmbx;
4335 /* Create driver internal CQE event pool */
4336 rc = lpfc_sli4_cq_event_pool_create(phba);
4337 if (rc)
4338 goto out_destroy_queue;
4340 /* Initialize and populate the iocb list per host */
4341 rc = lpfc_init_sgl_list(phba);
4342 if (rc) {
4343 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4344 "1400 Failed to initialize sgl list.\n");
4345 goto out_destroy_cq_event_pool;
4347 rc = lpfc_init_active_sgl_array(phba);
4348 if (rc) {
4349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4350 "1430 Failed to initialize sgl list.\n");
4351 goto out_free_sgl_list;
4354 rc = lpfc_sli4_init_rpi_hdrs(phba);
4355 if (rc) {
4356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4357 "1432 Failed to initialize rpi headers.\n");
4358 goto out_free_active_sgl;
4361 /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
4362 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
4363 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
4364 GFP_KERNEL);
4365 if (!phba->fcf.fcf_rr_bmask) {
4366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4367 "2759 Failed allocate memory for FCF round "
4368 "robin failover bmask\n");
4369 goto out_remove_rpi_hdrs;
4372 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4373 phba->cfg_fcp_eq_count), GFP_KERNEL);
4374 if (!phba->sli4_hba.fcp_eq_hdl) {
4375 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4376 "2572 Failed allocate memory for fast-path "
4377 "per-EQ handle array\n");
4378 goto out_free_fcf_rr_bmask;
4381 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
4382 phba->sli4_hba.cfg_eqn), GFP_KERNEL);
4383 if (!phba->sli4_hba.msix_entries) {
4384 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4385 "2573 Failed allocate memory for msi-x "
4386 "interrupt vector entries\n");
4387 goto out_free_fcp_eq_hdl;
4390 return rc;
4392 out_free_fcp_eq_hdl:
4393 kfree(phba->sli4_hba.fcp_eq_hdl);
4394 out_free_fcf_rr_bmask:
4395 kfree(phba->fcf.fcf_rr_bmask);
4396 out_remove_rpi_hdrs:
4397 lpfc_sli4_remove_rpi_hdrs(phba);
4398 out_free_active_sgl:
4399 lpfc_free_active_sgl(phba);
4400 out_free_sgl_list:
4401 lpfc_free_sgl_list(phba);
4402 out_destroy_cq_event_pool:
4403 lpfc_sli4_cq_event_pool_destroy(phba);
4404 out_destroy_queue:
4405 lpfc_sli4_queue_destroy(phba);
4406 out_free_bsmbx:
4407 lpfc_destroy_bootstrap_mbox(phba);
4408 out_free_mem:
4409 lpfc_mem_free(phba);
4410 return rc;
4414 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
4415 * @phba: pointer to lpfc hba data structure.
4417 * This routine is invoked to unset the driver internal resources set up
4418 * specific for supporting the SLI-4 HBA device it is attached to.
4420 static void
4421 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4423 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
4425 /* Free memory allocated for msi-x interrupt vector entries */
4426 kfree(phba->sli4_hba.msix_entries);
4428 /* Free memory allocated for fast-path work queue handles */
4429 kfree(phba->sli4_hba.fcp_eq_hdl);
4431 /* Free the allocated rpi headers. */
4432 lpfc_sli4_remove_rpi_hdrs(phba);
4433 lpfc_sli4_remove_rpis(phba);
4435 /* Free eligible FCF index bmask */
4436 kfree(phba->fcf.fcf_rr_bmask);
4438 /* Free the ELS sgl list */
4439 lpfc_free_active_sgl(phba);
4440 lpfc_free_sgl_list(phba);
4442 /* Free the SCSI sgl management array */
4443 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4445 /* Free the SLI4 queues */
4446 lpfc_sli4_queue_destroy(phba);
4448 /* Free the completion queue EQ event pool */
4449 lpfc_sli4_cq_event_release_all(phba);
4450 lpfc_sli4_cq_event_pool_destroy(phba);
4452 /* Free the bsmbx region. */
4453 lpfc_destroy_bootstrap_mbox(phba);
4455 /* Free the SLI Layer memory with SLI4 HBAs */
4456 lpfc_mem_free_all(phba);
4458 /* Free the current connect table */
4459 list_for_each_entry_safe(conn_entry, next_conn_entry,
4460 &phba->fcf_conn_rec_list, list) {
4461 list_del_init(&conn_entry->list);
4462 kfree(conn_entry);
4465 return;
4469 * lpfc_init_api_table_setup - Set up init api function jump table
4470 * @phba: The hba struct for which this call is being executed.
4471 * @dev_grp: The HBA PCI-Device group number.
4473 * This routine sets up the device INIT interface API function jump table
4474 * in @phba struct.
4476 * Returns: 0 - success, -ENODEV - failure.
4479 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4481 phba->lpfc_hba_init_link = lpfc_hba_init_link;
4482 phba->lpfc_hba_down_link = lpfc_hba_down_link;
4483 phba->lpfc_selective_reset = lpfc_selective_reset;
4484 switch (dev_grp) {
4485 case LPFC_PCI_DEV_LP:
4486 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
4487 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
4488 phba->lpfc_stop_port = lpfc_stop_port_s3;
4489 break;
4490 case LPFC_PCI_DEV_OC:
4491 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
4492 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
4493 phba->lpfc_stop_port = lpfc_stop_port_s4;
4494 break;
4495 default:
4496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4497 "1431 Invalid HBA PCI-device group: 0x%x\n",
4498 dev_grp);
4499 return -ENODEV;
4500 break;
4502 return 0;
4506 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
4507 * @phba: pointer to lpfc hba data structure.
4509 * This routine is invoked to set up the driver internal resources before the
4510 * device specific resource setup to support the HBA device it is attached to.
4512 * Return codes
4513 * 0 - successful
4514 * other values - error
4516 static int
4517 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
4520 * Driver resources common to all SLI revisions
4522 atomic_set(&phba->fast_event_count, 0);
4523 spin_lock_init(&phba->hbalock);
4525 /* Initialize ndlp management spinlock */
4526 spin_lock_init(&phba->ndlp_lock);
4528 INIT_LIST_HEAD(&phba->port_list);
4529 INIT_LIST_HEAD(&phba->work_list);
4530 init_waitqueue_head(&phba->wait_4_mlo_m_q);
4532 /* Initialize the wait queue head for the kernel thread */
4533 init_waitqueue_head(&phba->work_waitq);
4535 /* Initialize the scsi buffer list used by driver for scsi IO */
4536 spin_lock_init(&phba->scsi_buf_list_lock);
4537 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
4539 /* Initialize the fabric iocb list */
4540 INIT_LIST_HEAD(&phba->fabric_iocb_list);
4542 /* Initialize list to save ELS buffers */
4543 INIT_LIST_HEAD(&phba->elsbuf);
4545 /* Initialize FCF connection rec list */
4546 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
4548 return 0;
4552 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
4553 * @phba: pointer to lpfc hba data structure.
4555 * This routine is invoked to set up the driver internal resources after the
4556 * device specific resource setup to support the HBA device it is attached to.
4558 * Return codes
4559 * 0 - successful
4560 * other values - error
4562 static int
4563 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
4565 int error;
4567 /* Startup the kernel thread for this host adapter. */
4568 phba->worker_thread = kthread_run(lpfc_do_work, phba,
4569 "lpfc_worker_%d", phba->brd_no);
4570 if (IS_ERR(phba->worker_thread)) {
4571 error = PTR_ERR(phba->worker_thread);
4572 return error;
4575 return 0;
4579 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
4580 * @phba: pointer to lpfc hba data structure.
4582 * This routine is invoked to unset the driver internal resources set up after
4583 * the device specific resource setup for supporting the HBA device it
4584 * is attached to.
4586 static void
4587 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
4589 /* Stop kernel worker thread */
4590 kthread_stop(phba->worker_thread);
4594 * lpfc_free_iocb_list - Free iocb list.
4595 * @phba: pointer to lpfc hba data structure.
4597 * This routine is invoked to free the driver's IOCB list and memory.
4599 static void
4600 lpfc_free_iocb_list(struct lpfc_hba *phba)
4602 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
4604 spin_lock_irq(&phba->hbalock);
4605 list_for_each_entry_safe(iocbq_entry, iocbq_next,
4606 &phba->lpfc_iocb_list, list) {
4607 list_del(&iocbq_entry->list);
4608 kfree(iocbq_entry);
4609 phba->total_iocbq_bufs--;
4611 spin_unlock_irq(&phba->hbalock);
4613 return;
4617 * lpfc_init_iocb_list - Allocate and initialize iocb list.
4618 * @phba: pointer to lpfc hba data structure.
4620 * This routine is invoked to allocate and initialize the driver's IOCB
4621 * list and set up the IOCB tag array accordingly.
4623 * Return codes
4624 * 0 - successful
4625 * other values - error
4627 static int
4628 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
4630 struct lpfc_iocbq *iocbq_entry = NULL;
4631 uint16_t iotag;
4632 int i;
4634 /* Initialize and populate the iocb list per host. */
4635 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
4636 for (i = 0; i < iocb_count; i++) {
4637 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
4638 if (iocbq_entry == NULL) {
4639 printk(KERN_ERR "%s: only allocated %d iocbs of "
4640 "expected %d count. Unloading driver.\n",
4641 __func__, i, LPFC_IOCB_LIST_CNT);
4642 goto out_free_iocbq;
4645 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
4646 if (iotag == 0) {
4647 kfree(iocbq_entry);
4648 printk(KERN_ERR "%s: failed to allocate IOTAG. "
4649 "Unloading driver.\n", __func__);
4650 goto out_free_iocbq;
4652 iocbq_entry->sli4_xritag = NO_XRI;
4654 spin_lock_irq(&phba->hbalock);
4655 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
4656 phba->total_iocbq_bufs++;
4657 spin_unlock_irq(&phba->hbalock);
4660 return 0;
4662 out_free_iocbq:
4663 lpfc_free_iocb_list(phba);
4665 return -ENOMEM;
4669 * lpfc_free_sgl_list - Free sgl list.
4670 * @phba: pointer to lpfc hba data structure.
4672 * This routine is invoked to free the driver's sgl list and memory.
4674 static void
4675 lpfc_free_sgl_list(struct lpfc_hba *phba)
4677 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
4678 LIST_HEAD(sglq_list);
4680 spin_lock_irq(&phba->hbalock);
4681 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
4682 spin_unlock_irq(&phba->hbalock);
4684 list_for_each_entry_safe(sglq_entry, sglq_next,
4685 &sglq_list, list) {
4686 list_del(&sglq_entry->list);
4687 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
4688 kfree(sglq_entry);
4689 phba->sli4_hba.total_sglq_bufs--;
4691 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4695 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
4696 * @phba: pointer to lpfc hba data structure.
4698 * This routine is invoked to allocate the driver's active sgl memory.
4699 * This array will hold the sglq_entry's for active IOs.
4701 static int
4702 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
4704 int size;
4705 size = sizeof(struct lpfc_sglq *);
4706 size *= phba->sli4_hba.max_cfg_param.max_xri;
4708 phba->sli4_hba.lpfc_sglq_active_list =
4709 kzalloc(size, GFP_KERNEL);
4710 if (!phba->sli4_hba.lpfc_sglq_active_list)
4711 return -ENOMEM;
4712 return 0;
4716 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
4717 * @phba: pointer to lpfc hba data structure.
4719 * This routine is invoked to walk through the array of active sglq entries
4720 * and free all of the resources.
4721 * This is just a placeholder for now.
4723 static void
4724 lpfc_free_active_sgl(struct lpfc_hba *phba)
4726 kfree(phba->sli4_hba.lpfc_sglq_active_list);
4730 * lpfc_init_sgl_list - Allocate and initialize sgl list.
4731 * @phba: pointer to lpfc hba data structure.
4733 * This routine is invoked to allocate and initialize the driver's sgl
4734 * list and set up the sgl xritag tag array accordingly.
4736 * Return codes
4737 * 0 - successful
4738 * other values - error
4740 static int
4741 lpfc_init_sgl_list(struct lpfc_hba *phba)
4743 struct lpfc_sglq *sglq_entry = NULL;
4744 int i;
4745 int els_xri_cnt;
4747 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4748 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4749 "2400 lpfc_init_sgl_list els %d.\n",
4750 els_xri_cnt);
4751 /* Initialize and populate the sglq list per host/VF. */
4752 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
4753 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
4755 /* Sanity check on XRI management */
4756 if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
4757 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4758 "2562 No room left for SCSI XRI allocation: "
4759 "max_xri=%d, els_xri=%d\n",
4760 phba->sli4_hba.max_cfg_param.max_xri,
4761 els_xri_cnt);
4762 return -ENOMEM;
4765 /* Allocate memory for the ELS XRI management array */
4766 phba->sli4_hba.lpfc_els_sgl_array =
4767 kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
4768 GFP_KERNEL);
4770 if (!phba->sli4_hba.lpfc_els_sgl_array) {
4771 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4772 "2401 Failed to allocate memory for ELS "
4773 "XRI management array of size %d.\n",
4774 els_xri_cnt);
4775 return -ENOMEM;
4778 /* Keep the SCSI XRI into the XRI management array */
4779 phba->sli4_hba.scsi_xri_max =
4780 phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4781 phba->sli4_hba.scsi_xri_cnt = 0;
4783 phba->sli4_hba.lpfc_scsi_psb_array =
4784 kzalloc((sizeof(struct lpfc_scsi_buf *) *
4785 phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
4787 if (!phba->sli4_hba.lpfc_scsi_psb_array) {
4788 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4789 "2563 Failed to allocate memory for SCSI "
4790 "XRI management array of size %d.\n",
4791 phba->sli4_hba.scsi_xri_max);
4792 kfree(phba->sli4_hba.lpfc_els_sgl_array);
4793 return -ENOMEM;
4796 for (i = 0; i < els_xri_cnt; i++) {
4797 sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
4798 if (sglq_entry == NULL) {
4799 printk(KERN_ERR "%s: only allocated %d sgls of "
4800 "expected %d count. Unloading driver.\n",
4801 __func__, i, els_xri_cnt);
4802 goto out_free_mem;
4805 sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
4806 if (sglq_entry->sli4_xritag == NO_XRI) {
4807 kfree(sglq_entry);
4808 printk(KERN_ERR "%s: failed to allocate XRI.\n"
4809 "Unloading driver.\n", __func__);
4810 goto out_free_mem;
4812 sglq_entry->buff_type = GEN_BUFF_TYPE;
4813 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
4814 if (sglq_entry->virt == NULL) {
4815 kfree(sglq_entry);
4816 printk(KERN_ERR "%s: failed to allocate mbuf.\n"
4817 "Unloading driver.\n", __func__);
4818 goto out_free_mem;
4820 sglq_entry->sgl = sglq_entry->virt;
4821 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4823 /* The list order is used by later block SGL registration */
4824 spin_lock_irq(&phba->hbalock);
4825 sglq_entry->state = SGL_FREED;
4826 list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
4827 phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
4828 phba->sli4_hba.total_sglq_bufs++;
4829 spin_unlock_irq(&phba->hbalock);
4831 return 0;
4833 out_free_mem:
4834 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4835 lpfc_free_sgl_list(phba);
4836 return -ENOMEM;
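/*
 * Editor's note (illustrative): lpfc_init_sgl_list() partitions the XRI space
 * between ELS and SCSI; everything not reserved for ELS becomes scsi_xri_max.
 * A hypothetical worked example of that arithmetic, with made-up counts:
 */
#if 0
static uint32_t lpfc_sketch_scsi_xri_max(uint32_t max_xri,
					 uint32_t els_xri_cnt)
{
	/* e.g. max_xri = 1024, els_xri_cnt = 256  ->  768 SCSI XRIs */
	return max_xri - els_xri_cnt;
}
#endif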
4840 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
4841 * @phba: pointer to lpfc hba data structure.
4843 * This routine is invoked to post rpi header templates to the
4844 * HBA consistent with the SLI-4 interface spec. This routine
4845 * posts a PAGE_SIZE memory region to the port to hold up to
4846 * PAGE_SIZE / 64 rpi context headers.
4847 * No locks are held here because this is an initialization routine
4848 * called only from probe or lpfc_online when interrupts are not
4849 * enabled and the driver is reinitializing the device.
4851 * Return codes
4852 * 0 - successful
4853 * -ENOMEM - No available memory
4854 * -EIO - The mailbox failed to complete successfully.
4857 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
4859 int rc = 0;
4860 int longs;
4861 uint16_t rpi_count;
4862 struct lpfc_rpi_hdr *rpi_hdr;
4864 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
4867 * Provision an rpi bitmask range for discovery. The total count
4868 * is the difference between max and base + 1.
4870 rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
4871 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4873 longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
4874 phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
4875 GFP_KERNEL);
4876 if (!phba->sli4_hba.rpi_bmask)
4877 return -ENOMEM;
4879 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
4880 if (!rpi_hdr) {
4881 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4882 "0391 Error during rpi post operation\n");
4883 lpfc_sli4_remove_rpis(phba);
4884 rc = -ENODEV;
4887 return rc;
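/*
 * Editor's note (illustrative): the rpi bitmask above is sized with the usual
 * round-up-to-longs idiom. A hypothetical worked example of that arithmetic,
 * assuming a 64-bit host (BITS_PER_LONG == 64):
 */
#if 0
static size_t lpfc_sketch_rpi_bmask_longs(uint16_t rpi_count)
{
	/* e.g. rpi_count = 100  ->  (100 + 63) / 64 = 2 longs (128 bits) */
	return (rpi_count + BITS_PER_LONG - 1) / BITS_PER_LONG;
}
#endif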
4891 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
4892 * @phba: pointer to lpfc hba data structure.
4894 * This routine is invoked to allocate a single 4KB memory region to
4895 * support rpis and store it in the phba. This single region
4896 * provides support for up to 64 rpis. The region is used globally
4897 * by the device.
4899 * Returns:
4900 * A valid rpi hdr on success.
4901 * A NULL pointer on any failure.
4903 struct lpfc_rpi_hdr *
4904 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
4906 uint16_t rpi_limit, curr_rpi_range;
4907 struct lpfc_dmabuf *dmabuf;
4908 struct lpfc_rpi_hdr *rpi_hdr;
4909 uint32_t rpi_count;
4911 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
4912 phba->sli4_hba.max_cfg_param.max_rpi - 1;
4914 spin_lock_irq(&phba->hbalock);
4915 curr_rpi_range = phba->sli4_hba.next_rpi;
4916 spin_unlock_irq(&phba->hbalock);
4919 * The port has a limited number of rpis. The increment here
4920 * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
4921 * and to allow the full max_rpi range per port.
4923 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
4924 rpi_count = rpi_limit - curr_rpi_range;
4925 else
4926 rpi_count = LPFC_RPI_HDR_COUNT;
4929 * First allocate the protocol header region for the port. The
4930 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
4932 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4933 if (!dmabuf)
4934 return NULL;
4936 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4937 LPFC_HDR_TEMPLATE_SIZE,
4938 &dmabuf->phys,
4939 GFP_KERNEL);
4940 if (!dmabuf->virt) {
4941 rpi_hdr = NULL;
4942 goto err_free_dmabuf;
4945 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
4946 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
4947 rpi_hdr = NULL;
4948 goto err_free_coherent;
4951 /* Save the rpi header data for cleanup later. */
4952 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
4953 if (!rpi_hdr)
4954 goto err_free_coherent;
4956 rpi_hdr->dmabuf = dmabuf;
4957 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
4958 rpi_hdr->page_count = 1;
4959 spin_lock_irq(&phba->hbalock);
4960 rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
4961 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
4964 * The next_rpi stores the next modulo-64 rpi value to post
4965 * in any subsequent rpi memory region postings.
4967 phba->sli4_hba.next_rpi += rpi_count;
4968 spin_unlock_irq(&phba->hbalock);
4969 return rpi_hdr;
4971 err_free_coherent:
4972 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
4973 dmabuf->virt, dmabuf->phys);
4974 err_free_dmabuf:
4975 kfree(dmabuf);
4976 return NULL;
4980 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
4981 * @phba: pointer to lpfc hba data structure.
4983 * This routine is invoked to remove all memory resources allocated
4984 * to support rpis. This routine presumes the caller has released all
4985 * rpis consumed by fabric or port logins and is prepared to have
4986 * the header pages removed.
4988 void
4989 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
4991 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
4993 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
4994 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
4995 list_del(&rpi_hdr->list);
4996 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
4997 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
4998 kfree(rpi_hdr->dmabuf);
4999 kfree(rpi_hdr);
5002 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5003 memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
5007 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
5008 * @pdev: pointer to pci device data structure.
5010 * This routine is invoked to allocate the driver hba data structure for an
5011 * HBA device. If the allocation is successful, the phba reference to the
5012 * PCI device data structure is set.
5014 * Return codes
5015 * pointer to @phba - successful
5016 * NULL - error
5018 static struct lpfc_hba *
5019 lpfc_hba_alloc(struct pci_dev *pdev)
5021 struct lpfc_hba *phba;
5023 /* Allocate memory for HBA structure */
5024 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5025 if (!phba) {
5026 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5027 return NULL;
5030 /* Set reference to PCI device in HBA structure */
5031 phba->pcidev = pdev;
5033 /* Assign an unused board number */
5034 phba->brd_no = lpfc_get_instance();
5035 if (phba->brd_no < 0) {
5036 kfree(phba);
5037 return NULL;
5040 spin_lock_init(&phba->ct_ev_lock);
5041 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5043 return phba;
5047 * lpfc_hba_free - Free driver hba data structure with a device.
5048 * @phba: pointer to lpfc hba data structure.
5050 * This routine is invoked to free the driver hba data structure with an
5051 * HBA device.
5053 static void
5054 lpfc_hba_free(struct lpfc_hba *phba)
5056 /* Release the driver assigned board number */
5057 idr_remove(&lpfc_hba_index, phba->brd_no);
5059 kfree(phba);
5060 return;
5064 * lpfc_create_shost - Create hba physical port with associated scsi host.
5065 * @phba: pointer to lpfc hba data structure.
5067 * This routine is invoked to create HBA physical port and associate a SCSI
5068 * host with it.
5070 * Return codes
5071 * 0 - successful
5072 * other values - error
5074 static int
5075 lpfc_create_shost(struct lpfc_hba *phba)
5077 struct lpfc_vport *vport;
5078 struct Scsi_Host *shost;
5080 /* Initialize HBA FC structure */
5081 phba->fc_edtov = FF_DEF_EDTOV;
5082 phba->fc_ratov = FF_DEF_RATOV;
5083 phba->fc_altov = FF_DEF_ALTOV;
5084 phba->fc_arbtov = FF_DEF_ARBTOV;
5086 atomic_set(&phba->sdev_cnt, 0);
5087 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5088 if (!vport)
5089 return -ENODEV;
5091 shost = lpfc_shost_from_vport(vport);
5092 phba->pport = vport;
5093 lpfc_debugfs_initialize(vport);
5094 /* Put reference to SCSI host to driver's device private data */
5095 pci_set_drvdata(phba->pcidev, shost);
5097 return 0;
5101 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
5102 * @phba: pointer to lpfc hba data structure.
5104 * This routine is invoked to destroy HBA physical port and the associated
5105 * SCSI host.
5107 static void
5108 lpfc_destroy_shost(struct lpfc_hba *phba)
5110 struct lpfc_vport *vport = phba->pport;
5112 /* Destroy physical port that associated with the SCSI host */
5113 destroy_port(vport);
5115 return;
5119 * lpfc_setup_bg - Setup Block guard structures and debug areas.
5120 * @phba: pointer to lpfc hba data structure.
5121 * @shost: the shost to be used to detect Block guard settings.
5123 * This routine sets up the local Block guard protocol settings for @shost.
5124 * This routine also allocates memory for debugging bg buffers.
5126 static void
5127 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5129 int pagecnt = 10;
5130 if (lpfc_prot_mask && lpfc_prot_guard) {
5131 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5132 "1478 Registering BlockGuard with the "
5133 "SCSI layer\n");
5134 scsi_host_set_prot(shost, lpfc_prot_mask);
5135 scsi_host_set_guard(shost, lpfc_prot_guard);
5137 if (!_dump_buf_data) {
5138 while (pagecnt) {
5139 spin_lock_init(&_dump_buf_lock);
5140 _dump_buf_data =
5141 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5142 if (_dump_buf_data) {
5143 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5144 "9043 BLKGRD: allocated %d pages for "
5145 "_dump_buf_data at 0x%p\n",
5146 (1 << pagecnt), _dump_buf_data);
5147 _dump_buf_data_order = pagecnt;
5148 memset(_dump_buf_data, 0,
5149 ((1 << PAGE_SHIFT) << pagecnt));
5150 break;
5151 } else
5152 --pagecnt;
5154 if (!_dump_buf_data_order)
5155 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5156 "9044 BLKGRD: ERROR unable to allocate "
5157 "memory for hexdump\n");
5158 } else
5159 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5160 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5161 "\n", _dump_buf_data);
5162 if (!_dump_buf_dif) {
5163 while (pagecnt) {
5164 _dump_buf_dif =
5165 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5166 if (_dump_buf_dif) {
5167 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5168 "9046 BLKGRD: allocated %d pages for "
5169 "_dump_buf_dif at 0x%p\n",
5170 (1 << pagecnt), _dump_buf_dif);
5171 _dump_buf_dif_order = pagecnt;
5172 memset(_dump_buf_dif, 0,
5173 ((1 << PAGE_SHIFT) << pagecnt));
5174 break;
5175 } else
5176 --pagecnt;
5178 if (!_dump_buf_dif_order)
5179 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5180 "9047 BLKGRD: ERROR unable to allocate "
5181 "memory for hexdump\n");
5182 } else
5183 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5184 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5185 _dump_buf_dif);
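/*
 * Editor's sketch (illustrative): the BlockGuard debug buffers above are
 * obtained with a shrinking allocation order, i.e. try a large contiguous
 * area first and halve the request until __get_free_pages() succeeds. The
 * helper name below is hypothetical:
 */
#if 0
static char *lpfc_sketch_alloc_dump_buf(unsigned long *order_out)
{
	int order;
	char *buf;

	for (order = 10; order > 0; order--) {
		/* 2^order contiguous pages, e.g. order 10 = 1024 pages */
		buf = (char *)__get_free_pages(GFP_KERNEL, order);
		if (buf) {
			*order_out = order;
			return buf;
		}
	}
	return NULL;	/* no contiguous area available */
}
#endif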
5189 * lpfc_post_init_setup - Perform necessary device post initialization setup.
5190 * @phba: pointer to lpfc hba data structure.
5192 * This routine is invoked to perform all the necessary post initialization
5193 * setup for the device.
5195 static void
5196 lpfc_post_init_setup(struct lpfc_hba *phba)
5198 struct Scsi_Host *shost;
5199 struct lpfc_adapter_event_header adapter_event;
5201 /* Get the default values for Model Name and Description */
5202 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
5205 * hba setup may have changed the hba_queue_depth so we need to
5206 * adjust the value of can_queue.
5208 shost = pci_get_drvdata(phba->pcidev);
5209 shost->can_queue = phba->cfg_hba_queue_depth - 10;
5210 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
5211 lpfc_setup_bg(phba, shost);
5213 lpfc_host_attrib_init(shost);
5215 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
5216 spin_lock_irq(shost->host_lock);
5217 lpfc_poll_start_timer(phba);
5218 spin_unlock_irq(shost->host_lock);
5221 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5222 "0428 Perform SCSI scan\n");
5223 /* Send board arrival event to upper layer */
5224 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
5225 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
5226 fc_host_post_vendor_event(shost, fc_get_event_number(),
5227 sizeof(adapter_event),
5228 (char *) &adapter_event,
5229 LPFC_NL_VENDOR_ID);
5230 return;
5234 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
5235 * @phba: pointer to lpfc hba data structure.
5237 * This routine is invoked to set up the PCI device memory space for device
5238 * with SLI-3 interface spec.
5240 * Return codes
5241 * 0 - successful
5242 * other values - error
5244 static int
5245 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
5247 struct pci_dev *pdev;
5248 unsigned long bar0map_len, bar2map_len;
5249 int i, hbq_count;
5250 void *ptr;
5251 int error = -ENODEV;
5253 /* Obtain PCI device reference */
5254 if (!phba->pcidev)
5255 return error;
5256 else
5257 pdev = phba->pcidev;
5259 /* Set the device DMA mask size */
5260 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
5261 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) {
5262 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
5263 || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) {
5264 return error;
5268 /* Get the bus address of Bar0 and Bar2 and the number of bytes
5269 * required by each mapping.
5271 phba->pci_bar0_map = pci_resource_start(pdev, 0);
5272 bar0map_len = pci_resource_len(pdev, 0);
5274 phba->pci_bar2_map = pci_resource_start(pdev, 2);
5275 bar2map_len = pci_resource_len(pdev, 2);
5277 /* Map HBA SLIM to a kernel virtual address. */
5278 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
5279 if (!phba->slim_memmap_p) {
5280 dev_printk(KERN_ERR, &pdev->dev,
5281 "ioremap failed for SLIM memory.\n");
5282 goto out;
5285 /* Map HBA Control Registers to a kernel virtual address. */
5286 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
5287 if (!phba->ctrl_regs_memmap_p) {
5288 dev_printk(KERN_ERR, &pdev->dev,
5289 "ioremap failed for HBA control registers.\n");
5290 goto out_iounmap_slim;
5293 /* Allocate memory for SLI-2 structures */
5294 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
5295 SLI2_SLIM_SIZE,
5296 &phba->slim2p.phys,
5297 GFP_KERNEL);
5298 if (!phba->slim2p.virt)
5299 goto out_iounmap;
5301 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
5302 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
5303 phba->mbox_ext = (phba->slim2p.virt +
5304 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
5305 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
5306 phba->IOCBs = (phba->slim2p.virt +
5307 offsetof(struct lpfc_sli2_slim, IOCBs));
5309 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
5310 lpfc_sli_hbq_size(),
5311 &phba->hbqslimp.phys,
5312 GFP_KERNEL);
5313 if (!phba->hbqslimp.virt)
5314 goto out_free_slim;
5316 hbq_count = lpfc_sli_hbq_count();
5317 ptr = phba->hbqslimp.virt;
5318 for (i = 0; i < hbq_count; ++i) {
5319 phba->hbqs[i].hbq_virt = ptr;
5320 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
5321 ptr += (lpfc_hbq_defs[i]->entry_count *
5322 sizeof(struct lpfc_hbq_entry));
5324 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
5325 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
5327 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
5329 INIT_LIST_HEAD(&phba->rb_pend_list);
5331 phba->MBslimaddr = phba->slim_memmap_p;
5332 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
5333 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
5334 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
5335 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
5337 return 0;
5339 out_free_slim:
5340 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5341 phba->slim2p.virt, phba->slim2p.phys);
5342 out_iounmap:
5343 iounmap(phba->ctrl_regs_memmap_p);
5344 out_iounmap_slim:
5345 iounmap(phba->slim_memmap_p);
5346 out:
5347 return error;
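/*
 * Editor's sketch (illustrative): the DMA mask setup above follows the
 * standard "prefer 64-bit, fall back to 32-bit" pattern. A hypothetical
 * standalone helper expressing the same policy:
 */
#if 0
static int lpfc_sketch_set_dma_mask(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* full 64-bit addressing available */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* fall back to 32-bit addressing */
	return -ENODEV;		/* neither mask was accepted */
}
#endif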
5351 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
5352 * @phba: pointer to lpfc hba data structure.
5354 * This routine is invoked to unset the PCI device memory space for device
5355 * with SLI-3 interface spec.
5357 static void
5358 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
5360 struct pci_dev *pdev;
5362 /* Obtain PCI device reference */
5363 if (!phba->pcidev)
5364 return;
5365 else
5366 pdev = phba->pcidev;
5368 /* Free coherent DMA memory allocated */
5369 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
5370 phba->hbqslimp.virt, phba->hbqslimp.phys);
5371 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
5372 phba->slim2p.virt, phba->slim2p.phys);
5374 /* I/O memory unmap */
5375 iounmap(phba->ctrl_regs_memmap_p);
5376 iounmap(phba->slim_memmap_p);
5378 return;
5382 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
5383 * @phba: pointer to lpfc hba data structure.
5385 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
5386 * to complete and check its status.
5388 * Return 0 if successful, otherwise -ENODEV.
5391 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
5393 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
5394 struct lpfc_register reg_data;
5395 int i, port_error = 0;
5396 uint32_t if_type;
5398 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
5399 memset(&reg_data, 0, sizeof(reg_data));
5400 if (!phba->sli4_hba.PSMPHRregaddr)
5401 return -ENODEV;
5403 /* Wait up to 30 seconds for the SLI Port POST to be done and ready */
5404 for (i = 0; i < 3000; i++) {
5405 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
5406 &portsmphr_reg.word0) ||
5407 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
5408 /* Port has a fatal POST error, break out */
5409 port_error = -ENODEV;
5410 break;
5412 if (LPFC_POST_STAGE_PORT_READY ==
5413 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
5414 break;
5415 msleep(10);
5419 * If there was a port error during POST, then don't proceed with
5420 * other register reads as the data may not be valid. Just exit.
5422 if (port_error) {
5423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5424 "1408 Port Failed POST - portsmphr=0x%x, "
5425 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
5426 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
5427 portsmphr_reg.word0,
5428 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
5429 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
5430 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
5431 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
5432 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
5433 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
5434 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
5435 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
5436 } else {
5437 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5438 "2534 Device Info: SLIFamily=0x%x, "
5439 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
5440 "SLIHint_2=0x%x, FT=0x%x\n",
5441 bf_get(lpfc_sli_intf_sli_family,
5442 &phba->sli4_hba.sli_intf),
5443 bf_get(lpfc_sli_intf_slirev,
5444 &phba->sli4_hba.sli_intf),
5445 bf_get(lpfc_sli_intf_if_type,
5446 &phba->sli4_hba.sli_intf),
5447 bf_get(lpfc_sli_intf_sli_hint1,
5448 &phba->sli4_hba.sli_intf),
5449 bf_get(lpfc_sli_intf_sli_hint2,
5450 &phba->sli4_hba.sli_intf),
5451 bf_get(lpfc_sli_intf_func_type,
5452 &phba->sli4_hba.sli_intf));
5454 * Check for other Port errors during the initialization
5455 * process. Fail the load if the port did not come up
5456 * correctly.
5458 if_type = bf_get(lpfc_sli_intf_if_type,
5459 &phba->sli4_hba.sli_intf);
5460 switch (if_type) {
5461 case LPFC_SLI_INTF_IF_TYPE_0:
5462 phba->sli4_hba.ue_mask_lo =
5463 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
5464 phba->sli4_hba.ue_mask_hi =
5465 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
5466 uerrlo_reg.word0 =
5467 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
5468 uerrhi_reg.word0 =
5469 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
5470 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
5471 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
5472 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5473 "1422 Unrecoverable Error "
5474 "Detected during POST "
5475 "uerr_lo_reg=0x%x, "
5476 "uerr_hi_reg=0x%x, "
5477 "ue_mask_lo_reg=0x%x, "
5478 "ue_mask_hi_reg=0x%x\n",
5479 uerrlo_reg.word0,
5480 uerrhi_reg.word0,
5481 phba->sli4_hba.ue_mask_lo,
5482 phba->sli4_hba.ue_mask_hi);
5483 port_error = -ENODEV;
5485 break;
5486 case LPFC_SLI_INTF_IF_TYPE_2:
5487 /* Final checks. The port status should be clean. */
5488 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
5489 &reg_data.word0) ||
5490 bf_get(lpfc_sliport_status_err, &reg_data)) {
5491 phba->work_status[0] =
5492 readl(phba->sli4_hba.u.if_type2.
5493 ERR1regaddr);
5494 phba->work_status[1] =
5495 readl(phba->sli4_hba.u.if_type2.
5496 ERR2regaddr);
5497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5498 "2888 Port Error Detected "
5499 "during POST: "
5500 "port status reg 0x%x, "
5501 "port_smphr reg 0x%x, "
5502 "error 1=0x%x, error 2=0x%x\n",
5503 reg_data.word0,
5504 portsmphr_reg.word0,
5505 phba->work_status[0],
5506 phba->work_status[1]);
5507 port_error = -ENODEV;
5509 break;
5510 case LPFC_SLI_INTF_IF_TYPE_1:
5511 default:
5512 break;
5515 return port_error;
5519 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
5520 * @phba: pointer to lpfc hba data structure.
5521 * @if_type: The SLI4 interface type getting configured.
5523 * This routine is invoked to set up SLI4 BAR0 PCI config space register
5524 * memory map.
5526 static void
5527 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
5529 switch (if_type) {
5530 case LPFC_SLI_INTF_IF_TYPE_0:
5531 phba->sli4_hba.u.if_type0.UERRLOregaddr =
5532 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
5533 phba->sli4_hba.u.if_type0.UERRHIregaddr =
5534 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
5535 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
5536 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
5537 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
5538 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
5539 phba->sli4_hba.SLIINTFregaddr =
5540 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5541 break;
5542 case LPFC_SLI_INTF_IF_TYPE_2:
5543 phba->sli4_hba.u.if_type2.ERR1regaddr =
5544 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1;
5545 phba->sli4_hba.u.if_type2.ERR2regaddr =
5546 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2;
5547 phba->sli4_hba.u.if_type2.CTRLregaddr =
5548 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL;
5549 phba->sli4_hba.u.if_type2.STATUSregaddr =
5550 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS;
5551 phba->sli4_hba.SLIINTFregaddr =
5552 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
5553 phba->sli4_hba.PSMPHRregaddr =
5554 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR;
5555 phba->sli4_hba.RQDBregaddr =
5556 phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
5557 phba->sli4_hba.WQDBregaddr =
5558 phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
5559 phba->sli4_hba.EQCQDBregaddr =
5560 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
5561 phba->sli4_hba.MQDBregaddr =
5562 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
5563 phba->sli4_hba.BMBXregaddr =
5564 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
5565 break;
5566 case LPFC_SLI_INTF_IF_TYPE_1:
5567 default:
5568 dev_printk(KERN_ERR, &phba->pcidev->dev,
5569 "FATAL - unsupported SLI4 interface type - %d\n",
5570 if_type);
5571 break;
5576 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
5577 * @phba: pointer to lpfc hba data structure.
5579 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
5580 * memory map.
5582 static void
5583 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
5585 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5586 LPFC_SLIPORT_IF0_SMPHR;
5587 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5588 LPFC_HST_ISR0;
5589 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5590 LPFC_HST_IMR0;
5591 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
5592 LPFC_HST_ISCR0;
5596 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
5597 * @phba: pointer to lpfc hba data structure.
5598 * @vf: virtual function number
5600 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
5601  * based on the given virtual function number, @vf.
5603 * Return 0 if successful, otherwise -ENODEV.
5605 static int
5606 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
5608 if (vf > LPFC_VIR_FUNC_MAX)
5609 return -ENODEV;
5611 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5612 vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
5613 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5614 vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
5615 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5616 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
5617 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5618 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
5619 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
5620 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
5621 return 0;
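/*
 * Illustration (not part of the driver logic above): each virtual function
 * owns one LPFC_VFR_PAGE_SIZE-sized window of the BAR2 doorbell space, so a
 * given doorbell register for function @vf lives at
 *
 *     drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + <register offset>
 *
 * Assuming a hypothetical 4KB VF page size, VF 2's receive queue doorbell
 * would therefore sit 8KB past the start of the doorbell mapping, at
 * 0x2000 + LPFC_RQ_DOORBELL.
 */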
5625 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
5626 * @phba: pointer to lpfc hba data structure.
5628 * This routine is invoked to create the bootstrap mailbox
5629 * region consistent with the SLI-4 interface spec. This
5630 * routine allocates all memory necessary to communicate
5631 * mailbox commands to the port and sets up all alignment
5632 * needs. No locks are expected to be held when calling
5633 * this routine.
5635 * Return codes
5636 * 0 - successful
5637  * 	-ENOMEM - could not allocate memory.
5639 static int
5640 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
5642 uint32_t bmbx_size;
5643 struct lpfc_dmabuf *dmabuf;
5644 struct dma_address *dma_address;
5645 uint32_t pa_addr;
5646 uint64_t phys_addr;
5648 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5649 if (!dmabuf)
5650 return -ENOMEM;
5653  * The bootstrap mailbox region consists of 2 parts
5654 * plus an alignment restriction of 16 bytes.
5656 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
5657 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5658 bmbx_size,
5659 &dmabuf->phys,
5660 GFP_KERNEL);
5661 if (!dmabuf->virt) {
5662 kfree(dmabuf);
5663 return -ENOMEM;
5665 memset(dmabuf->virt, 0, bmbx_size);
5668 * Initialize the bootstrap mailbox pointers now so that the register
5669 * operations are simple later. The mailbox dma address is required
5670 * to be 16-byte aligned. Also align the virtual memory as each
5671  * mailbox is copied into the bmbx mailbox region before issuing the
5672 * command to the port.
5674 phba->sli4_hba.bmbx.dmabuf = dmabuf;
5675 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
5677 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
5678 LPFC_ALIGN_16_BYTE);
5679 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
5680 LPFC_ALIGN_16_BYTE);
5683 * Set the high and low physical addresses now. The SLI4 alignment
5684 * requirement is 16 bytes and the mailbox is posted to the port
5685 * as two 30-bit addresses. The other data is a bit marking whether
5686 * the 30-bit address is the high or low address.
5687 * Upcast bmbx aphys to 64bits so shift instruction compiles
5688 * clean on 32 bit machines.
5690 dma_address = &phba->sli4_hba.bmbx.dma_address;
5691 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
5692 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
5693 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
5694 LPFC_BMBX_BIT1_ADDR_HI);
5696 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
5697 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
5698 LPFC_BMBX_BIT1_ADDR_LO);
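	/*
	 * Worked illustration of the split above: because the aligned
	 * bootstrap mailbox address has its low four bits clear, addr_lo
	 * ends up carrying physical address bits 33:4 and addr_hi bits
	 * 63:34, each packed into bits 31:2 of the posted word.  The
	 * LPFC_BMBX_BIT1_ADDR_HI/LO marker in the remaining low bits is
	 * what lets the port tell the two halves apart.
	 */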
5699 return 0;
5703 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
5704 * @phba: pointer to lpfc hba data structure.
5706 * This routine is invoked to teardown the bootstrap mailbox
5707 * region and release all host resources. This routine requires
5708  * the caller to ensure all mailbox commands have been recovered, that no
5709  * additional mailbox commands are sent, and that interrupts are disabled
5710 * before calling this routine.
5713 static void
5714 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
5716 dma_free_coherent(&phba->pcidev->dev,
5717 phba->sli4_hba.bmbx.bmbx_size,
5718 phba->sli4_hba.bmbx.dmabuf->virt,
5719 phba->sli4_hba.bmbx.dmabuf->phys);
5721 kfree(phba->sli4_hba.bmbx.dmabuf);
5722 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
5726 * lpfc_sli4_read_config - Get the config parameters.
5727 * @phba: pointer to lpfc hba data structure.
5729 * This routine is invoked to read the configuration parameters from the HBA.
5730 * The configuration parameters are used to set the base and maximum values
5731  * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
5732 * allocation for the port.
5734 * Return codes
5735 * 0 - successful
5736 * -ENOMEM - No available memory
5737 * -EIO - The mailbox failed to complete successfully.
5739 static int
5740 lpfc_sli4_read_config(struct lpfc_hba *phba)
5742 LPFC_MBOXQ_t *pmb;
5743 struct lpfc_mbx_read_config *rd_config;
5744 uint32_t rc = 0;
5746 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5747 if (!pmb) {
5748 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5749 "2011 Unable to allocate memory for issuing "
5750 "SLI_CONFIG_SPECIAL mailbox command\n");
5751 return -ENOMEM;
5754 lpfc_read_config(phba, pmb);
5756 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5757 if (rc != MBX_SUCCESS) {
5758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5759 			"2012 Mailbox failed, mbxCmd x%x "
5760 "READ_CONFIG, mbxStatus x%x\n",
5761 bf_get(lpfc_mqe_command, &pmb->u.mqe),
5762 bf_get(lpfc_mqe_status, &pmb->u.mqe));
5763 rc = -EIO;
5764 } else {
5765 rd_config = &pmb->u.mqe.un.rd_config;
5766 phba->sli4_hba.max_cfg_param.max_xri =
5767 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
5768 phba->sli4_hba.max_cfg_param.xri_base =
5769 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
5770 phba->sli4_hba.max_cfg_param.max_vpi =
5771 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
5772 phba->sli4_hba.max_cfg_param.vpi_base =
5773 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
5774 phba->sli4_hba.max_cfg_param.max_rpi =
5775 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
5776 phba->sli4_hba.max_cfg_param.rpi_base =
5777 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
5778 phba->sli4_hba.max_cfg_param.max_vfi =
5779 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
5780 phba->sli4_hba.max_cfg_param.vfi_base =
5781 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
5782 phba->sli4_hba.max_cfg_param.max_fcfi =
5783 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
5784 phba->sli4_hba.max_cfg_param.fcfi_base =
5785 bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
5786 phba->sli4_hba.max_cfg_param.max_eq =
5787 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
5788 phba->sli4_hba.max_cfg_param.max_rq =
5789 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
5790 phba->sli4_hba.max_cfg_param.max_wq =
5791 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
5792 phba->sli4_hba.max_cfg_param.max_cq =
5793 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
5794 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
5795 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
5796 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
5797 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
5798 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
5799 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
5800 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
5801 phba->max_vports = phba->max_vpi;
5802 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5803 "2003 cfg params XRI(B:%d M:%d), "
5804 "VPI(B:%d M:%d) "
5805 "VFI(B:%d M:%d) "
5806 "RPI(B:%d M:%d) "
5807 "FCFI(B:%d M:%d)\n",
5808 phba->sli4_hba.max_cfg_param.xri_base,
5809 phba->sli4_hba.max_cfg_param.max_xri,
5810 phba->sli4_hba.max_cfg_param.vpi_base,
5811 phba->sli4_hba.max_cfg_param.max_vpi,
5812 phba->sli4_hba.max_cfg_param.vfi_base,
5813 phba->sli4_hba.max_cfg_param.max_vfi,
5814 phba->sli4_hba.max_cfg_param.rpi_base,
5815 phba->sli4_hba.max_cfg_param.max_rpi,
5816 phba->sli4_hba.max_cfg_param.fcfi_base,
5817 phba->sli4_hba.max_cfg_param.max_fcfi);
5819 mempool_free(pmb, phba->mbox_mem_pool);
5821 /* Reset the DFT_HBA_Q_DEPTH to the max xri */
5822 if (phba->cfg_hba_queue_depth >
5823 (phba->sli4_hba.max_cfg_param.max_xri -
5824 lpfc_sli4_get_els_iocb_cnt(phba)))
5825 phba->cfg_hba_queue_depth =
5826 phba->sli4_hba.max_cfg_param.max_xri -
5827 lpfc_sli4_get_els_iocb_cnt(phba);
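	/*
	 * Example with hypothetical numbers: if READ_CONFIG reported
	 * max_xri = 1024 and lpfc_sli4_get_els_iocb_cnt() reserved 64 XRIs
	 * for ELS traffic, a configured cfg_hba_queue_depth above 960
	 * would be trimmed to 960 by the check above.
	 */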
5828 return rc;
5832 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
5833 * @phba: pointer to lpfc hba data structure.
5835 * This routine is invoked to setup the port-side endian order when
5836 * the port if_type is 0. This routine has no function for other
5837 * if_types.
5839 * Return codes
5840 * 0 - successful
5841 * -ENOMEM - No available memory
5842 * -EIO - The mailbox failed to complete successfully.
5844 static int
5845 lpfc_setup_endian_order(struct lpfc_hba *phba)
5847 LPFC_MBOXQ_t *mboxq;
5848 uint32_t if_type, rc = 0;
5849 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
5850 HOST_ENDIAN_HIGH_WORD1};
5852 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
5853 switch (if_type) {
5854 case LPFC_SLI_INTF_IF_TYPE_0:
5855 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5856 GFP_KERNEL);
5857 if (!mboxq) {
5858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5859 "0492 Unable to allocate memory for "
5860 "issuing SLI_CONFIG_SPECIAL mailbox "
5861 "command\n");
5862 return -ENOMEM;
5866 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
5867 * two words to contain special data values and no other data.
5869 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
5870 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
5871 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5872 if (rc != MBX_SUCCESS) {
5873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5874 "0493 SLI_CONFIG_SPECIAL mailbox "
5875 "failed with status x%x\n",
5876 rc);
5877 rc = -EIO;
5879 mempool_free(mboxq, phba->mbox_mem_pool);
5880 break;
5881 case LPFC_SLI_INTF_IF_TYPE_2:
5882 case LPFC_SLI_INTF_IF_TYPE_1:
5883 default:
5884 break;
5886 return rc;
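/*
 * Note (an assumption, not spelled out in the code above): the two words
 * HOST_ENDIAN_LOW_WORD0 and HOST_ENDIAN_HIGH_WORD1 are fixed byte patterns,
 * so by examining how they arrive in the first two mailbox words an if_type 0
 * port can presumably infer the host byte order.  As the switch above shows,
 * if_type 2 ports skip this step entirely.
 */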
5890 * lpfc_sli4_queue_create - Create all the SLI4 queues
5891 * @phba: pointer to lpfc hba data structure.
5893 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
5894 * operation. For each SLI4 queue type, the parameters such as queue entry
5895 * count (queue depth) shall be taken from the module parameter. For now,
5896  * we just use some constant number as a placeholder.
5898 * Return codes
5899 * 0 - successful
5900 * -ENOMEM - No available memory
5901 * -EIO - The mailbox failed to complete successfully.
5903 static int
5904 lpfc_sli4_queue_create(struct lpfc_hba *phba)
5906 struct lpfc_queue *qdesc;
5907 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
5908 int cfg_fcp_wq_count;
5909 int cfg_fcp_eq_count;
5912  * Sanity check for configured queue parameters against the run-time
5913 * device parameters
5916 /* Sanity check on FCP fast-path WQ parameters */
5917 cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
5918 if (cfg_fcp_wq_count >
5919 (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
5920 cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
5921 LPFC_SP_WQN_DEF;
5922 if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
5923 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5924 "2581 Not enough WQs (%d) from "
5925 "the pci function for supporting "
5926 "FCP WQs (%d)\n",
5927 phba->sli4_hba.max_cfg_param.max_wq,
5928 phba->cfg_fcp_wq_count);
5929 goto out_error;
5931 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5932 "2582 Not enough WQs (%d) from the pci "
5933 "function for supporting the requested "
5934 "FCP WQs (%d), the actual FCP WQs can "
5935 "be supported: %d\n",
5936 phba->sli4_hba.max_cfg_param.max_wq,
5937 phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
5939 /* The actual number of FCP work queues adopted */
5940 phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
5942 /* Sanity check on FCP fast-path EQ parameters */
5943 cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
5944 if (cfg_fcp_eq_count >
5945 (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
5946 cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
5947 LPFC_SP_EQN_DEF;
5948 if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
5949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5950 "2574 Not enough EQs (%d) from the "
5951 "pci function for supporting FCP "
5952 "EQs (%d)\n",
5953 phba->sli4_hba.max_cfg_param.max_eq,
5954 phba->cfg_fcp_eq_count);
5955 goto out_error;
5957 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5958 "2575 Not enough EQs (%d) from the pci "
5959 "function for supporting the requested "
5960 "FCP EQs (%d), the actual FCP EQs can "
5961 "be supported: %d\n",
5962 phba->sli4_hba.max_cfg_param.max_eq,
5963 phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
5965 /* It does not make sense to have more EQs than WQs */
5966 if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
5967 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5968 "2593 The FCP EQ count(%d) cannot be greater "
5969 "than the FCP WQ count(%d), limiting the "
5970 "FCP EQ count to %d\n", cfg_fcp_eq_count,
5971 phba->cfg_fcp_wq_count,
5972 phba->cfg_fcp_wq_count);
5973 cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
5975 /* The actual number of FCP event queues adopted */
5976 phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
5977 /* The overall number of event queues used */
5978 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
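	/*
	 * Example (hypothetical values): assuming LPFC_SP_WQN_DEF and
	 * LPFC_SP_EQN_DEF are both 1 for this illustration, a port
	 * reporting max_wq = max_eq = 8 would trim a request for 8
	 * fast-path FCP WQs/EQs down to 7 each, and the overall EQ count
	 * (cfg_eqn) would become 7 + 1 = 8.
	 */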
5981 * Create Event Queues (EQs)
5984 /* Get EQ depth from module parameter, fake the default for now */
5985 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
5986 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
5988 /* Create slow path event queue */
5989 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
5990 phba->sli4_hba.eq_ecount);
5991 if (!qdesc) {
5992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5993 "0496 Failed allocate slow-path EQ\n");
5994 goto out_error;
5996 phba->sli4_hba.sp_eq = qdesc;
5998 /* Create fast-path FCP Event Queue(s) */
5999 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6000 phba->cfg_fcp_eq_count), GFP_KERNEL);
6001 if (!phba->sli4_hba.fp_eq) {
6002 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6003 "2576 Failed allocate memory for fast-path "
6004 "EQ record array\n");
6005 goto out_free_sp_eq;
6007 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6008 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6009 phba->sli4_hba.eq_ecount);
6010 if (!qdesc) {
6011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6012 "0497 Failed allocate fast-path EQ\n");
6013 goto out_free_fp_eq;
6015 phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
6019 * Create Complete Queues (CQs)
6022 /* Get CQ depth from module parameter, fake the default for now */
6023 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6024 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6026 /* Create slow-path Mailbox Command Complete Queue */
6027 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6028 phba->sli4_hba.cq_ecount);
6029 if (!qdesc) {
6030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6031 "0500 Failed allocate slow-path mailbox CQ\n");
6032 goto out_free_fp_eq;
6034 phba->sli4_hba.mbx_cq = qdesc;
6036 /* Create slow-path ELS Complete Queue */
6037 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6038 phba->sli4_hba.cq_ecount);
6039 if (!qdesc) {
6040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6041 "0501 Failed allocate slow-path ELS CQ\n");
6042 goto out_free_mbx_cq;
6044 phba->sli4_hba.els_cq = qdesc;
6047 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
6048 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6049 phba->cfg_fcp_eq_count), GFP_KERNEL);
6050 if (!phba->sli4_hba.fcp_cq) {
6051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6052 "2577 Failed allocate memory for fast-path "
6053 "CQ record array\n");
6054 goto out_free_els_cq;
6056 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6057 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6058 phba->sli4_hba.cq_ecount);
6059 if (!qdesc) {
6060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6061 "0499 Failed allocate fast-path FCP "
6062 "CQ (%d)\n", fcp_cqidx);
6063 goto out_free_fcp_cq;
6065 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6068 /* Create Mailbox Command Queue */
6069 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6070 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6072 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
6073 phba->sli4_hba.mq_ecount);
6074 if (!qdesc) {
6075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6076 "0505 Failed allocate slow-path MQ\n");
6077 goto out_free_fcp_cq;
6079 phba->sli4_hba.mbx_wq = qdesc;
6082 * Create all the Work Queues (WQs)
6084 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6085 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6087 /* Create slow-path ELS Work Queue */
6088 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6089 phba->sli4_hba.wq_ecount);
6090 if (!qdesc) {
6091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6092 "0504 Failed allocate slow-path ELS WQ\n");
6093 goto out_free_mbx_wq;
6095 phba->sli4_hba.els_wq = qdesc;
6097 /* Create fast-path FCP Work Queue(s) */
6098 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6099 phba->cfg_fcp_wq_count), GFP_KERNEL);
6100 if (!phba->sli4_hba.fcp_wq) {
6101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6102 "2578 Failed allocate memory for fast-path "
6103 "WQ record array\n");
6104 goto out_free_els_wq;
6106 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6107 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6108 phba->sli4_hba.wq_ecount);
6109 if (!qdesc) {
6110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6111 "0503 Failed allocate fast-path FCP "
6112 "WQ (%d)\n", fcp_wqidx);
6113 goto out_free_fcp_wq;
6115 phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
6119 * Create Receive Queue (RQ)
6121 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6122 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6124 /* Create Receive Queue for header */
6125 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6126 phba->sli4_hba.rq_ecount);
6127 if (!qdesc) {
6128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6129 "0506 Failed allocate receive HRQ\n");
6130 goto out_free_fcp_wq;
6132 phba->sli4_hba.hdr_rq = qdesc;
6134 /* Create Receive Queue for data */
6135 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
6136 phba->sli4_hba.rq_ecount);
6137 if (!qdesc) {
6138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6139 "0507 Failed allocate receive DRQ\n");
6140 goto out_free_hdr_rq;
6142 phba->sli4_hba.dat_rq = qdesc;
6144 return 0;
6146 out_free_hdr_rq:
6147 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6148 phba->sli4_hba.hdr_rq = NULL;
6149 out_free_fcp_wq:
6150 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
6151 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
6152 phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
6154 kfree(phba->sli4_hba.fcp_wq);
6155 out_free_els_wq:
6156 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6157 phba->sli4_hba.els_wq = NULL;
6158 out_free_mbx_wq:
6159 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6160 phba->sli4_hba.mbx_wq = NULL;
6161 out_free_fcp_cq:
6162 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
6163 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
6164 phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
6166 kfree(phba->sli4_hba.fcp_cq);
6167 out_free_els_cq:
6168 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6169 phba->sli4_hba.els_cq = NULL;
6170 out_free_mbx_cq:
6171 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6172 phba->sli4_hba.mbx_cq = NULL;
6173 out_free_fp_eq:
6174 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
6175 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
6176 phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
6178 kfree(phba->sli4_hba.fp_eq);
6179 out_free_sp_eq:
6180 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6181 phba->sli4_hba.sp_eq = NULL;
6182 out_error:
6183 return -ENOMEM;
6187 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
6188 * @phba: pointer to lpfc hba data structure.
6190 * This routine is invoked to release all the SLI4 queues with the FCoE HBA
6191 * operation.
6198 static void
6199 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6201 int fcp_qidx;
6203 /* Release mailbox command work queue */
6204 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
6205 phba->sli4_hba.mbx_wq = NULL;
6207 /* Release ELS work queue */
6208 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
6209 phba->sli4_hba.els_wq = NULL;
6211 /* Release FCP work queue */
6212 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6213 lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
6214 kfree(phba->sli4_hba.fcp_wq);
6215 phba->sli4_hba.fcp_wq = NULL;
6217 /* Release unsolicited receive queue */
6218 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
6219 phba->sli4_hba.hdr_rq = NULL;
6220 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
6221 phba->sli4_hba.dat_rq = NULL;
6223 /* Release ELS complete queue */
6224 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
6225 phba->sli4_hba.els_cq = NULL;
6227 /* Release mailbox command complete queue */
6228 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
6229 phba->sli4_hba.mbx_cq = NULL;
6231 /* Release FCP response complete queue */
6232 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6233 lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
6234 kfree(phba->sli4_hba.fcp_cq);
6235 phba->sli4_hba.fcp_cq = NULL;
6237 /* Release fast-path event queue */
6238 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6239 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
6240 kfree(phba->sli4_hba.fp_eq);
6241 phba->sli4_hba.fp_eq = NULL;
6243 /* Release slow-path event queue */
6244 lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
6245 phba->sli4_hba.sp_eq = NULL;
6247 return;
6251 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
6252 * @phba: pointer to lpfc hba data structure.
6254 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
6255 * operation.
6257 * Return codes
6258 * 0 - successful
6259 * -ENOMEM - No available memory
6260 * -EIO - The mailbox failed to complete successfully.
6263 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6265 int rc = -ENOMEM;
6266 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6267 int fcp_cq_index = 0;
6270 * Set up Event Queues (EQs)
6273 /* Set up slow-path event queue */
6274 if (!phba->sli4_hba.sp_eq) {
6275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6276 "0520 Slow-path EQ not allocated\n");
6277 goto out_error;
6279 rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
6280 LPFC_SP_DEF_IMAX);
6281 if (rc) {
6282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6283 "0521 Failed setup of slow-path EQ: "
6284 "rc = 0x%x\n", rc);
6285 goto out_error;
6287 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6288 "2583 Slow-path EQ setup: queue-id=%d\n",
6289 phba->sli4_hba.sp_eq->queue_id);
6291 /* Set up fast-path event queue */
6292 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6293 if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
6294 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6295 "0522 Fast-path EQ (%d) not "
6296 "allocated\n", fcp_eqidx);
6297 goto out_destroy_fp_eq;
6299 rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
6300 phba->cfg_fcp_imax);
6301 if (rc) {
6302 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6303 "0523 Failed setup of fast-path EQ "
6304 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
6305 goto out_destroy_fp_eq;
6307 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6308 "2584 Fast-path EQ setup: "
6309 "queue[%d]-id=%d\n", fcp_eqidx,
6310 phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
6314 * Set up Complete Queues (CQs)
6317 /* Set up slow-path MBOX Complete Queue as the first CQ */
6318 if (!phba->sli4_hba.mbx_cq) {
6319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6320 "0528 Mailbox CQ not allocated\n");
6321 goto out_destroy_fp_eq;
6323 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
6324 LPFC_MCQ, LPFC_MBOX);
6325 if (rc) {
6326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6327 "0529 Failed setup of slow-path mailbox CQ: "
6328 "rc = 0x%x\n", rc);
6329 goto out_destroy_fp_eq;
6331 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6332 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
6333 phba->sli4_hba.mbx_cq->queue_id,
6334 phba->sli4_hba.sp_eq->queue_id);
6336 /* Set up slow-path ELS Complete Queue */
6337 if (!phba->sli4_hba.els_cq) {
6338 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6339 "0530 ELS CQ not allocated\n");
6340 goto out_destroy_mbx_cq;
6342 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
6343 LPFC_WCQ, LPFC_ELS);
6344 if (rc) {
6345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6346 "0531 Failed setup of slow-path ELS CQ: "
6347 "rc = 0x%x\n", rc);
6348 goto out_destroy_mbx_cq;
6350 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6351 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
6352 phba->sli4_hba.els_cq->queue_id,
6353 phba->sli4_hba.sp_eq->queue_id);
6355 /* Set up fast-path FCP Response Complete Queue */
6356 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
6357 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
6358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6359 "0526 Fast-path FCP CQ (%d) not "
6360 "allocated\n", fcp_cqidx);
6361 goto out_destroy_fcp_cq;
6363 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
6364 phba->sli4_hba.fp_eq[fcp_cqidx],
6365 LPFC_WCQ, LPFC_FCP);
6366 if (rc) {
6367 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6368 "0527 Failed setup of fast-path FCP "
6369 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
6370 goto out_destroy_fcp_cq;
6372 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6373 "2588 FCP CQ setup: cq[%d]-id=%d, "
6374 "parent eq[%d]-id=%d\n",
6375 fcp_cqidx,
6376 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
6377 fcp_cqidx,
6378 phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
6382 * Set up all the Work Queues (WQs)
6385 /* Set up Mailbox Command Queue */
6386 if (!phba->sli4_hba.mbx_wq) {
6387 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6388 "0538 Slow-path MQ not allocated\n");
6389 goto out_destroy_fcp_cq;
6391 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
6392 phba->sli4_hba.mbx_cq, LPFC_MBOX);
6393 if (rc) {
6394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6395 "0539 Failed setup of slow-path MQ: "
6396 "rc = 0x%x\n", rc);
6397 goto out_destroy_fcp_cq;
6399 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6400 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
6401 phba->sli4_hba.mbx_wq->queue_id,
6402 phba->sli4_hba.mbx_cq->queue_id);
6404 /* Set up slow-path ELS Work Queue */
6405 if (!phba->sli4_hba.els_wq) {
6406 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6407 "0536 Slow-path ELS WQ not allocated\n");
6408 goto out_destroy_mbx_wq;
6410 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
6411 phba->sli4_hba.els_cq, LPFC_ELS);
6412 if (rc) {
6413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6414 "0537 Failed setup of slow-path ELS WQ: "
6415 "rc = 0x%x\n", rc);
6416 goto out_destroy_mbx_wq;
6418 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6419 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
6420 phba->sli4_hba.els_wq->queue_id,
6421 phba->sli4_hba.els_cq->queue_id);
6423 /* Set up fast-path FCP Work Queue */
6424 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
6425 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
6426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6427 "0534 Fast-path FCP WQ (%d) not "
6428 "allocated\n", fcp_wqidx);
6429 goto out_destroy_fcp_wq;
6431 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
6432 phba->sli4_hba.fcp_cq[fcp_cq_index],
6433 LPFC_FCP);
6434 if (rc) {
6435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6436 "0535 Failed setup of fast-path FCP "
6437 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
6438 goto out_destroy_fcp_wq;
6440 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6441 "2591 FCP WQ setup: wq[%d]-id=%d, "
6442 "parent cq[%d]-id=%d\n",
6443 fcp_wqidx,
6444 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
6445 fcp_cq_index,
6446 phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
6447 /* Round robin FCP Work Queue's Completion Queue assignment */
6448 fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
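		/*
		 * Illustration of the round robin above: with
		 * cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2 the mapping
		 * is WQ0->CQ0, WQ1->CQ1, WQ2->CQ0, WQ3->CQ1, so each
		 * fast-path completion queue services two work queues.
		 */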
6452 * Create Receive Queue (RQ)
6454 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
6455 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6456 "0540 Receive Queue not allocated\n");
6457 goto out_destroy_fcp_wq;
6459 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6460 phba->sli4_hba.els_cq, LPFC_USOL);
6461 if (rc) {
6462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6463 "0541 Failed setup of Receive Queue: "
6464 "rc = 0x%x\n", rc);
6465 goto out_destroy_fcp_wq;
6467 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6468 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6469 "parent cq-id=%d\n",
6470 phba->sli4_hba.hdr_rq->queue_id,
6471 phba->sli4_hba.dat_rq->queue_id,
6472 phba->sli4_hba.els_cq->queue_id);
6473 return 0;
6475 out_destroy_fcp_wq:
6476 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
6477 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
6478 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6479 out_destroy_mbx_wq:
6480 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6481 out_destroy_fcp_cq:
6482 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
6483 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
6484 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6485 out_destroy_mbx_cq:
6486 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6487 out_destroy_fp_eq:
6488 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
6489 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
6490 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6491 out_error:
6492 return rc;
6496 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
6497 * @phba: pointer to lpfc hba data structure.
6499 * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
6500 * operation.
6507 void
6508 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6510 int fcp_qidx;
6512 /* Unset mailbox command work queue */
6513 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
6514 /* Unset ELS work queue */
6515 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
6516 /* Unset unsolicited receive queue */
6517 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
6518 /* Unset FCP work queue */
6519 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
6520 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
6521 /* Unset mailbox command complete queue */
6522 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
6523 /* Unset ELS complete queue */
6524 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6525 /* Unset FCP response complete queue */
6526 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6527 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6528 /* Unset fast-path event queue */
6529 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6530 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
6531 /* Unset slow-path event queue */
6532 lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
6536 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
6537 * @phba: pointer to lpfc hba data structure.
6539 * This routine is invoked to allocate and set up a pool of completion queue
6540  * events. The body of a completion queue event is a completion queue entry
6541  * (CQE). For now, this pool is used for the interrupt service routine to queue
6542 * the following HBA completion queue events for the worker thread to process:
6543 * - Mailbox asynchronous events
6544 * - Receive queue completion unsolicited events
6545 * Later, this can be used for all the slow-path events.
6547 * Return codes
6548 * 0 - successful
6549 * -ENOMEM - No available memory
6551 static int
6552 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
6554 struct lpfc_cq_event *cq_event;
6555 int i;
6557 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
6558 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
6559 if (!cq_event)
6560 goto out_pool_create_fail;
6561 list_add_tail(&cq_event->list,
6562 &phba->sli4_hba.sp_cqe_event_pool);
6564 return 0;
6566 out_pool_create_fail:
6567 lpfc_sli4_cq_event_pool_destroy(phba);
6568 return -ENOMEM;
6572 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
6573 * @phba: pointer to lpfc hba data structure.
6575 * This routine is invoked to free the pool of completion queue events at
6576 * driver unload time. Note that, it is the responsibility of the driver
6577 * cleanup routine to free all the outstanding completion-queue events
6578 * allocated from this pool back into the pool before invoking this routine
6579 * to destroy the pool.
6581 static void
6582 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
6584 struct lpfc_cq_event *cq_event, *next_cq_event;
6586 list_for_each_entry_safe(cq_event, next_cq_event,
6587 &phba->sli4_hba.sp_cqe_event_pool, list) {
6588 list_del(&cq_event->list);
6589 kfree(cq_event);
6594 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6595 * @phba: pointer to lpfc hba data structure.
6597 * This routine is the lock free version of the API invoked to allocate a
6598 * completion-queue event from the free pool.
6600 * Return: Pointer to the newly allocated completion-queue event if successful
6601 * NULL otherwise.
6603 struct lpfc_cq_event *
6604 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6606 struct lpfc_cq_event *cq_event = NULL;
6608 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
6609 struct lpfc_cq_event, list);
6610 return cq_event;
6614 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
6615 * @phba: pointer to lpfc hba data structure.
6617 * This routine is the lock version of the API invoked to allocate a
6618 * completion-queue event from the free pool.
6620 * Return: Pointer to the newly allocated completion-queue event if successful
6621 * NULL otherwise.
6623 struct lpfc_cq_event *
6624 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
6626 struct lpfc_cq_event *cq_event;
6627 unsigned long iflags;
6629 spin_lock_irqsave(&phba->hbalock, iflags);
6630 cq_event = __lpfc_sli4_cq_event_alloc(phba);
6631 spin_unlock_irqrestore(&phba->hbalock, iflags);
6632 return cq_event;
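/*
 * Typical usage sketch (illustrative only, not a literal caller in this
 * file): an interrupt handler allocates an event, saves the completed CQE
 * in it and queues it for the worker thread, which releases it when done:
 *
 *	cq_event = lpfc_sli4_cq_event_alloc(phba);
 *	if (!cq_event)
 *		return;			-- free pool exhausted
 *	... copy the completed CQE into the event ...
 *	list_add_tail(&cq_event->list, &some_pending_work_list);
 *	...
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 */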
6636 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6637 * @phba: pointer to lpfc hba data structure.
6638 * @cq_event: pointer to the completion queue event to be freed.
6640 * This routine is the lock free version of the API invoked to release a
6641 * completion-queue event back into the free pool.
6643 void
6644 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6645 struct lpfc_cq_event *cq_event)
6647 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
6651 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
6652 * @phba: pointer to lpfc hba data structure.
6653 * @cq_event: pointer to the completion queue event to be freed.
6655 * This routine is the lock version of the API invoked to release a
6656 * completion-queue event back into the free pool.
6658 void
6659 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
6660 struct lpfc_cq_event *cq_event)
6662 unsigned long iflags;
6663 spin_lock_irqsave(&phba->hbalock, iflags);
6664 __lpfc_sli4_cq_event_release(phba, cq_event);
6665 spin_unlock_irqrestore(&phba->hbalock, iflags);
6669 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
6670 * @phba: pointer to lpfc hba data structure.
6672  * This routine frees all the pending completion-queue events
6673  * back into the free pool for device reset.
6675 static void
6676 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
6678 LIST_HEAD(cqelist);
6679 struct lpfc_cq_event *cqe;
6680 unsigned long iflags;
6682 /* Retrieve all the pending WCQEs from pending WCQE lists */
6683 spin_lock_irqsave(&phba->hbalock, iflags);
6684 /* Pending FCP XRI abort events */
6685 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
6686 &cqelist);
6687 /* Pending ELS XRI abort events */
6688 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
6689 &cqelist);
6690 	/* Pending async events */
6691 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
6692 &cqelist);
6693 spin_unlock_irqrestore(&phba->hbalock, iflags);
6695 while (!list_empty(&cqelist)) {
6696 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
6697 lpfc_sli4_cq_event_release(phba, cqe);
6702 * lpfc_pci_function_reset - Reset pci function.
6703 * @phba: pointer to lpfc hba data structure.
6705  * This routine is invoked to request a PCI function reset. It will destroy
6706 * all resources assigned to the PCI function which originates this request.
6708 * Return codes
6709 * 0 - successful
6710 * -ENOMEM - No available memory
6711 * -EIO - The mailbox failed to complete successfully.
6714 lpfc_pci_function_reset(struct lpfc_hba *phba)
6716 LPFC_MBOXQ_t *mboxq;
6717 uint32_t rc = 0, if_type;
6718 uint32_t shdr_status, shdr_add_status;
6719 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
6720 union lpfc_sli4_cfg_shdr *shdr;
6721 struct lpfc_register reg_data;
6723 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6724 switch (if_type) {
6725 case LPFC_SLI_INTF_IF_TYPE_0:
6726 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6727 GFP_KERNEL);
6728 if (!mboxq) {
6729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6730 "0494 Unable to allocate memory for "
6731 "issuing SLI_FUNCTION_RESET mailbox "
6732 "command\n");
6733 return -ENOMEM;
6736 /* Setup PCI function reset mailbox-ioctl command */
6737 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6738 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
6739 LPFC_SLI4_MBX_EMBED);
6740 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6741 shdr = (union lpfc_sli4_cfg_shdr *)
6742 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6743 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6744 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6745 &shdr->response);
6746 if (rc != MBX_TIMEOUT)
6747 mempool_free(mboxq, phba->mbox_mem_pool);
6748 if (shdr_status || shdr_add_status || rc) {
6749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6750 "0495 SLI_FUNCTION_RESET mailbox "
6751 "failed with status x%x add_status x%x,"
6752 " mbx status x%x\n",
6753 shdr_status, shdr_add_status, rc);
6754 rc = -ENXIO;
6756 break;
6757 case LPFC_SLI_INTF_IF_TYPE_2:
6758 for (num_resets = 0;
6759 num_resets < MAX_IF_TYPE_2_RESETS;
6760 num_resets++) {
6761 reg_data.word0 = 0;
6762 bf_set(lpfc_sliport_ctrl_end, &reg_data,
6763 LPFC_SLIPORT_LITTLE_ENDIAN);
6764 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
6765 LPFC_SLIPORT_INIT_PORT);
6766 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
6767 CTRLregaddr);
6770 * Poll the Port Status Register and wait for RDY for
6771 * up to 10 seconds. If the port doesn't respond, treat
6772 * it as an error. If the port responds with RN, start
6773 * the loop again.
6775 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
6776 if (lpfc_readl(phba->sli4_hba.u.if_type2.
6777 STATUSregaddr, &reg_data.word0)) {
6778 rc = -ENODEV;
6779 break;
6781 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
6782 break;
6783 if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
6784 reset_again++;
6785 break;
6787 msleep(10);
6791 * If the port responds to the init request with
6792 * reset needed, delay for a bit and restart the loop.
6794 if (reset_again) {
6795 msleep(10);
6796 reset_again = 0;
6797 continue;
6800 /* Detect any port errors. */
6801 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
6802 &reg_data.word0)) {
6803 rc = -ENODEV;
6804 break;
6806 if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
6807 (rdy_chk >= 1000)) {
6808 phba->work_status[0] = readl(
6809 phba->sli4_hba.u.if_type2.ERR1regaddr);
6810 phba->work_status[1] = readl(
6811 phba->sli4_hba.u.if_type2.ERR2regaddr);
6812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6813 "2890 Port Error Detected "
6814 "during Port Reset: "
6815 "port status reg 0x%x, "
6816 "error 1=0x%x, error 2=0x%x\n",
6817 reg_data.word0,
6818 phba->work_status[0],
6819 phba->work_status[1]);
6820 rc = -ENODEV;
6824 * Terminate the outer loop provided the Port indicated
6825 * ready within 10 seconds.
6827 if (rdy_chk < 1000)
6828 break;
6830 break;
6831 case LPFC_SLI_INTF_IF_TYPE_1:
6832 default:
6833 break;
6836 /* Catch the not-ready port failure after a port reset. */
6837 if (num_resets >= MAX_IF_TYPE_2_RESETS)
6838 rc = -ENODEV;
6840 return rc;
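/*
 * Timing note on the if_type 2 path above: each of the up to
 * MAX_IF_TYPE_2_RESETS INIT_PORT attempts polls the port status register
 * every 10 ms for at most 1000 iterations, i.e. roughly 10 seconds per
 * attempt, before either succeeding on RDY, retrying on RN, or giving up
 * with -ENODEV.
 */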
6844 * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
6845 * @phba: pointer to lpfc hba data structure.
6846 * @cnt: number of nop mailbox commands to send.
6848  * This routine is invoked to send @cnt NOP mailbox commands and
6849 * wait for each command to complete.
6851 * Return: the number of NOP mailbox command completed.
6853 static int
6854 lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
6856 LPFC_MBOXQ_t *mboxq;
6857 int length, cmdsent;
6858 uint32_t mbox_tmo;
6859 uint32_t rc = 0;
6860 uint32_t shdr_status, shdr_add_status;
6861 union lpfc_sli4_cfg_shdr *shdr;
6863 if (cnt == 0) {
6864 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6865 "2518 Requested to send 0 NOP mailbox cmd\n");
6866 return cnt;
6869 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6870 if (!mboxq) {
6871 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6872 "2519 Unable to allocate memory for issuing "
6873 "NOP mailbox command\n");
6874 return 0;
6877 /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
6878 length = (sizeof(struct lpfc_mbx_nop) -
6879 sizeof(struct lpfc_sli4_cfg_mhdr));
6880 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6881 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
6883 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
6884 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
6885 if (!phba->sli4_hba.intr_enable)
6886 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6887 else
6888 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
6889 if (rc == MBX_TIMEOUT)
6890 break;
6891 /* Check return status */
6892 shdr = (union lpfc_sli4_cfg_shdr *)
6893 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
6894 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6895 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
6896 &shdr->response);
6897 if (shdr_status || shdr_add_status || rc) {
6898 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6899 "2520 NOP mailbox command failed "
6900 "status x%x add_status x%x mbx "
6901 "status x%x\n", shdr_status,
6902 shdr_add_status, rc);
6903 break;
6907 if (rc != MBX_TIMEOUT)
6908 mempool_free(mboxq, phba->mbox_mem_pool);
6910 return cmdsent;
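/*
 * Usage sketch (hypothetical caller): a setup path that wants to verify the
 * mailbox channel is healthy could fire off a handful of NOPs and check
 * that every one of them completed:
 *
 *	if (lpfc_sli4_send_nop_mbox_cmds(phba, 5) != 5)
 *		-- treat the mailbox path as suspect --
 */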
6914 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
6915 * @phba: pointer to lpfc hba data structure.
6917 * This routine is invoked to set up the PCI device memory space for device
6918 * with SLI-4 interface spec.
6920 * Return codes
6921 * 0 - successful
6922 * other values - error
6924 static int
6925 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
6927 struct pci_dev *pdev;
6928 unsigned long bar0map_len, bar1map_len, bar2map_len;
6929 int error = -ENODEV;
6930 uint32_t if_type;
6932 /* Obtain PCI device reference */
6933 if (!phba->pcidev)
6934 return error;
6935 else
6936 pdev = phba->pcidev;
6938 /* Set the device DMA mask size */
6939 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
6940 	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
6941 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
6942 		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6943 return error;
6948 * The BARs and register set definitions and offset locations are
6949 * dependent on the if_type.
6951 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
6952 &phba->sli4_hba.sli_intf.word0)) {
6953 return error;
6956 /* There is no SLI3 failback for SLI4 devices. */
6957 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
6958 LPFC_SLI_INTF_VALID) {
6959 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6960 "2894 SLI_INTF reg contents invalid "
6961 "sli_intf reg 0x%x\n",
6962 phba->sli4_hba.sli_intf.word0);
6963 return error;
6966 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6968  * Get the bus address of the SLI4 device BAR regions and the
6969  * number of bytes required by each mapping. The mapping of the
6970  * particular PCI BAR regions is dependent on the type of
6971  * SLI4 device.
6973 if (pci_resource_start(pdev, 0)) {
6974 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6975 bar0map_len = pci_resource_len(pdev, 0);
6978 * Map SLI4 PCI Config Space Register base to a kernel virtual
6979 * addr
6981 phba->sli4_hba.conf_regs_memmap_p =
6982 ioremap(phba->pci_bar0_map, bar0map_len);
6983 if (!phba->sli4_hba.conf_regs_memmap_p) {
6984 dev_printk(KERN_ERR, &pdev->dev,
6985 "ioremap failed for SLI4 PCI config "
6986 "registers.\n");
6987 goto out;
6989 /* Set up BAR0 PCI config space register memory map */
6990 lpfc_sli4_bar0_register_memmap(phba, if_type);
6991 } else {
6992 phba->pci_bar0_map = pci_resource_start(pdev, 1);
6993 bar0map_len = pci_resource_len(pdev, 1);
6994 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
6995 dev_printk(KERN_ERR, &pdev->dev,
6996 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
6997 goto out;
6999 phba->sli4_hba.conf_regs_memmap_p =
7000 ioremap(phba->pci_bar0_map, bar0map_len);
7001 if (!phba->sli4_hba.conf_regs_memmap_p) {
7002 dev_printk(KERN_ERR, &pdev->dev,
7003 "ioremap failed for SLI4 PCI config "
7004 "registers.\n");
7005 goto out;
7007 lpfc_sli4_bar0_register_memmap(phba, if_type);
7010 if (pci_resource_start(pdev, 2)) {
7012 * Map SLI4 if type 0 HBA Control Register base to a kernel
7013 * virtual address and setup the registers.
7015 phba->pci_bar1_map = pci_resource_start(pdev, 2);
7016 bar1map_len = pci_resource_len(pdev, 2);
7017 phba->sli4_hba.ctrl_regs_memmap_p =
7018 ioremap(phba->pci_bar1_map, bar1map_len);
7019 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7020 dev_printk(KERN_ERR, &pdev->dev,
7021 "ioremap failed for SLI4 HBA control registers.\n");
7022 goto out_iounmap_conf;
7024 lpfc_sli4_bar1_register_memmap(phba);
7027 if (pci_resource_start(pdev, 4)) {
7029 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
7030 * virtual address and setup the registers.
7032 phba->pci_bar2_map = pci_resource_start(pdev, 4);
7033 bar2map_len = pci_resource_len(pdev, 4);
7034 phba->sli4_hba.drbl_regs_memmap_p =
7035 ioremap(phba->pci_bar2_map, bar2map_len);
7036 if (!phba->sli4_hba.drbl_regs_memmap_p) {
7037 dev_printk(KERN_ERR, &pdev->dev,
7038 "ioremap failed for SLI4 HBA doorbell registers.\n");
7039 goto out_iounmap_ctrl;
7041 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
7042 if (error)
7043 goto out_iounmap_all;
7046 return 0;
7048 out_iounmap_all:
7049 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7050 out_iounmap_ctrl:
7051 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7052 out_iounmap_conf:
7053 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7054 out:
7055 return error;
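/*
 * Summary of the mapping established above: if_type 0 devices expose the
 * SLI configuration registers through BAR0, the control/status (CSR)
 * registers through BAR1 and the doorbells through BAR2, while if_type 2
 * devices fold everything, doorbells included, into the BAR0 register set
 * (see lpfc_sli4_bar0_register_memmap()).
 */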
7059 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
7060 * @phba: pointer to lpfc hba data structure.
7062 * This routine is invoked to unset the PCI device memory space for device
7063 * with SLI-4 interface spec.
7065 static void
7066 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
7068 struct pci_dev *pdev;
7070 /* Obtain PCI device reference */
7071 if (!phba->pcidev)
7072 return;
7073 else
7074 pdev = phba->pcidev;
7076 /* Free coherent DMA memory allocated */
7078 /* Unmap I/O memory space */
7079 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
7080 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
7081 iounmap(phba->sli4_hba.conf_regs_memmap_p);
7083 return;
7087 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
7088 * @phba: pointer to lpfc hba data structure.
7090 * This routine is invoked to enable the MSI-X interrupt vectors to device
7091 * with SLI-3 interface specs. The kernel function pci_enable_msix() is
7092 * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
7093 * invoked, enables either all or nothing, depending on the current
7094 * availability of PCI vector resources. The device driver is responsible
7095 * for calling the individual request_irq() to register each MSI-X vector
7096  * with an interrupt handler, which is done in this function. Note that
7097  * later, when the device is unloading, the driver should always call free_irq()
7098 * on all MSI-X vectors it has done request_irq() on before calling
7099  * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
7100  * will be left with MSI-X enabled, leaking its vectors.
7102 * Return codes
7103 * 0 - successful
7104 * other values - error
7106 static int
7107 lpfc_sli_enable_msix(struct lpfc_hba *phba)
7109 int rc, i;
7110 LPFC_MBOXQ_t *pmb;
7112 /* Set up MSI-X multi-message vectors */
7113 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7114 phba->msix_entries[i].entry = i;
7116 /* Configure MSI-X capability structure */
7117 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
7118 ARRAY_SIZE(phba->msix_entries));
7119 if (rc) {
7120 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7121 "0420 PCI enable MSI-X failed (%d)\n", rc);
7122 goto msi_fail_out;
7124 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7125 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7126 "0477 MSI-X entry[%d]: vector=x%x "
7127 "message=%d\n", i,
7128 phba->msix_entries[i].vector,
7129 phba->msix_entries[i].entry);
7131 * Assign MSI-X vectors to interrupt handlers
7134 /* vector-0 is associated to slow-path handler */
7135 rc = request_irq(phba->msix_entries[0].vector,
7136 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
7137 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7138 if (rc) {
7139 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7140 "0421 MSI-X slow-path request_irq failed "
7141 "(%d)\n", rc);
7142 goto msi_fail_out;
7145 /* vector-1 is associated to fast-path handler */
7146 rc = request_irq(phba->msix_entries[1].vector,
7147 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
7148 LPFC_FP_DRIVER_HANDLER_NAME, phba);
7150 if (rc) {
7151 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7152 "0429 MSI-X fast-path request_irq failed "
7153 "(%d)\n", rc);
7154 goto irq_fail_out;
7158 * Configure HBA MSI-X attention conditions to messages
7160 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7162 if (!pmb) {
7163 rc = -ENOMEM;
7164 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7165 "0474 Unable to allocate memory for issuing "
7166 "MBOX_CONFIG_MSI command\n");
7167 goto mem_fail_out;
7169 rc = lpfc_config_msi(phba, pmb);
7170 if (rc)
7171 goto mbx_fail_out;
7172 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
7173 if (rc != MBX_SUCCESS) {
7174 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
7175 "0351 Config MSI mailbox command failed, "
7176 "mbxCmd x%x, mbxStatus x%x\n",
7177 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
7178 goto mbx_fail_out;
7181 /* Free memory allocated for mailbox command */
7182 mempool_free(pmb, phba->mbox_mem_pool);
7183 return rc;
7185 mbx_fail_out:
7186 /* Free memory allocated for mailbox command */
7187 mempool_free(pmb, phba->mbox_mem_pool);
7189 mem_fail_out:
7190 /* free the irq already requested */
7191 free_irq(phba->msix_entries[1].vector, phba);
7193 irq_fail_out:
7194 /* free the irq already requested */
7195 free_irq(phba->msix_entries[0].vector, phba);
7197 msi_fail_out:
7198 /* Unconfigure MSI-X capability structure */
7199 pci_disable_msix(phba->pcidev);
7200 return rc;
7204 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
7205 * @phba: pointer to lpfc hba data structure.
7207 * This routine is invoked to release the MSI-X vectors and then disable the
7208 * MSI-X interrupt mode to device with SLI-3 interface spec.
7210 static void
7211 lpfc_sli_disable_msix(struct lpfc_hba *phba)
7213 int i;
7215 /* Free up MSI-X multi-message vectors */
7216 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
7217 free_irq(phba->msix_entries[i].vector, phba);
7218 /* Disable MSI-X */
7219 pci_disable_msix(phba->pcidev);
7221 return;
7225 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
7226 * @phba: pointer to lpfc hba data structure.
7228 * This routine is invoked to enable the MSI interrupt mode to device with
7229 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
7230 * enable the MSI vector. The device driver is responsible for calling the
7231 * request_irq() to register the MSI vector with an interrupt handler, which
7232 * is done in this function.
7234 * Return codes
7235 * 0 - successful
7236 * other values - error
7238 static int
7239 lpfc_sli_enable_msi(struct lpfc_hba *phba)
7241 int rc;
7243 rc = pci_enable_msi(phba->pcidev);
7244 if (!rc)
7245 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7246 "0462 PCI enable MSI mode success.\n");
7247 else {
7248 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7249 "0471 PCI enable MSI mode failed (%d)\n", rc);
7250 return rc;
7253 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7254 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7255 if (rc) {
7256 pci_disable_msi(phba->pcidev);
7257 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7258 "0478 MSI request_irq failed (%d)\n", rc);
7260 return rc;
7264 * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
7265 * @phba: pointer to lpfc hba data structure.
7267 * This routine is invoked to disable the MSI interrupt mode to device with
7268 * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
7269 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7270 * results in a BUG_ON() and the device will be left with MSI enabled,
7271 * leaking its vector.
7273 static void
7274 lpfc_sli_disable_msi(struct lpfc_hba *phba)
7276 free_irq(phba->pcidev->irq, phba);
7277 pci_disable_msi(phba->pcidev);
7278 return;
7282 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
7283 * @phba: pointer to lpfc hba data structure.
7285 * This routine is invoked to enable device interrupt and associate driver's
7286 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
7287 * spec. Depending on the interrupt mode configured in the driver, the driver
7288 * will try to fall back from the configured interrupt mode to an interrupt
7289 * mode which is supported by the platform, kernel, and device in the order
7290 * of:
7291 * MSI-X -> MSI -> IRQ.
7293 * Return codes
7294 * interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx) - successful
7295 * LPFC_INTR_ERROR - error
7297 static uint32_t
7298 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7300 uint32_t intr_mode = LPFC_INTR_ERROR;
7301 int retval;
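/* cfg_mode mirrors phba->cfg_use_msi: 2 = try MSI-X first, 1 = try MSI, 0 = INTx */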
7303 if (cfg_mode == 2) {
7304 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
7305 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
7306 if (!retval) {
7307 /* Now, try to enable MSI-X interrupt mode */
7308 retval = lpfc_sli_enable_msix(phba);
7309 if (!retval) {
7310 /* Indicate initialization to MSI-X mode */
7311 phba->intr_type = MSIX;
7312 intr_mode = 2;
7317 /* Fall back to MSI if MSI-X initialization failed */
7318 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7319 retval = lpfc_sli_enable_msi(phba);
7320 if (!retval) {
7321 /* Indicate initialization to MSI mode */
7322 phba->intr_type = MSI;
7323 intr_mode = 1;
7327 /* Fall back to INTx if both MSI-X/MSI initialization failed */
7328 if (phba->intr_type == NONE) {
7329 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
7330 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7331 if (!retval) {
7332 /* Indicate initialization to INTx mode */
7333 phba->intr_type = INTx;
7334 intr_mode = 0;
7337 return intr_mode;
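/*
 * Illustrative usage (a condensed sketch of the probe-time call sequence
 * later in this file): callers treat the return value as the active mode
 * and only fail hard on LPFC_INTR_ERROR, e.g.
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		return -ENODEV;
 *	phba->intr_mode = intr_mode;
 */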
7341 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
7342 * @phba: pointer to lpfc hba data structure.
7344 * This routine is invoked to disable device interrupt and disassociate the
7345 * driver's interrupt handler(s) from interrupt vector(s) to device with
7346 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
7347 * release the interrupt vector(s) for the message signaled interrupt.
7349 static void
7350 lpfc_sli_disable_intr(struct lpfc_hba *phba)
7352 /* Disable the currently initialized interrupt mode */
7353 if (phba->intr_type == MSIX)
7354 lpfc_sli_disable_msix(phba);
7355 else if (phba->intr_type == MSI)
7356 lpfc_sli_disable_msi(phba);
7357 else if (phba->intr_type == INTx)
7358 free_irq(phba->pcidev->irq, phba);
7360 /* Reset interrupt management states */
7361 phba->intr_type = NONE;
7362 phba->sli.slistat.sli_intr = 0;
7364 return;
7368 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
7369 * @phba: pointer to lpfc hba data structure.
7371 * This routine is invoked to enable the MSI-X interrupt vectors to device
7372 * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
7373 * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
7374 * enables either all or nothing, depending on the current availability of
7375 * PCI vector resources. The device driver is responsible for calling the
7376 * individual request_irq() to register each MSI-X vector with an interrupt
7377 * handler, which is done in this function. Note that later, when the device is
7378 * unloading, the driver should always call free_irq() on all MSI-X vectors
7379 * it has done request_irq() on before calling pci_disable_msix(). Failure
7380 * to do so results in a BUG_ON() and the device will be left with MSI-X
7381 * enabled, leaking its vectors.
7383 * Return codes
7384 * 0 - successful
7385 * other values - error
7387 static int
7388 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
7390 int vectors, rc, index;
7392 /* Set up MSI-X multi-message vectors */
7393 for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
7394 phba->sli4_hba.msix_entries[index].entry = index;
7396 /* Configure MSI-X capability structure */
7397 vectors = phba->sli4_hba.cfg_eqn;
7398 enable_msix_vectors:
7399 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
7400 vectors);
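/*
 * A positive return from pci_enable_msix() reports how many MSI-X vectors
 * are actually available; retry the request with that reduced count.
 */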
7401 if (rc > 1) {
7402 vectors = rc;
7403 goto enable_msix_vectors;
7404 } else if (rc) {
7405 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7406 "0484 PCI enable MSI-X failed (%d)\n", rc);
7407 goto msi_fail_out;
7410 /* Log MSI-X vector assignment */
7411 for (index = 0; index < vectors; index++)
7412 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7413 "0489 MSI-X entry[%d]: vector=x%x "
7414 "message=%d\n", index,
7415 phba->sli4_hba.msix_entries[index].vector,
7416 phba->sli4_hba.msix_entries[index].entry);
7418 * Assign MSI-X vectors to interrupt handlers
7421 /* The first vector must be associated with the slow-path handler for MQ */
7422 rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
7423 &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
7424 LPFC_SP_DRIVER_HANDLER_NAME, phba);
7425 if (rc) {
7426 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7427 "0485 MSI-X slow-path request_irq failed "
7428 "(%d)\n", rc);
7429 goto msi_fail_out;
7432 /* The rest of the vector(s) are associated to fast-path handler(s) */
7433 for (index = 1; index < vectors; index++) {
7434 phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
7435 phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
7436 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
7437 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
7438 LPFC_FP_DRIVER_HANDLER_NAME,
7439 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7440 if (rc) {
7441 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7442 "0486 MSI-X fast-path (%d) "
7443 "request_irq failed (%d)\n", index, rc);
7444 goto cfg_fail_out;
7447 phba->sli4_hba.msix_vec_nr = vectors;
7449 return rc;
7451 cfg_fail_out:
7452 /* free the irq already requested */
7453 for (--index; index >= 1; index--)
7454 free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
7455 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7457 /* free the irq already requested */
7458 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7460 msi_fail_out:
7461 /* Unconfigure MSI-X capability structure */
7462 pci_disable_msix(phba->pcidev);
7463 return rc;
7467 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
7468 * @phba: pointer to lpfc hba data structure.
7470 * This routine is invoked to release the MSI-X vectors and then disable the
7471 * MSI-X interrupt mode to device with SLI-4 interface spec.
7473 static void
7474 lpfc_sli4_disable_msix(struct lpfc_hba *phba)
7476 int index;
7478 /* Free up MSI-X multi-message vectors */
7479 free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
7481 for (index = 1; index < phba->sli4_hba.msix_vec_nr; index++)
7482 free_irq(phba->sli4_hba.msix_entries[index].vector,
7483 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
7485 /* Disable MSI-X */
7486 pci_disable_msix(phba->pcidev);
7488 return;
7492 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
7493 * @phba: pointer to lpfc hba data structure.
7495 * This routine is invoked to enable the MSI interrupt mode to device with
7496 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
7497 * to enable the MSI vector. The device driver is responsible for calling
7498 * the request_irq() to register the MSI vector with an interrupt handler,
7499 * which is done in this function.
7501 * Return codes
7502 * 0 - successful
7503 * other values - error
7505 static int
7506 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
7508 int rc, index;
7510 rc = pci_enable_msi(phba->pcidev);
7511 if (!rc)
7512 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7513 "0487 PCI enable MSI mode success.\n");
7514 else {
7515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7516 "0488 PCI enable MSI mode failed (%d)\n", rc);
7517 return rc;
7520 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7521 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7522 if (rc) {
7523 pci_disable_msi(phba->pcidev);
7524 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7525 "0490 MSI request_irq failed (%d)\n", rc);
7526 return rc;
7529 for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
7530 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7531 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7534 return 0;
7538 * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
7539 * @phba: pointer to lpfc hba data structure.
7541 * This routine is invoked to disable the MSI interrupt mode to device with
7542 * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
7543 * done request_irq() on before calling pci_disable_msi(). Failure to do so
7544 * results in a BUG_ON() and the device will be left with MSI enabled,
7545 * leaking its vector.
7547 static void
7548 lpfc_sli4_disable_msi(struct lpfc_hba *phba)
7550 free_irq(phba->pcidev->irq, phba);
7551 pci_disable_msi(phba->pcidev);
7552 return;
7556 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
7557 * @phba: pointer to lpfc hba data structure.
7559 * This routine is invoked to enable device interrupt and associate driver's
7560 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
7561 * interface spec. Depending on the interrupt mode configured in the driver,
7562 * the driver will try to fall back from the configured interrupt mode to an
7563 * interrupt mode which is supported by the platform, kernel, and device in
7564 * the order of:
7565 * MSI-X -> MSI -> IRQ.
7567 * Return codes
7568 * interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx) - successful
7569 * LPFC_INTR_ERROR - error
7571 static uint32_t
7572 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
7574 uint32_t intr_mode = LPFC_INTR_ERROR;
7575 int retval, index;
7577 if (cfg_mode == 2) {
7578 /* Preparation before conf_msi mbox cmd */
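/* Unlike the SLI-3 path, no CONFIG_PORT mailbox command is needed here */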
7579 retval = 0;
7580 if (!retval) {
7581 /* Now, try to enable MSI-X interrupt mode */
7582 retval = lpfc_sli4_enable_msix(phba);
7583 if (!retval) {
7584 /* Indicate initialization to MSI-X mode */
7585 phba->intr_type = MSIX;
7586 intr_mode = 2;
7591 /* Fall back to MSI if MSI-X initialization failed */
7592 if (cfg_mode >= 1 && phba->intr_type == NONE) {
7593 retval = lpfc_sli4_enable_msi(phba);
7594 if (!retval) {
7595 /* Indicate initialization to MSI mode */
7596 phba->intr_type = MSI;
7597 intr_mode = 1;
7601 /* Fall back to INTx if both MSI-X/MSI initialization failed */
7602 if (phba->intr_type == NONE) {
7603 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
7604 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
7605 if (!retval) {
7606 /* Indicate initialization to INTx mode */
7607 phba->intr_type = INTx;
7608 intr_mode = 0;
7609 for (index = 0; index < phba->cfg_fcp_eq_count;
7610 index++) {
7611 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
7612 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
7616 return intr_mode;
7620 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
7621 * @phba: pointer to lpfc hba data structure.
7623 * This routine is invoked to disable device interrupt and disassociate
7624 * the driver's interrupt handler(s) from interrupt vector(s) to device
7625 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
7626 * will release the interrupt vector(s) for the message signaled interrupt.
7628 static void
7629 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
7631 /* Disable the currently initialized interrupt mode */
7632 if (phba->intr_type == MSIX)
7633 lpfc_sli4_disable_msix(phba);
7634 else if (phba->intr_type == MSI)
7635 lpfc_sli4_disable_msi(phba);
7636 else if (phba->intr_type == INTx)
7637 free_irq(phba->pcidev->irq, phba);
7639 /* Reset interrupt management states */
7640 phba->intr_type = NONE;
7641 phba->sli.slistat.sli_intr = 0;
7643 return;
7647 * lpfc_unset_hba - Unset SLI3 hba device initialization
7648 * @phba: pointer to lpfc hba data structure.
7650 * This routine is invoked to unset the HBA device initialization steps to
7651 * a device with SLI-3 interface spec.
7653 static void
7654 lpfc_unset_hba(struct lpfc_hba *phba)
7656 struct lpfc_vport *vport = phba->pport;
7657 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7659 spin_lock_irq(shost->host_lock);
7660 vport->load_flag |= FC_UNLOADING;
7661 spin_unlock_irq(shost->host_lock);
7663 lpfc_stop_hba_timers(phba);
7665 phba->pport->work_port_events = 0;
7667 lpfc_sli_hba_down(phba);
7669 lpfc_sli_brdrestart(phba);
7671 lpfc_sli_disable_intr(phba);
7673 return;
7677 * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
7678 * @phba: pointer to lpfc hba data structure.
7680 * This routine is invoked to unset the HBA device initialization steps to
7681 * a device with SLI-4 interface spec.
7683 static void
7684 lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7686 struct lpfc_vport *vport = phba->pport;
7687 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7689 spin_lock_irq(shost->host_lock);
7690 vport->load_flag |= FC_UNLOADING;
7691 spin_unlock_irq(shost->host_lock);
7693 phba->pport->work_port_events = 0;
7695 /* Stop the SLI4 device port */
7696 lpfc_stop_port(phba);
7698 lpfc_sli4_disable_intr(phba);
7700 /* Reset SLI4 HBA FCoE function */
7701 lpfc_pci_function_reset(phba);
7703 return;
7707 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
7708 * @phba: Pointer to HBA context object.
7710 * This function is called in the SLI4 code path to wait for completion
7711 * of device's XRIs exchange busy. It will check the XRI exchange busy
7712 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
7713 * that, it will check the XRI exchange busy on outstanding FCP and ELS
7714 * I/Os every 30 seconds, log error message, and wait forever. Only when
7715 * all XRI exchange busy conditions complete, the driver unload shall proceed
7716 * with invoking the function reset ioctl mailbox command to the CNA and
7717 * the rest of the driver unload resource release.
7719 static void
7720 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
7722 int wait_time = 0;
7723 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7724 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7726 while (!fcp_xri_cmpl || !els_xri_cmpl) {
7727 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
7728 if (!fcp_xri_cmpl)
7729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7730 "2877 FCP XRI exchange busy "
7731 "wait time: %d seconds.\n",
7732 wait_time/1000);
7733 if (!els_xri_cmpl)
7734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7735 "2878 ELS XRI exchange busy "
7736 "wait time: %d seconds.\n",
7737 wait_time/1000);
7738 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
7739 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
7740 } else {
7741 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
7742 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
7744 fcp_xri_cmpl =
7745 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
7746 els_xri_cmpl =
7747 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
7752 * lpfc_sli4_hba_unset - Unset the fcoe hba
7753 * @phba: Pointer to HBA context object.
7755 * This function is called in the SLI4 code path to reset the HBA's FCoE
7756 * function. The caller is not required to hold any lock. This routine
7757 * issues PCI function reset mailbox command to reset the FCoE function.
7758 * At the end of the function, it calls lpfc_hba_down_post function to
7759 * free any pending commands.
7761 static void
7762 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
7764 int wait_cnt = 0;
7765 LPFC_MBOXQ_t *mboxq;
7767 lpfc_stop_hba_timers(phba);
7768 phba->sli4_hba.intr_enable = 0;
7771 * Gracefully wait out any potentially outstanding asynchronous
7772 * mailbox command.
7775 /* First, block any pending async mailbox command from being posted */
7776 spin_lock_irq(&phba->hbalock);
7777 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7778 spin_unlock_irq(&phba->hbalock);
7779 /* Now, trying to wait it out if we can */
7780 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7781 msleep(10);
7782 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
7783 break;
7785 /* Forcefully release the outstanding mailbox command if timed out */
7786 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7787 spin_lock_irq(&phba->hbalock);
7788 mboxq = phba->sli.mbox_active;
7789 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7790 __lpfc_mbox_cmpl_put(phba, mboxq);
7791 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7792 phba->sli.mbox_active = NULL;
7793 spin_unlock_irq(&phba->hbalock);
7796 /* Abort all iocbs associated with the hba */
7797 lpfc_sli_hba_iocb_abort(phba);
7799 /* Wait for completion of device XRI exchange busy */
7800 lpfc_sli4_xri_exchange_busy_wait(phba);
7802 /* Disable PCI subsystem interrupt */
7803 lpfc_sli4_disable_intr(phba);
7805 /* Stop kthread signal shall trigger work_done one more time */
7806 kthread_stop(phba->worker_thread);
7808 /* Reset SLI4 HBA FCoE function */
7809 lpfc_pci_function_reset(phba);
7811 /* Stop the SLI4 device port */
7812 phba->pport->work_port_events = 0;
7816 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
7817 * @phba: Pointer to HBA context object.
7818 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7820 * This function is called in the SLI4 code path to read the port's
7821 * sli4 capabilities.
7823 * This function may be called from any context that can block-wait
7824 * for the completion. The expectation is that this routine is called
7825 * typically from probe_one or from the online routine.
7828 lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7830 int rc;
7831 struct lpfc_mqe *mqe;
7832 struct lpfc_pc_sli4_params *sli4_params;
7833 uint32_t mbox_tmo;
7835 rc = 0;
7836 mqe = &mboxq->u.mqe;
7838 /* Read the port's SLI4 Parameters port capabilities */
7839 lpfc_pc_sli4_params(mboxq);
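/*
 * If interrupts are not yet enabled, poll the mailbox for completion;
 * otherwise block and wait with the mailbox timeout.
 */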
7840 if (!phba->sli4_hba.intr_enable)
7841 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7842 else {
7843 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES);
7844 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7847 if (unlikely(rc))
7848 return 1;
7850 sli4_params = &phba->sli4_hba.pc_sli4_params;
7851 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
7852 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
7853 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
7854 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
7855 &mqe->un.sli4_params);
7856 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
7857 &mqe->un.sli4_params);
7858 sli4_params->proto_types = mqe->un.sli4_params.word3;
7859 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
7860 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
7861 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
7862 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
7863 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
7864 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
7865 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
7866 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
7867 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
7868 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
7869 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
7870 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
7871 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
7872 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
7873 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
7874 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
7875 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
7876 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
7877 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
7878 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
7879 return rc;
7883 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
7884 * @phba: Pointer to HBA context object.
7885 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
7887 * This function is called in the SLI4 code path to read the port's
7888 * sli4 capabilities.
7890 * This function may be called from any context that can block-wait
7891 * for the completion. The expectation is that this routine is called
7892 * typically from probe_one or from the online routine.
7895 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7897 int rc;
7898 struct lpfc_mqe *mqe = &mboxq->u.mqe;
7899 struct lpfc_pc_sli4_params *sli4_params;
7900 int length;
7901 struct lpfc_sli4_parameters *mbx_sli4_parameters;
7903 /* Read the port's SLI4 Config Parameters */
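/* The embedded request length excludes the SLI4 config header */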
7904 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
7905 sizeof(struct lpfc_sli4_cfg_mhdr));
7906 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7907 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
7908 length, LPFC_SLI4_MBX_EMBED);
7909 if (!phba->sli4_hba.intr_enable)
7910 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7911 else
7912 rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
7913 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
7914 if (unlikely(rc))
7915 return rc;
7916 sli4_params = &phba->sli4_hba.pc_sli4_params;
7917 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
7918 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
7919 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
7920 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
7921 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
7922 mbx_sli4_parameters);
7923 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
7924 mbx_sli4_parameters);
7925 if (bf_get(cfg_phwq, mbx_sli4_parameters))
7926 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
7927 else
7928 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
7929 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
7930 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
7931 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
7932 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
7933 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
7934 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
7935 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
7936 mbx_sli4_parameters);
7937 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
7938 mbx_sli4_parameters);
7939 return 0;
7943 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
7944 * @pdev: pointer to PCI device
7945 * @pid: pointer to PCI device identifier
7947 * This routine is to be called to attach a device with SLI-3 interface spec
7948 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
7949 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
7950 * information of the device and driver to see if the driver states that it can
7951 * support this kind of device. If the match is successful, the driver core
7952 * invokes this routine. If this routine determines it can claim the HBA, it
7953 * does all the initialization that it needs to do to handle the HBA properly.
7955 * Return code
7956 * 0 - driver can claim the device
7957 * negative value - driver can not claim the device
7959 static int __devinit
7960 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
7962 struct lpfc_hba *phba;
7963 struct lpfc_vport *vport = NULL;
7964 struct Scsi_Host *shost = NULL;
7965 int error;
7966 uint32_t cfg_mode, intr_mode;
7968 /* Allocate memory for HBA structure */
7969 phba = lpfc_hba_alloc(pdev);
7970 if (!phba)
7971 return -ENOMEM;
7973 /* Perform generic PCI device enabling operation */
7974 error = lpfc_enable_pci_dev(phba);
7975 if (error) {
7976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7977 "1401 Failed to enable pci device.\n");
7978 goto out_free_phba;
7981 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
7982 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
7983 if (error)
7984 goto out_disable_pci_dev;
7986 /* Set up SLI-3 specific device PCI memory space */
7987 error = lpfc_sli_pci_mem_setup(phba);
7988 if (error) {
7989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7990 "1402 Failed to set up pci memory space.\n");
7991 goto out_disable_pci_dev;
7994 /* Set up phase-1 common device driver resources */
7995 error = lpfc_setup_driver_resource_phase1(phba);
7996 if (error) {
7997 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7998 "1403 Failed to set up driver resource.\n");
7999 goto out_unset_pci_mem_s3;
8002 /* Set up SLI-3 specific device driver resources */
8003 error = lpfc_sli_driver_resource_setup(phba);
8004 if (error) {
8005 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8006 "1404 Failed to set up driver resource.\n");
8007 goto out_unset_pci_mem_s3;
8010 /* Initialize and populate the iocb list per host */
8011 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
8012 if (error) {
8013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8014 "1405 Failed to initialize iocb list.\n");
8015 goto out_unset_driver_resource_s3;
8018 /* Set up common device driver resources */
8019 error = lpfc_setup_driver_resource_phase2(phba);
8020 if (error) {
8021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8022 "1406 Failed to set up driver resource.\n");
8023 goto out_free_iocb_list;
8026 /* Create SCSI host to the physical port */
8027 error = lpfc_create_shost(phba);
8028 if (error) {
8029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8030 "1407 Failed to create scsi host.\n");
8031 goto out_unset_driver_resource;
8034 /* Configure sysfs attributes */
8035 vport = phba->pport;
8036 error = lpfc_alloc_sysfs_attr(vport);
8037 if (error) {
8038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8039 "1476 Failed to allocate sysfs attr\n");
8040 goto out_destroy_shost;
8043 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8044 /* Now, trying to enable interrupt and bring up the device */
8045 cfg_mode = phba->cfg_use_msi;
8046 while (true) {
8047 /* Put device to a known state before enabling interrupt */
8048 lpfc_stop_port(phba);
8049 /* Configure and enable interrupt */
8050 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
8051 if (intr_mode == LPFC_INTR_ERROR) {
8052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8053 "0431 Failed to enable interrupt.\n");
8054 error = -ENODEV;
8055 goto out_free_sysfs_attr;
8057 /* SLI-3 HBA setup */
8058 if (lpfc_sli_hba_setup(phba)) {
8059 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8060 "1477 Failed to set up hba\n");
8061 error = -ENODEV;
8062 goto out_remove_device;
8065 /* Wait 50ms for the interrupts of previous mailbox commands */
8066 msleep(50);
8067 /* Check active interrupts on message signaled interrupts */
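/*
 * INTx (mode 0) is not tested; for MSI/MSI-X the mailbox traffic above
 * must have raised more than LPFC_MSIX_VECTORS interrupts.
 */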
8068 if (intr_mode == 0 ||
8069 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
8070 /* Log the current active interrupt mode */
8071 phba->intr_mode = intr_mode;
8072 lpfc_log_intr_mode(phba, intr_mode);
8073 break;
8074 } else {
8075 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8076 "0447 Configure interrupt mode (%d) "
8077 "failed active interrupt test.\n",
8078 intr_mode);
8079 /* Disable the current interrupt mode */
8080 lpfc_sli_disable_intr(phba);
8081 /* Try next level of interrupt mode */
8082 cfg_mode = --intr_mode;
8086 /* Perform post initialization setup */
8087 lpfc_post_init_setup(phba);
8089 /* Check if there are static vports to be created. */
8090 lpfc_create_static_vport(phba);
8092 return 0;
8094 out_remove_device:
8095 lpfc_unset_hba(phba);
8096 out_free_sysfs_attr:
8097 lpfc_free_sysfs_attr(vport);
8098 out_destroy_shost:
8099 lpfc_destroy_shost(phba);
8100 out_unset_driver_resource:
8101 lpfc_unset_driver_resource_phase2(phba);
8102 out_free_iocb_list:
8103 lpfc_free_iocb_list(phba);
8104 out_unset_driver_resource_s3:
8105 lpfc_sli_driver_resource_unset(phba);
8106 out_unset_pci_mem_s3:
8107 lpfc_sli_pci_mem_unset(phba);
8108 out_disable_pci_dev:
8109 lpfc_disable_pci_dev(phba);
8110 if (shost)
8111 scsi_host_put(shost);
8112 out_free_phba:
8113 lpfc_hba_free(phba);
8114 return error;
8118 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
8119 * @pdev: pointer to PCI device
8121 * This routine is to be called to detach a device with SLI-3 interface
8122 * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
8123 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8124 * device to be removed from the PCI subsystem properly.
8126 static void __devexit
8127 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8129 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8130 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8131 struct lpfc_vport **vports;
8132 struct lpfc_hba *phba = vport->phba;
8133 int i;
8134 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
8136 spin_lock_irq(&phba->hbalock);
8137 vport->load_flag |= FC_UNLOADING;
8138 spin_unlock_irq(&phba->hbalock);
8140 lpfc_free_sysfs_attr(vport);
8142 /* Release all the vports against this physical port */
8143 vports = lpfc_create_vport_work_array(phba);
8144 if (vports != NULL)
8145 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8146 fc_vport_terminate(vports[i]->fc_vport);
8147 lpfc_destroy_vport_work_array(phba, vports);
8149 /* Remove FC host and then SCSI host with the physical port */
8150 fc_remove_host(shost);
8151 scsi_remove_host(shost);
8152 lpfc_cleanup(vport);
8155 * Bring down the SLI Layer. This step disables all interrupts,
8156 * clears the rings, discards all mailbox commands, and resets
8157 * the HBA.
8160 /* HBA interrupt will be disabled after this call */
8161 lpfc_sli_hba_down(phba);
8162 /* Stop kthread signal shall trigger work_done one more time */
8163 kthread_stop(phba->worker_thread);
8164 /* Final cleanup of txcmplq and reset the HBA */
8165 lpfc_sli_brdrestart(phba);
8167 lpfc_stop_hba_timers(phba);
8168 spin_lock_irq(&phba->hbalock);
8169 list_del_init(&vport->listentry);
8170 spin_unlock_irq(&phba->hbalock);
8172 lpfc_debugfs_terminate(vport);
8174 /* Disable interrupt */
8175 lpfc_sli_disable_intr(phba);
8177 pci_set_drvdata(pdev, NULL);
8178 scsi_host_put(shost);
8181 * Call scsi_free before mem_free since scsi bufs are released to their
8182 * corresponding pools here.
8184 lpfc_scsi_free(phba);
8185 lpfc_mem_free_all(phba);
8187 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
8188 phba->hbqslimp.virt, phba->hbqslimp.phys);
8190 /* Free resources associated with SLI2 interface */
8191 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
8192 phba->slim2p.virt, phba->slim2p.phys);
8194 /* unmap adapter SLIM and Control Registers */
8195 iounmap(phba->ctrl_regs_memmap_p);
8196 iounmap(phba->slim_memmap_p);
8198 lpfc_hba_free(phba);
8200 pci_release_selected_regions(pdev, bars);
8201 pci_disable_device(pdev);
8205 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
8206 * @pdev: pointer to PCI device
8207 * @msg: power management message
8209 * This routine is to be called from the kernel's PCI subsystem to support
8210 * system Power Management (PM) to device with SLI-3 interface spec. When
8211 * PM invokes this method, it quiesces the device by stopping the driver's
8212 * worker thread for the device, turning off the device's interrupt and DMA,
8213 * and bringing the device offline. Note that as the driver implements the
8214 * minimum PM requirements to a power-aware driver's PM support for
8215 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8216 * to the suspend() method call will be treated as SUSPEND and the driver will
8217 * fully reinitialize its device during resume() method call, the driver will
8218 * set device to PCI_D3hot state in PCI config space instead of setting it
8219 * according to the @msg provided by the PM.
8221 * Return code
8222 * 0 - driver suspended the device
8223 * Error otherwise
8225 static int
8226 lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
8228 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8229 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8231 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8232 "0473 PCI device Power Management suspend.\n");
8234 /* Bring down the device */
8235 lpfc_offline_prep(phba);
8236 lpfc_offline(phba);
8237 kthread_stop(phba->worker_thread);
8239 /* Disable interrupt from device */
8240 lpfc_sli_disable_intr(phba);
8242 /* Save device state to PCI config space */
8243 pci_save_state(pdev);
8244 pci_set_power_state(pdev, PCI_D3hot);
8246 return 0;
8250 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
8251 * @pdev: pointer to PCI device
8253 * This routine is to be called from the kernel's PCI subsystem to support
8254 * system Power Management (PM) to device with SLI-3 interface spec. When PM
8255 * invokes this method, it restores the device's PCI config space state and
8256 * fully reinitializes the device and brings it online. Note that as the
8257 * driver implements the minimum PM requirements to a power-aware driver's
8258 * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE,
8259 * FREEZE) to the suspend() method call will be treated as SUSPEND and the
8260 * driver will fully reinitialize its device during resume() method call,
8261 * the device will be set to PCI_D0 directly in PCI config space before
8262 * restoring the state.
8264 * Return code
8265 * 0 - driver resumed the device
8266 * Error otherwise
8268 static int
8269 lpfc_pci_resume_one_s3(struct pci_dev *pdev)
8271 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8272 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8273 uint32_t intr_mode;
8274 int error;
8276 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8277 "0452 PCI device Power Management resume.\n");
8279 /* Restore device state from PCI config space */
8280 pci_set_power_state(pdev, PCI_D0);
8281 pci_restore_state(pdev);
8284 * As the new kernel behavior of pci_restore_state() API call clears
8285 * device saved_state flag, need to save the restored state again.
8287 pci_save_state(pdev);
8289 if (pdev->is_busmaster)
8290 pci_set_master(pdev);
8292 /* Startup the kernel thread for this host adapter. */
8293 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8294 "lpfc_worker_%d", phba->brd_no);
8295 if (IS_ERR(phba->worker_thread)) {
8296 error = PTR_ERR(phba->worker_thread);
8297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8298 "0434 PM resume failed to start worker "
8299 "thread: error=x%x.\n", error);
8300 return error;
8303 /* Configure and enable interrupt */
8304 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8305 if (intr_mode == LPFC_INTR_ERROR) {
8306 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8307 "0430 PM resume Failed to enable interrupt\n");
8308 return -EIO;
8309 } else
8310 phba->intr_mode = intr_mode;
8312 /* Restart HBA and bring it online */
8313 lpfc_sli_brdrestart(phba);
8314 lpfc_online(phba);
8316 /* Log the current active interrupt mode */
8317 lpfc_log_intr_mode(phba, phba->intr_mode);
8319 return 0;
8323 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
8324 * @phba: pointer to lpfc hba data structure.
8326 * This routine is called to prepare the SLI3 device for PCI slot recover. It
8327 * aborts all the outstanding SCSI I/Os to the pci device.
8329 static void
8330 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
8332 struct lpfc_sli *psli = &phba->sli;
8333 struct lpfc_sli_ring *pring;
8335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8336 "2723 PCI channel I/O abort preparing for recovery\n");
8339 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
8340 * and let the SCSI mid-layer retry them to recover.
8342 pring = &psli->ring[psli->fcp_ring];
8343 lpfc_sli_abort_iocb_ring(phba, pring);
8347 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
8348 * @phba: pointer to lpfc hba data structure.
8350 * This routine is called to prepare the SLI3 device for PCI slot reset. It
8351 * disables the device interrupt and pci device, and aborts the internal FCP
8352 * pending I/Os.
8354 static void
8355 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
8357 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8358 "2710 PCI channel disable preparing for reset\n");
8360 /* Block any management I/Os to the device */
8361 lpfc_block_mgmt_io(phba);
8363 /* Block all SCSI devices' I/Os on the host */
8364 lpfc_scsi_dev_block(phba);
8366 /* stop all timers */
8367 lpfc_stop_hba_timers(phba);
8369 /* Disable interrupt and pci device */
8370 lpfc_sli_disable_intr(phba);
8371 pci_disable_device(phba->pcidev);
8373 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
8374 lpfc_sli_flush_fcp_rings(phba);
8378 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
8379 * @phba: pointer to lpfc hba data structure.
8381 * This routine is called to prepare the SLI3 device for PCI slot permanently
8382 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
8383 * pending I/Os.
8385 static void
8386 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
8388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8389 "2711 PCI channel permanent disable for failure\n");
8390 /* Block all SCSI devices' I/Os on the host */
8391 lpfc_scsi_dev_block(phba);
8393 /* stop all timers */
8394 lpfc_stop_hba_timers(phba);
8396 /* Clean up all driver's outstanding SCSI I/Os */
8397 lpfc_sli_flush_fcp_rings(phba);
8401 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
8402 * @pdev: pointer to PCI device.
8403 * @state: the current PCI connection state.
8405 * This routine is called from the PCI subsystem for I/O error handling to
8406 * device with SLI-3 interface spec. This function is called by the PCI
8407 * subsystem after a PCI bus error affecting this device has been detected.
8408 * When this function is invoked, it will need to stop all the I/Os and
8409 * interrupt(s) to the device. Once that is done, it will return
8410 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
8411 * as desired.
8413 * Return codes
8414 * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
8415 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
8416 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8418 static pci_ers_result_t
8419 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
8421 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8422 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8424 switch (state) {
8425 case pci_channel_io_normal:
8426 /* Non-fatal error, prepare for recovery */
8427 lpfc_sli_prep_dev_for_recover(phba);
8428 return PCI_ERS_RESULT_CAN_RECOVER;
8429 case pci_channel_io_frozen:
8430 /* Fatal error, prepare for slot reset */
8431 lpfc_sli_prep_dev_for_reset(phba);
8432 return PCI_ERS_RESULT_NEED_RESET;
8433 case pci_channel_io_perm_failure:
8434 /* Permanent failure, prepare for device down */
8435 lpfc_sli_prep_dev_for_perm_failure(phba);
8436 return PCI_ERS_RESULT_DISCONNECT;
8437 default:
8438 /* Unknown state, prepare and request slot reset */
8439 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8440 "0472 Unknown PCI error state: x%x\n", state);
8441 lpfc_sli_prep_dev_for_reset(phba);
8442 return PCI_ERS_RESULT_NEED_RESET;
8447 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
8448 * @pdev: pointer to PCI device.
8450 * This routine is called from the PCI subsystem for error handling to
8451 * device with SLI-3 interface spec. This is called after PCI bus has been
8452 * reset to restart the PCI card from scratch, as if from a cold-boot.
8453 * During the PCI subsystem error recovery, after driver returns
8454 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
8455 * recovery and then call this routine before calling the .resume method
8456 * to recover the device. This function will initialize the HBA device,
8457 * enable the interrupt, but it will just put the HBA to offline state
8458 * without passing any I/O traffic.
8460 * Return codes
8461 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
8462 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
8464 static pci_ers_result_t
8465 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
8467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8469 struct lpfc_sli *psli = &phba->sli;
8470 uint32_t intr_mode;
8472 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
8473 if (pci_enable_device_mem(pdev)) {
8474 printk(KERN_ERR "lpfc: Cannot re-enable "
8475 "PCI device after reset.\n");
8476 return PCI_ERS_RESULT_DISCONNECT;
8479 pci_restore_state(pdev);
8482 * As the new kernel behavior of pci_restore_state() API call clears
8483 * device saved_state flag, need to save the restored state again.
8485 pci_save_state(pdev);
8487 if (pdev->is_busmaster)
8488 pci_set_master(pdev);
8490 spin_lock_irq(&phba->hbalock);
8491 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8492 spin_unlock_irq(&phba->hbalock);
8494 /* Configure and enable interrupt */
8495 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
8496 if (intr_mode == LPFC_INTR_ERROR) {
8497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8498 "0427 Cannot re-enable interrupt after "
8499 "slot reset.\n");
8500 return PCI_ERS_RESULT_DISCONNECT;
8501 } else
8502 phba->intr_mode = intr_mode;
8504 /* Take device offline, it will perform cleanup */
8505 lpfc_offline_prep(phba);
8506 lpfc_offline(phba);
8507 lpfc_sli_brdrestart(phba);
8509 /* Log the current active interrupt mode */
8510 lpfc_log_intr_mode(phba, phba->intr_mode);
8512 return PCI_ERS_RESULT_RECOVERED;
8516 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
8517 * @pdev: pointer to PCI device
8519 * This routine is called from the PCI subsystem for error handling to device
8520 * with SLI-3 interface spec. It is called when kernel error recovery tells
8521 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
8522 * error recovery. After this call, traffic can start to flow from this device
8523 * again.
8525 static void
8526 lpfc_io_resume_s3(struct pci_dev *pdev)
8528 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8529 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8531 /* Bring device online, it will be no-op for non-fatal error resume */
8532 lpfc_online(phba);
8534 /* Clean up Advanced Error Reporting (AER) if needed */
8535 if (phba->hba_flag & HBA_AER_ENABLED)
8536 pci_cleanup_aer_uncorrect_error_status(pdev);
8540 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
8541 * @phba: pointer to lpfc hba data structure.
8543 * returns the number of ELS/CT IOCBs to reserve
8546 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
8548 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
8550 if (phba->sli_rev == LPFC_SLI_REV4) {
8551 if (max_xri <= 100)
8552 return 10;
8553 else if (max_xri <= 256)
8554 return 25;
8555 else if (max_xri <= 512)
8556 return 50;
8557 else if (max_xri <= 1024)
8558 return 100;
8559 else
8560 return 150;
8561 } else
8562 return 0;
8566 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
8567 * @pdev: pointer to PCI device
8568 * @pid: pointer to PCI device identifier
8570 * This routine is called from the kernel's PCI subsystem to device with
8571 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8572 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
8573 * information of the device and driver to see if the driver states that it
8574 * can support this kind of device. If the match is successful, the driver
8575 * core invokes this routine. If this routine determines it can claim the HBA,
8576 * it does all the initialization that it needs to do to handle the HBA
8577 * properly.
8579 * Return code
8580 * 0 - driver can claim the device
8581 * negative value - driver can not claim the device
8583 static int __devinit
8584 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8586 struct lpfc_hba *phba;
8587 struct lpfc_vport *vport = NULL;
8588 struct Scsi_Host *shost = NULL;
8589 int error;
8590 uint32_t cfg_mode, intr_mode;
8591 int mcnt;
8593 /* Allocate memory for HBA structure */
8594 phba = lpfc_hba_alloc(pdev);
8595 if (!phba)
8596 return -ENOMEM;
8598 /* Perform generic PCI device enabling operation */
8599 error = lpfc_enable_pci_dev(phba);
8600 if (error) {
8601 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8602 "1409 Failed to enable pci device.\n");
8603 goto out_free_phba;
8606 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
8607 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
8608 if (error)
8609 goto out_disable_pci_dev;
8611 /* Set up SLI-4 specific device PCI memory space */
8612 error = lpfc_sli4_pci_mem_setup(phba);
8613 if (error) {
8614 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8615 "1410 Failed to set up pci memory space.\n");
8616 goto out_disable_pci_dev;
8619 /* Set up phase-1 common device driver resources */
8620 error = lpfc_setup_driver_resource_phase1(phba);
8621 if (error) {
8622 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8623 "1411 Failed to set up driver resource.\n");
8624 goto out_unset_pci_mem_s4;
8627 /* Set up SLI-4 Specific device driver resources */
8628 error = lpfc_sli4_driver_resource_setup(phba);
8629 if (error) {
8630 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8631 "1412 Failed to set up driver resource.\n");
8632 goto out_unset_pci_mem_s4;
8635 /* Initialize and populate the iocb list per host */
8637 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8638 "2821 initialize iocb list %d.\n",
8639 phba->cfg_iocb_cnt*1024);
8640 error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);
8642 if (error) {
8643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8644 "1413 Failed to initialize iocb list.\n");
8645 goto out_unset_driver_resource_s4;
8648 INIT_LIST_HEAD(&phba->active_rrq_list);
8650 /* Set up common device driver resources */
8651 error = lpfc_setup_driver_resource_phase2(phba);
8652 if (error) {
8653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8654 "1414 Failed to set up driver resource.\n");
8655 goto out_free_iocb_list;
8658 /* Create SCSI host to the physical port */
8659 error = lpfc_create_shost(phba);
8660 if (error) {
8661 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8662 "1415 Failed to create scsi host.\n");
8663 goto out_unset_driver_resource;
8666 /* Configure sysfs attributes */
8667 vport = phba->pport;
8668 error = lpfc_alloc_sysfs_attr(vport);
8669 if (error) {
8670 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8671 "1416 Failed to allocate sysfs attr\n");
8672 goto out_destroy_shost;
8675 shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
8676 /* Now, trying to enable interrupt and bring up the device */
8677 cfg_mode = phba->cfg_use_msi;
8678 while (true) {
8679 /* Put device to a known state before enabling interrupt */
8680 lpfc_stop_port(phba);
8681 /* Configure and enable interrupt */
8682 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
8683 if (intr_mode == LPFC_INTR_ERROR) {
8684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8685 "0426 Failed to enable interrupt.\n");
8686 error = -ENODEV;
8687 goto out_free_sysfs_attr;
8689 /* Default to single FCP EQ for non-MSI-X */
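/*
 * With MSI-X, one vector is reserved for the slow path, so the fast-path
 * EQ count is capped at (vectors - 1).
 */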
8690 if (phba->intr_type != MSIX)
8691 phba->cfg_fcp_eq_count = 1;
8692 else if (phba->sli4_hba.msix_vec_nr < phba->cfg_fcp_eq_count)
8693 phba->cfg_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
8694 /* Set up SLI-4 HBA */
8695 if (lpfc_sli4_hba_setup(phba)) {
8696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8697 "1421 Failed to set up hba\n");
8698 error = -ENODEV;
8699 goto out_disable_intr;
8702 /* Send NOP mbx cmds for non-INTx mode active interrupt test */
8703 if (intr_mode != 0)
8704 mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
8705 LPFC_ACT_INTR_CNT);
8707 /* Check active interrupts received only for MSI/MSI-X */
8708 if (intr_mode == 0 ||
8709 phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
8710 /* Log the current active interrupt mode */
8711 phba->intr_mode = intr_mode;
8712 lpfc_log_intr_mode(phba, intr_mode);
8713 break;
8715 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8716 "0451 Configure interrupt mode (%d) "
8717 "failed active interrupt test.\n",
8718 intr_mode);
8719 /* Unset the previous SLI-4 HBA setup. */
8721 * TODO: Is this operation compatible with IF TYPE 2
8722 * devices? All port state is deleted and cleared.
8724 lpfc_sli4_unset_hba(phba);
8725 /* Try next level of interrupt mode */
8726 cfg_mode = --intr_mode;
8729 /* Perform post initialization setup */
8730 lpfc_post_init_setup(phba);
8732 /* Check if there are static vports to be created. */
8733 lpfc_create_static_vport(phba);
8735 return 0;
8737 out_disable_intr:
8738 lpfc_sli4_disable_intr(phba);
8739 out_free_sysfs_attr:
8740 lpfc_free_sysfs_attr(vport);
8741 out_destroy_shost:
8742 lpfc_destroy_shost(phba);
8743 out_unset_driver_resource:
8744 lpfc_unset_driver_resource_phase2(phba);
8745 out_free_iocb_list:
8746 lpfc_free_iocb_list(phba);
8747 out_unset_driver_resource_s4:
8748 lpfc_sli4_driver_resource_unset(phba);
8749 out_unset_pci_mem_s4:
8750 lpfc_sli4_pci_mem_unset(phba);
8751 out_disable_pci_dev:
8752 lpfc_disable_pci_dev(phba);
8753 if (shost)
8754 scsi_host_put(shost);
8755 out_free_phba:
8756 lpfc_hba_free(phba);
8757 return error;
8761 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
8762 * @pdev: pointer to PCI device
8764 * This routine is called from the kernel's PCI subsystem to device with
8765 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
8766 * removed from PCI bus, it performs all the necessary cleanup for the HBA
8767 * device to be removed from the PCI subsystem properly.
8769 static void __devexit
8770 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
8772 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8773 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
8774 struct lpfc_vport **vports;
8775 struct lpfc_hba *phba = vport->phba;
8776 int i;
8778 /* Mark the device unloading flag */
8779 spin_lock_irq(&phba->hbalock);
8780 vport->load_flag |= FC_UNLOADING;
8781 spin_unlock_irq(&phba->hbalock);
8783 /* Free the HBA sysfs attributes */
8784 lpfc_free_sysfs_attr(vport);
8786 /* Release all the vports against this physical port */
8787 vports = lpfc_create_vport_work_array(phba);
8788 if (vports != NULL)
8789 for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
8790 fc_vport_terminate(vports[i]->fc_vport);
8791 lpfc_destroy_vport_work_array(phba, vports);
8793 /* Remove FC host and then SCSI host with the physical port */
8794 fc_remove_host(shost);
8795 scsi_remove_host(shost);
8797 /* Perform cleanup on the physical port */
8798 lpfc_cleanup(vport);
8801 * Bring down the SLI Layer. This step disables all interrupts,
8802 * clears the rings, discards all mailbox commands, and resets
8803 * the HBA FCoE function.
8805 lpfc_debugfs_terminate(vport);
8806 lpfc_sli4_hba_unset(phba);
8808 spin_lock_irq(&phba->hbalock);
8809 list_del_init(&vport->listentry);
8810 spin_unlock_irq(&phba->hbalock);
8812 /* Perform scsi free before driver resource_unset since scsi
8813 * buffers are released to their corresponding pools here.
8815 lpfc_scsi_free(phba);
8816 lpfc_sli4_driver_resource_unset(phba);
8818 /* Unmap adapter Control and Doorbell registers */
8819 lpfc_sli4_pci_mem_unset(phba);
8821 /* Release PCI resources and disable device's PCI function */
8822 scsi_host_put(shost);
8823 lpfc_disable_pci_dev(phba);
8825 /* Finally, free the driver's device data structure */
8826 lpfc_hba_free(phba);
8828 return;
8832 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
8833 * @pdev: pointer to PCI device
8834 * @msg: power management message
8836 * This routine is called from the kernel's PCI subsystem to support system
8837 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8838 * this method, it quiesces the device by stopping the driver's worker
8839 * thread for the device, turning off the device's interrupt and DMA, and bringing
8840 * the device offline. Note that as the driver implements the minimum PM
8841 * requirements to a power-aware driver's PM support for suspend/resume -- all
8842 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
8843 * method call will be treated as SUSPEND and the driver will fully
8844 * reinitialize its device during resume() method call, the driver will set
8845 * device to PCI_D3hot state in PCI config space instead of setting it
8846 * according to the @msg provided by the PM.
8848 * Return code
8849 * 0 - driver suspended the device
8850 * Error otherwise
8852 static int
8853 lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
8855 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8856 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8858 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8859 "2843 PCI device Power Management suspend.\n");
8861 /* Bring down the device */
8862 lpfc_offline_prep(phba);
8863 lpfc_offline(phba);
8864 kthread_stop(phba->worker_thread);
8866 /* Disable interrupt from device */
8867 lpfc_sli4_disable_intr(phba);
8869 /* Save device state to PCI config space */
8870 pci_save_state(pdev);
8871 pci_set_power_state(pdev, PCI_D3hot);
8873 return 0;
8877 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
8878 * @pdev: pointer to PCI device
8880 * This routine is called from the kernel's PCI subsystem to support system
8881 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
8882 * this method, it restores the device's PCI config space state and fully
8883 * reinitializes the device and brings it online. Note that as the driver
8884 * implements the minimum PM requirements to a power-aware driver's PM for
8885 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
8886 * to the suspend() method call will be treated as SUSPEND and the driver
8887 * will fully reinitialize its device during resume() method call, the device
8888 * will be set to PCI_D0 directly in PCI config space before restoring the
8889 * state.
8891 * Return code
8892 * 0 - driver suspended the device
8893 * Error otherwise
8895 static int
8896 lpfc_pci_resume_one_s4(struct pci_dev *pdev)
8898 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8899 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
8900 uint32_t intr_mode;
8901 int error;
8903 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8904 "0292 PCI device Power Management resume.\n");
8906 /* Restore device state from PCI config space */
8907 pci_set_power_state(pdev, PCI_D0);
8908 pci_restore_state(pdev);
8911 * As the new kernel behavior of pci_restore_state() API call clears
8912 * device saved_state flag, need to save the restored state again.
8914 pci_save_state(pdev);
8916 if (pdev->is_busmaster)
8917 pci_set_master(pdev);
8919 /* Startup the kernel thread for this host adapter. */
8920 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8921 "lpfc_worker_%d", phba->brd_no);
8922 if (IS_ERR(phba->worker_thread)) {
8923 error = PTR_ERR(phba->worker_thread);
8924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8925 "0293 PM resume failed to start worker "
8926 "thread: error=x%x.\n", error);
8927 return error;
8930 /* Configure and enable interrupt */
8931 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
8932 if (intr_mode == LPFC_INTR_ERROR) {
8933 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8934 "0294 PM resume Failed to enable interrupt\n");
8935 return -EIO;
8936 } else
8937 phba->intr_mode = intr_mode;
8939 /* Restart HBA and bring it online */
8940 lpfc_sli_brdrestart(phba);
8941 lpfc_online(phba);
8943 /* Log the current active interrupt mode */
8944 lpfc_log_intr_mode(phba, phba->intr_mode);
8946 return 0;
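/*
 * Illustration only (not part of the driver's control flow): a minimal sketch
 * of the generic PCI power-management pattern that the suspend/resume pair
 * above follows, including re-saving the state after pci_restore_state()
 * because that call clears the saved_state flag. The function name is
 * hypothetical and exists only for this sketch.
 */
static int __maybe_unused
lpfc_example_pm_state_cycle(struct pci_dev *pdev)
{
	/* Suspend side: save config space, then drop to D3hot */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	/* Resume side: return to full power, restore and re-save the state */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	return 0;
}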
8950 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
8951 * @phba: pointer to lpfc hba data structure.
8953  * This routine is called to prepare the SLI4 device for PCI slot recovery. It
8954  * aborts all the outstanding SCSI I/Os to the PCI device.
8956 static void
8957 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
8959 struct lpfc_sli *psli = &phba->sli;
8960 struct lpfc_sli_ring *pring;
8962 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8963 "2828 PCI channel I/O abort preparing for recovery\n");
8965 	 * There may be errored I/Os through the HBA; abort all I/Os on the txcmplq
8966 	 * and let the SCSI mid-layer retry them to recover.
8968 pring = &psli->ring[psli->fcp_ring];
8969 lpfc_sli_abort_iocb_ring(phba, pring);
8973 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
8974 * @phba: pointer to lpfc hba data structure.
8976 * This routine is called to prepare the SLI4 device for PCI slot reset. It
8977  * disables the device interrupt and the PCI device, and aborts the pending
8978  * internal FCP I/Os.
8980 static void
8981 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
8983 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8984 "2826 PCI channel disable preparing for reset\n");
8986 /* Block any management I/Os to the device */
8987 lpfc_block_mgmt_io(phba);
8989 /* Block all SCSI devices' I/Os on the host */
8990 lpfc_scsi_dev_block(phba);
8992 /* stop all timers */
8993 lpfc_stop_hba_timers(phba);
8995 /* Disable interrupt and pci device */
8996 lpfc_sli4_disable_intr(phba);
8997 pci_disable_device(phba->pcidev);
8999 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
9000 lpfc_sli_flush_fcp_rings(phba);
9004 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
9005 * @phba: pointer to lpfc hba data structure.
9007  * This routine is called to prepare the SLI4 device for permanently disabling
9008  * the PCI slot. It blocks the SCSI transport layer traffic and flushes the
9009  * pending FCP I/Os.
9011 static void
9012 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9015 "2827 PCI channel permanent disable for failure\n");
9017 /* Block all SCSI devices' I/Os on the host */
9018 lpfc_scsi_dev_block(phba);
9020 /* stop all timers */
9021 lpfc_stop_hba_timers(phba);
9023 /* Clean up all driver's outstanding SCSI I/Os */
9024 lpfc_sli_flush_fcp_rings(phba);
9028 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
9029 * @pdev: pointer to PCI device.
9030 * @state: the current PCI connection state.
9032 * This routine is called from the PCI subsystem for error handling to device
9033 * with SLI-4 interface spec. This function is called by the PCI subsystem
9034 * after a PCI bus error affecting this device has been detected. When this
9035 * function is invoked, it will need to stop all the I/Os and interrupt(s)
9036 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
9037 * for the PCI subsystem to perform proper recovery as desired.
9039 * Return codes
9040 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9041 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9043 static pci_ers_result_t
9044 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
9046 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9047 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9049 switch (state) {
9050 case pci_channel_io_normal:
9051 /* Non-fatal error, prepare for recovery */
9052 lpfc_sli4_prep_dev_for_recover(phba);
9053 return PCI_ERS_RESULT_CAN_RECOVER;
9054 case pci_channel_io_frozen:
9055 /* Fatal error, prepare for slot reset */
9056 lpfc_sli4_prep_dev_for_reset(phba);
9057 return PCI_ERS_RESULT_NEED_RESET;
9058 case pci_channel_io_perm_failure:
9059 /* Permanent failure, prepare for device down */
9060 lpfc_sli4_prep_dev_for_perm_failure(phba);
9061 return PCI_ERS_RESULT_DISCONNECT;
9062 default:
9063 /* Unknown state, prepare and request slot reset */
9064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9065 "2825 Unknown PCI error state: x%x\n", state);
9066 lpfc_sli4_prep_dev_for_reset(phba);
9067 return PCI_ERS_RESULT_NEED_RESET;
9072  * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
9073 * @pdev: pointer to PCI device.
9075 * This routine is called from the PCI subsystem for error handling to device
9076 * with SLI-4 interface spec. It is called after PCI bus has been reset to
9077 * restart the PCI card from scratch, as if from a cold-boot. During the
9078 * PCI subsystem error recovery, after the driver returns
9079 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
9080 * recovery and then call this routine before calling the .resume method to
9081  * recover the device. This function will initialize the HBA device and enable
9082  * its interrupt, but it will just leave the HBA in the offline state without
9083  * passing any I/O traffic.
9085 * Return codes
9086 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9087 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9089 static pci_ers_result_t
9090 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
9092 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9093 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9094 struct lpfc_sli *psli = &phba->sli;
9095 uint32_t intr_mode;
9097 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
9098 if (pci_enable_device_mem(pdev)) {
9099 printk(KERN_ERR "lpfc: Cannot re-enable "
9100 "PCI device after reset.\n");
9101 return PCI_ERS_RESULT_DISCONNECT;
9104 pci_restore_state(pdev);
9105 if (pdev->is_busmaster)
9106 pci_set_master(pdev);
9108 spin_lock_irq(&phba->hbalock);
9109 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9110 spin_unlock_irq(&phba->hbalock);
9112 /* Configure and enable interrupt */
9113 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
9114 if (intr_mode == LPFC_INTR_ERROR) {
9115 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9116 "2824 Cannot re-enable interrupt after "
9117 "slot reset.\n");
9118 return PCI_ERS_RESULT_DISCONNECT;
9119 } else
9120 phba->intr_mode = intr_mode;
9122 /* Log the current active interrupt mode */
9123 lpfc_log_intr_mode(phba, phba->intr_mode);
9125 return PCI_ERS_RESULT_RECOVERED;
9129 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
9130 * @pdev: pointer to PCI device
9132 * This routine is called from the PCI subsystem for error handling to device
9133 * with SLI-4 interface spec. It is called when kernel error recovery tells
9134 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
9135 * error recovery. After this call, traffic can start to flow from this device
9136 * again.
9138 static void
9139 lpfc_io_resume_s4(struct pci_dev *pdev)
9141 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9142 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9145 	 * In case of slot reset, as the function reset is performed through a
9146 	 * mailbox command, which needs DMA to be enabled, this operation
9147 	 * has to be moved to the io resume phase. Taking the device offline
9148 	 * will perform the necessary cleanup.
9150 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
9151 /* Perform device reset */
9152 lpfc_offline_prep(phba);
9153 lpfc_offline(phba);
9154 lpfc_sli_brdrestart(phba);
9155 /* Bring the device back online */
9156 lpfc_online(phba);
9159 /* Clean up Advanced Error Reporting (AER) if needed */
9160 if (phba->hba_flag & HBA_AER_ENABLED)
9161 pci_cleanup_aer_uncorrect_error_status(pdev);
9165 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
9166 * @pdev: pointer to PCI device
9167 * @pid: pointer to PCI device identifier
9169 * This routine is to be registered to the kernel's PCI subsystem. When an
9170 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
9171  * at the PCI device-specific information of the device and driver to see if
9172  * the driver states that it can support this kind of device. If the match is
9173 * successful, the driver core invokes this routine. This routine dispatches
9174 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
9175 * do all the initialization that it needs to do to handle the HBA device
9176 * properly.
9178 * Return code
9179 * 0 - driver can claim the device
9180  * negative value - driver cannot claim the device
9182 static int __devinit
9183 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
9185 int rc;
9186 struct lpfc_sli_intf intf;
9188 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
9189 return -ENODEV;
9191 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
9192 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
9193 rc = lpfc_pci_probe_one_s4(pdev, pid);
9194 else
9195 rc = lpfc_pci_probe_one_s3(pdev, pid);
9197 return rc;
9201 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
9202 * @pdev: pointer to PCI device
9204 * This routine is to be registered to the kernel's PCI subsystem. When an
9205 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
9206 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
9207 * remove routine, which will perform all the necessary cleanup for the
9208 * device to be removed from the PCI subsystem properly.
9210 static void __devexit
9211 lpfc_pci_remove_one(struct pci_dev *pdev)
9213 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9214 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9216 switch (phba->pci_dev_grp) {
9217 case LPFC_PCI_DEV_LP:
9218 lpfc_pci_remove_one_s3(pdev);
9219 break;
9220 case LPFC_PCI_DEV_OC:
9221 lpfc_pci_remove_one_s4(pdev);
9222 break;
9223 default:
9224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9225 "1424 Invalid PCI device group: 0x%x\n",
9226 phba->pci_dev_grp);
9227 break;
9229 return;
9233 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
9234 * @pdev: pointer to PCI device
9235 * @msg: power management message
9237 * This routine is to be registered to the kernel's PCI subsystem to support
9238 * system Power Management (PM). When PM invokes this method, it dispatches
9239 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
9240 * suspend the device.
9242 * Return code
9243 * 0 - driver suspended the device
9244 * Error otherwise
9246 static int
9247 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
9249 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9250 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9251 int rc = -ENODEV;
9253 switch (phba->pci_dev_grp) {
9254 case LPFC_PCI_DEV_LP:
9255 rc = lpfc_pci_suspend_one_s3(pdev, msg);
9256 break;
9257 case LPFC_PCI_DEV_OC:
9258 rc = lpfc_pci_suspend_one_s4(pdev, msg);
9259 break;
9260 default:
9261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9262 "1425 Invalid PCI device group: 0x%x\n",
9263 phba->pci_dev_grp);
9264 break;
9266 return rc;
9270 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
9271 * @pdev: pointer to PCI device
9273 * This routine is to be registered to the kernel's PCI subsystem to support
9274 * system Power Management (PM). When PM invokes this method, it dispatches
9275 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
9276 * resume the device.
9278 * Return code
9279  * 0 - driver resumed the device
9280 * Error otherwise
9282 static int
9283 lpfc_pci_resume_one(struct pci_dev *pdev)
9285 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9286 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9287 int rc = -ENODEV;
9289 switch (phba->pci_dev_grp) {
9290 case LPFC_PCI_DEV_LP:
9291 rc = lpfc_pci_resume_one_s3(pdev);
9292 break;
9293 case LPFC_PCI_DEV_OC:
9294 rc = lpfc_pci_resume_one_s4(pdev);
9295 break;
9296 default:
9297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9298 "1426 Invalid PCI device group: 0x%x\n",
9299 phba->pci_dev_grp);
9300 break;
9302 return rc;
9306 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
9307 * @pdev: pointer to PCI device.
9308 * @state: the current PCI connection state.
9310 * This routine is registered to the PCI subsystem for error handling. This
9311 * function is called by the PCI subsystem after a PCI bus error affecting
9312 * this device has been detected. When this routine is invoked, it dispatches
9313 * the action to the proper SLI-3 or SLI-4 device error detected handling
9314 * routine, which will perform the proper error detected operation.
9316 * Return codes
9317 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
9318 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9320 static pci_ers_result_t
9321 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
9323 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9324 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9325 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9327 switch (phba->pci_dev_grp) {
9328 case LPFC_PCI_DEV_LP:
9329 rc = lpfc_io_error_detected_s3(pdev, state);
9330 break;
9331 case LPFC_PCI_DEV_OC:
9332 rc = lpfc_io_error_detected_s4(pdev, state);
9333 break;
9334 default:
9335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9336 "1427 Invalid PCI device group: 0x%x\n",
9337 phba->pci_dev_grp);
9338 break;
9340 return rc;
9344  * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
9345 * @pdev: pointer to PCI device.
9347 * This routine is registered to the PCI subsystem for error handling. This
9348 * function is called after PCI bus has been reset to restart the PCI card
9349 * from scratch, as if from a cold-boot. When this routine is invoked, it
9350 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
9351 * routine, which will perform the proper device reset.
9353 * Return codes
9354 * PCI_ERS_RESULT_RECOVERED - the device has been recovered
9355 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
9357 static pci_ers_result_t
9358 lpfc_io_slot_reset(struct pci_dev *pdev)
9360 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9361 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9362 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
9364 switch (phba->pci_dev_grp) {
9365 case LPFC_PCI_DEV_LP:
9366 rc = lpfc_io_slot_reset_s3(pdev);
9367 break;
9368 case LPFC_PCI_DEV_OC:
9369 rc = lpfc_io_slot_reset_s4(pdev);
9370 break;
9371 default:
9372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9373 "1428 Invalid PCI device group: 0x%x\n",
9374 phba->pci_dev_grp);
9375 break;
9377 return rc;
9381 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
9382 * @pdev: pointer to PCI device
9384 * This routine is registered to the PCI subsystem for error handling. It
9385 * is called when kernel error recovery tells the lpfc driver that it is
9386 * OK to resume normal PCI operation after PCI bus error recovery. When
9387 * this routine is invoked, it dispatches the action to the proper SLI-3
9388 * or SLI-4 device io_resume routine, which will resume the device operation.
9390 static void
9391 lpfc_io_resume(struct pci_dev *pdev)
9393 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9394 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9396 switch (phba->pci_dev_grp) {
9397 case LPFC_PCI_DEV_LP:
9398 lpfc_io_resume_s3(pdev);
9399 break;
9400 case LPFC_PCI_DEV_OC:
9401 lpfc_io_resume_s4(pdev);
9402 break;
9403 default:
9404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9405 "1429 Invalid PCI device group: 0x%x\n",
9406 phba->pci_dev_grp);
9407 break;
9409 return;
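/*
 * Illustration only: a minimal sketch of the order in which the PCI error
 * recovery core is expected to walk the pci_error_handlers registered in
 * lpfc_err_handler below. The function name is hypothetical; it simply
 * strings together the dispatch routines above to show the protocol.
 */
static pci_ers_result_t __maybe_unused
lpfc_example_eeh_flow(struct pci_dev *pdev, pci_channel_state_t state)
{
	pci_ers_result_t rc;

	/* 1. A channel error is reported; the driver quiesces I/O */
	rc = lpfc_io_error_detected(pdev, state);
	if (rc == PCI_ERS_RESULT_DISCONNECT)
		return rc;

	/* 2. After the slot/link reset, the driver re-initializes the HBA */
	rc = lpfc_io_slot_reset(pdev);
	if (rc == PCI_ERS_RESULT_DISCONNECT)
		return rc;

	/* 3. Recovery succeeded; normal I/O may flow from the device again */
	lpfc_io_resume(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}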
9412 static struct pci_device_id lpfc_id_table[] = {
9413 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
9414 PCI_ANY_ID, PCI_ANY_ID, },
9415 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
9416 PCI_ANY_ID, PCI_ANY_ID, },
9417 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
9418 PCI_ANY_ID, PCI_ANY_ID, },
9419 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
9420 PCI_ANY_ID, PCI_ANY_ID, },
9421 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
9422 PCI_ANY_ID, PCI_ANY_ID, },
9423 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
9424 PCI_ANY_ID, PCI_ANY_ID, },
9425 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
9426 PCI_ANY_ID, PCI_ANY_ID, },
9427 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
9428 PCI_ANY_ID, PCI_ANY_ID, },
9429 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
9430 PCI_ANY_ID, PCI_ANY_ID, },
9431 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
9432 PCI_ANY_ID, PCI_ANY_ID, },
9433 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
9434 PCI_ANY_ID, PCI_ANY_ID, },
9435 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
9436 PCI_ANY_ID, PCI_ANY_ID, },
9437 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
9438 PCI_ANY_ID, PCI_ANY_ID, },
9439 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
9440 PCI_ANY_ID, PCI_ANY_ID, },
9441 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
9442 PCI_ANY_ID, PCI_ANY_ID, },
9443 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
9444 PCI_ANY_ID, PCI_ANY_ID, },
9445 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
9446 PCI_ANY_ID, PCI_ANY_ID, },
9447 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
9448 PCI_ANY_ID, PCI_ANY_ID, },
9449 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
9450 PCI_ANY_ID, PCI_ANY_ID, },
9451 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
9452 PCI_ANY_ID, PCI_ANY_ID, },
9453 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
9454 PCI_ANY_ID, PCI_ANY_ID, },
9455 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
9456 PCI_ANY_ID, PCI_ANY_ID, },
9457 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
9458 PCI_ANY_ID, PCI_ANY_ID, },
9459 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
9460 PCI_ANY_ID, PCI_ANY_ID, },
9461 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
9462 PCI_ANY_ID, PCI_ANY_ID, },
9463 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
9464 PCI_ANY_ID, PCI_ANY_ID, },
9465 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
9466 PCI_ANY_ID, PCI_ANY_ID, },
9467 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
9468 PCI_ANY_ID, PCI_ANY_ID, },
9469 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
9470 PCI_ANY_ID, PCI_ANY_ID, },
9471 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
9472 PCI_ANY_ID, PCI_ANY_ID, },
9473 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
9474 PCI_ANY_ID, PCI_ANY_ID, },
9475 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
9476 PCI_ANY_ID, PCI_ANY_ID, },
9477 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
9478 PCI_ANY_ID, PCI_ANY_ID, },
9479 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
9480 PCI_ANY_ID, PCI_ANY_ID, },
9481 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
9482 PCI_ANY_ID, PCI_ANY_ID, },
9483 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
9484 PCI_ANY_ID, PCI_ANY_ID, },
9485 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
9486 PCI_ANY_ID, PCI_ANY_ID, },
9487 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
9488 PCI_ANY_ID, PCI_ANY_ID, },
9489 {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
9490 PCI_ANY_ID, PCI_ANY_ID, },
9491 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
9492 PCI_ANY_ID, PCI_ANY_ID, },
9493 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
9494 PCI_ANY_ID, PCI_ANY_ID, },
9495 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
9496 PCI_ANY_ID, PCI_ANY_ID, },
9497 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
9498 PCI_ANY_ID, PCI_ANY_ID, },
9499 { 0 }
9502 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
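/*
 * Note (illustration only): supporting an additional adapter would mean
 * appending one more entry ahead of the { 0 } terminator above, e.g.
 *
 *	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEWDEVICE,
 *		PCI_ANY_ID, PCI_ANY_ID, },
 *
 * where PCI_DEVICE_ID_NEWDEVICE is a hypothetical ID defined alongside the
 * existing PCI_DEVICE_ID_* macros. MODULE_DEVICE_TABLE() exports the table
 * so hotplug/modprobe can autoload the module when a matching device
 * appears on the PCI bus.
 */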
9504 static struct pci_error_handlers lpfc_err_handler = {
9505 .error_detected = lpfc_io_error_detected,
9506 .slot_reset = lpfc_io_slot_reset,
9507 .resume = lpfc_io_resume,
9510 static struct pci_driver lpfc_driver = {
9511 .name = LPFC_DRIVER_NAME,
9512 .id_table = lpfc_id_table,
9513 .probe = lpfc_pci_probe_one,
9514 .remove = __devexit_p(lpfc_pci_remove_one),
9515 .suspend = lpfc_pci_suspend_one,
9516 .resume = lpfc_pci_resume_one,
9517 .err_handler = &lpfc_err_handler,
9521 * lpfc_init - lpfc module initialization routine
9523 * This routine is to be invoked when the lpfc module is loaded into the
9524 * kernel. The special kernel macro module_init() is used to indicate the
9525 * role of this routine to the kernel as lpfc module entry point.
9527 * Return codes
9528 * 0 - successful
9529 * -ENOMEM - FC attach transport failed
9530 * all others - failed
9532 static int __init
9533 lpfc_init(void)
9535 int error = 0;
9537 printk(LPFC_MODULE_DESC "\n");
9538 printk(LPFC_COPYRIGHT "\n");
9540 if (lpfc_enable_npiv) {
9541 lpfc_transport_functions.vport_create = lpfc_vport_create;
9542 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
9544 lpfc_transport_template =
9545 fc_attach_transport(&lpfc_transport_functions);
9546 if (lpfc_transport_template == NULL)
9547 return -ENOMEM;
9548 if (lpfc_enable_npiv) {
9549 lpfc_vport_transport_template =
9550 fc_attach_transport(&lpfc_vport_transport_functions);
9551 if (lpfc_vport_transport_template == NULL) {
9552 fc_release_transport(lpfc_transport_template);
9553 return -ENOMEM;
9556 error = pci_register_driver(&lpfc_driver);
9557 if (error) {
9558 fc_release_transport(lpfc_transport_template);
9559 if (lpfc_enable_npiv)
9560 fc_release_transport(lpfc_vport_transport_template);
9563 return error;
9567 * lpfc_exit - lpfc module removal routine
9569 * This routine is invoked when the lpfc module is removed from the kernel.
9570 * The special kernel macro module_exit() is used to indicate the role of
9571 * this routine to the kernel as lpfc module exit point.
9573 static void __exit
9574 lpfc_exit(void)
9576 pci_unregister_driver(&lpfc_driver);
9577 fc_release_transport(lpfc_transport_template);
9578 if (lpfc_enable_npiv)
9579 fc_release_transport(lpfc_vport_transport_template);
9580 if (_dump_buf_data) {
9581 printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for "
9582 "_dump_buf_data at 0x%p\n",
9583 (1L << _dump_buf_data_order), _dump_buf_data);
9584 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
9587 if (_dump_buf_dif) {
9588 printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for "
9589 "_dump_buf_dif at 0x%p\n",
9590 (1L << _dump_buf_dif_order), _dump_buf_dif);
9591 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
9595 module_init(lpfc_init);
9596 module_exit(lpfc_exit);
9597 MODULE_LICENSE("GPL");
9598 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
9599 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
9600 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);