/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/*
 *  bfad.c Linux driver PCI interface module.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>
#include <asm/fcntl.h>
BFA_TRC_FILE(LDRV, BFAD);
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);

static int	bfad_inst;
static int	num_sgpgs_parm;
int		supported_fc4s;

char	*host_name, *os_name, *os_patch;
int	num_rports, num_ios, num_tms;
int	num_fcxps, num_ufbufs;
int	reqq_size, rspq_size, num_sgpgs;
int	rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
int	bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int	bfa_io_max_sge = BFAD_IO_MAX_SGE;
int	bfa_log_level = 3; /* WARNING log level */
int	ioc_auto_recover = BFA_TRUE;
int	bfa_linkup_delay = -1;
int	fdmi_enable = BFA_TRUE;
int	pcie_max_read_reqsz;
int	bfa_debugfs_enable = 1;
int	msix_disable_cb = 0, msix_disable_ct = 0;
/* Firmware related */
u32	bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
u32	*bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;
#define BFAD_FW_FILE_CT_FC	"ctfw_fc.bin"
#define BFAD_FW_FILE_CT_CNA	"ctfw_cna.bin"
#define BFAD_FW_FILE_CB_FC	"cbfw_fc.bin"
static u32	*bfad_load_fwimg(struct pci_dev *pdev);
static void	bfad_free_fwimg(void);
static void	bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
			u32 *bfi_image_size, char *fw_name);
static const char *msix_name_ct[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
};

static const char *msix_name_cb[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"eemc", "elpu0", "elpu1", "epss", "mlpu" };
MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
				"(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
				"buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
				"Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
				"Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
				"Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
			"boot port, otherwise 10 secs in RHEL4 and 0 for "
			"[RHEL5, SLES10, ESX40], Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
			"for Brocade-415/425/815/825 cards, default=0, "
			"Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
			"if possible for Brocade-1010/1020/804/1007/902/1741 "
			"cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
			"Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
			"(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1, "
			"Range[false:0|true:1]");
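/*
 * Usage sketch for the parameters above (illustrative values, not
 * recommendations; assumes the driver is built as the "bfa" module, so
 * writable parameters also show up under /sys/module/bfa/parameters):
 *
 *	modprobe bfa bfa_lun_queue_depth=64 ioc_auto_recover=0
 *	echo 4 > /sys/module/bfa/parameters/bfa_log_level
 */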
static void	bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void	bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void	bfad_sm_initializing(struct bfad_s *bfad,
					enum bfad_sm_event event);
static void	bfad_sm_operational(struct bfad_s *bfad,
					enum bfad_sm_event event);
static void	bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void	bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void	bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
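
/*
 * Summary of the instance state machine implemented by the handlers
 * below (derived from the handlers themselves):
 *
 *	uninit       --CREATE-------->  created
 *	created      --INIT---------->  initializing
 *	initializing --INIT_SUCCESS-->  operational
 *	initializing --INIT_FAILED--->  failed
 *	operational  --STOP---------->  fcs_exit
 *	fcs_exit     --FCS_EXIT_COMP->  stopping
 *	stopping     --EXIT_COMP----->  uninit
 */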
/*
 * Beginning state for the driver instance, awaiting the pci_probe event
 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_CREATE:
		bfa_sm_set_state(bfad, bfad_sm_created);
		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
						"%s", "bfad_worker");
		if (IS_ERR(bfad->bfad_tsk)) {
			printk(KERN_INFO "bfad[%d]: Kernel thread "
				"creation failed!\n", bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
		}
		bfa_sm_send_event(bfad, BFAD_E_INIT);
		break;

	case BFAD_E_STOP:
		/* Ignore stop; already in uninit */
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
/*
 * Driver Instance is created, awaiting event INIT to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable interrupts and wait for bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up an interrupt handler for each vector */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			printk(KERN_WARNING
				"bfa %s: bfa init failed\n",
				bfad->pci_name);
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;
	unsigned long	flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
			bfad_uncfg_pport(bfad);
		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
			bfad_im_probe_undo(bfad);
			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		}
		break;

	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_STOP:
		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
		bfad_fcs_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_FCS_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_stopping);
		bfad_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
void
bfad_hcb_comp(void *arg, bfa_status_t status)
{
	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

	fcomp->status = status;
	complete(&fcomp->comp);
}
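
/*
 * Usage sketch for the bfad_hcb_comp() pattern: callers stack-allocate a
 * struct bfad_hal_comp, pass bfad_hcb_comp as the HAL callback, and block
 * on the embedded completion. bfa_hal_call() below is a hypothetical
 * stand-in for any HAL request that takes a (callback, argument) pair,
 * not a real API:
 *
 *	struct bfad_hal_comp fcomp;
 *	unsigned long flags;
 *
 *	init_completion(&fcomp.comp);
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	bfa_hal_call(&bfad->bfa, bfad_hcb_comp, &fcomp);
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *	wait_for_completion(&fcomp.comp);
 *	if (fcomp.status != BFA_STATUS_OK)
 *		return fcomp.status;
 */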
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s	      *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/*
		 * If the BFAD_HAL_INIT_FAIL flag is set, wake up the
		 * kernel thread to resume the bfad operations now that
		 * HAL init is done.
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	complete(&bfad->comp);
}
struct bfad_port_s *
bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
		  enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
		  struct bfad_vport_s *vp_drv)
{
	bfa_status_t	rc;
	struct bfad_port_s    *port_drv;

	if (!vp_drv && !vf_drv) {
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;

	if (roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_port_new(bfad, port_drv);
		if (rc != BFA_STATUS_OK) {
			bfad_im_port_delete(bfad, port_drv);
			port_drv = NULL;
		}
	}

	return port_drv;
}
void
bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
		     struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s    *port_drv;

	/* this will only be called from rmmod context */
	if (vp_drv && !vp_drv->comp_del) {
		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
				((vf_drv) ? (&(vf_drv)->base_port) :
				(&(bfad)->pport));
		bfa_trc(bfad, roles);
		if (roles & BFA_LPORT_ROLE_FCP_IM)
			bfad_im_port_delete(bfad, port_drv);
	}
}
/*
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
		    struct bfad_rport_s **rport_drv)
{
	bfa_status_t	rc = BFA_STATUS_OK;

	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
	if (*rport_drv == NULL) {
		rc = BFA_STATUS_ENOMEM;
		return rc;
	}

	*rport = &(*rport_drv)->fcs_rport;
	return rc;
}
/*
 * FCS PBC VPORT Create
 */
void
bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
{
	struct bfa_lport_cfg_s port_cfg = {0};
	struct bfad_vport_s   *vport;
	int rc;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport)
		return;

	vport->drv_port.bfad = bfad;
	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
	port_cfg.pwwn = pbc_vport.vp_pwwn;
	port_cfg.nwwn = pbc_vport.vp_nwwn;
	port_cfg.preboot_vp = BFA_TRUE;

	rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
				      &port_cfg, vport);

	if (rc != BFA_STATUS_OK) {
		kfree(vport);
		return;
	}

	list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
}
void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int	i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
					meminfo_elem->mem_len,
					meminfo_elem->kva,
					(dma_addr_t) meminfo_elem->dma);
				break;
			default:
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}
void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * Populate the hal values back to the driver for sysfs use;
	 * otherwise, the default values would show up as 0 in sysfs.
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	int	i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	dma_addr_t	phys_addr;
	void	       *kva;
	bfa_status_t	rc = BFA_STATUS_OK;
	int	retry_count = 0;
	int	reset_value = 1;
	int	min_num_sgpgs = 512;

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
				meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with the default
				 * num_sgpgs, retry with half the value.
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO
					"bfad[%d]: memory allocation failed"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO
					"bfad[%d]: trying to allocate memory"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			/* Allocation succeeded: restore num_sgpgs */
			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;
		}
	}

ext:
	return rc;
}
/*
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s   *vport;
	int		rc = BFA_STATUS_OK;
	unsigned long	flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
						dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}
/*
 * BFA driver timer handler: beats the HAL timer module and processes
 * any completions it queued.
 */
void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s	      *bfad = (struct bfad_s *) data;
	unsigned long	flags;
	struct list_head	doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_beat(&bfad->bfa.timer_mod);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}
void
bfad_init_timer(struct bfad_s *bfad)
{
	init_timer(&bfad->hal_tmo);
	bfad->hal_tmo.function = bfad_bfa_tmo;
	bfad->hal_tmo.data = (unsigned long)bfad;

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}
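
/*
 * init_timer() with explicit .function/.data is the legacy timer API this
 * driver was written against. A sketch of the same setup against the
 * timer_setup()/from_timer() API that kernels 4.15+ require; this port is
 * hypothetical and not part of the driver:
 *
 *	static void bfad_bfa_tmo(struct timer_list *t)
 *	{
 *		struct bfad_s *bfad = from_timer(bfad, t, hal_tmo);
 *		...
 *	}
 *
 *	timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
 *	mod_timer(&bfad->hal_tmo,
 *		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
 */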
int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	int	rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));

	if (bfad->pci_bar0_kva == NULL) {
		printk(KERN_ERR "Failed to map bar0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	/* Adjust PCIe Maximum Read Request Size */
	if (pcie_max_read_reqsz > 0) {
		int pcie_cap_reg;
		u16 pcie_dev_ctl;
		u16 mask = 0xffff;

		/* MRRS is encoded in Device Control bits 14:12 */
		switch (pcie_max_read_reqsz) {
		case 128:
			mask = 0x0;
			break;
		case 256:
			mask = 0x1000;
			break;
		case 512:
			mask = 0x2000;
			break;
		case 1024:
			mask = 0x3000;
			break;
		case 2048:
			mask = 0x4000;
			break;
		case 4096:
			mask = 0x5000;
			break;
		default:
			break;
		}

		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		if (mask != 0xffff && pcie_cap_reg) {
			pcie_cap_reg += 0x08;	/* Device Control register */
			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
			if ((pcie_dev_ctl & 0x7000) != mask) {
				printk(KERN_WARNING "BFA[%s]: "
				"pcie_max_read_request_size is %d, "
				"reset to %d\n", bfad->pci_name,
				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
				pcie_max_read_reqsz);

				pcie_dev_ctl &= ~0x7000;
				pci_write_config_word(pdev, pcie_cap_reg,
						pcie_dev_ctl | mask);
			}
		}
	}

	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}
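
/*
 * The MRRS update above pokes the Device Control register directly. A
 * minimal alternative sketch using the kernel's own helper, assuming a
 * kernel where pcie_set_readrq() is available; it performs the same
 * bits-14:12 update and validates the size internally:
 *
 *	if (pcie_max_read_reqsz >= 128 && pcie_max_read_reqsz <= 4096 &&
 *	    is_power_of_2(pcie_max_read_reqsz))
 *		pcie_set_readrq(pdev, pcie_max_read_reqsz);
 */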
void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
	pci_iounmap(pdev, bfad->pci_bar0_kva);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t	rc;
	unsigned long	flags;

	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports. "
			"System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	bfad->bfa.trcmod = bfad->trcmod;
	bfad->bfa.plog = &bfad->plog_buf;
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	/* FCS attach */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfad->bfa_fcs.trcmod = bfad->trcmod;
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

	return BFA_STATUS_OK;

out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}
void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}
void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_iocfc_start(&bfad->bfa);
	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (bfad->im)
		flush_workqueue(bfad->im->drv_workq);
}
void
bfad_fcs_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}
void
bfad_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}
bfa_status_t
bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
{
	int	rc = BFA_STATUS_OK;

	/* Allocate scsi_host for the physical port */
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (role & BFA_LPORT_ROLE_FCP_IM)) {
		if (bfad->pport.im_port == NULL) {
			rc = BFA_STATUS_FAILED;
			goto out;
		}

		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
						&bfad->pcidev->dev);
		if (rc != BFA_STATUS_OK)
			goto out;

		bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;

out:
	return rc;
}
void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}
bfa_status_t
bfad_start_ops(struct bfad_s *bfad) {

	int	retval;
	unsigned long	flags;
	struct bfad_vport_s *vport, *vport_new;
	struct bfa_fcs_driver_info_s driver_info;

	/* Fill in the driver_info and pass it on to FCS */
	memset(&driver_info, 0, sizeof(driver_info));
	strncpy(driver_info.version, BFAD_DRIVER_VERSION,
		sizeof(driver_info.version) - 1);
	if (host_name)
		strncpy(driver_info.host_machine_name, host_name,
			sizeof(driver_info.host_machine_name) - 1);
	if (os_name)
		strncpy(driver_info.host_os_name, os_name,
			sizeof(driver_info.host_os_name) - 1);
	if (os_patch)
		strncpy(driver_info.host_os_patch, os_patch,
			sizeof(driver_info.host_os_patch) - 1);
	strncpy(driver_info.os_device_name, bfad->pci_name,
		sizeof(driver_info.os_device_name) - 1);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
	bfa_fcs_init(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
	if (retval != BFA_STATUS_OK) {
		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
			bfa_sm_set_state(bfad, bfad_sm_failed);
		return BFA_STATUS_FAILED;
	}

	/* BFAD level FC4 IM specific resource allocation */
	retval = bfad_im_probe(bfad);
	if (retval != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad_im_probe failed\n");
		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
			bfa_sm_set_state(bfad, bfad_sm_failed);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		return BFA_STATUS_FAILED;
	} else
		bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;

	bfad_drv_start(bfad);

	/* Complete pbc vport create */
	list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
				 list_entry) {
		struct fc_vport_identifiers vid;
		struct fc_vport *fc_vport;
		char pwwn_buf[BFA_STRING_32];

		memset(&vid, 0, sizeof(vid));
		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vid.vport_type = FC_PORTTYPE_NPIV;
		vid.disable = false;
		vid.node_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.nwwn)));
		vid.port_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.pwwn)));
		fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
		if (!fc_vport) {
			wwn2str(pwwn_buf, vid.port_name);
			printk(KERN_WARNING "bfad%d: failed to create pbc vport"
				" %s\n", bfad->inst_no, pwwn_buf);
		}
		list_del(&vport->list_entry);
		kfree(vport);
	}

	/*
	 * If bfa_linkup_delay is left at the -1 default, retrieve the
	 * value using bfad_get_linkup_delay(); else use the passed-in
	 * module param value as the bfa_linkup_delay.
	 */
	if (bfa_linkup_delay < 0) {
		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
		bfad_rport_online_wait(bfad);
		bfa_linkup_delay = -1;
	} else
		bfad_rport_online_wait(bfad);

	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");

	return BFA_STATUS_OK;
}
int
bfad_worker(void *ptr)
{
	struct bfad_s *bfad;
	unsigned long	flags;

	bfad = (struct bfad_s *)ptr;

	while (!kthread_should_stop()) {

		/* Send event BFAD_E_INIT_SUCCESS */
		bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		break;
	}

	return 0;
}
/*
 * BFA driver interrupt functions
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
	struct bfad_s	*bfad = dev_id;
	struct list_head	doneq;
	unsigned long	flags;
	bfa_boolean_t	rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_intx(&bfad->bfa);
	if (!rc) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return IRQ_NONE;
	}

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}
irqreturn_t
bfad_msix(int irq, void *dev_id)
{
	struct bfad_msix_s *vec = dev_id;
	struct bfad_s *bfad = vec->bfad;
	struct list_head doneq;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_msix(&bfad->bfa, vec->msix.entry);
	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}
/*
 * Initialize the MSIX entry table.
 */
static void
bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
			 int mask, int max_bit)
{
	int	i;
	int	match = 0x00000001;

	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
		if (mask & match) {
			bfad->msix_tab[bfad->nvec].msix.entry = i;
			bfad->msix_tab[bfad->nvec].bfad = bfad;
			msix_entries[bfad->nvec].entry = i;
			bfad->nvec++;
		}

		match <<= 1;
	}
}
int
bfad_install_msix_handler(struct bfad_s *bfad)
{
	int	i, error = 0;

	for (i = 0; i < bfad->nvec; i++) {
		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
				bfad->pci_name,
				((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
				msix_name_ct[i] : msix_name_cb[i]));

		error = request_irq(bfad->msix_tab[i].msix.vector,
				    (irq_handler_t) bfad_msix, 0,
				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
		bfa_trc(bfad, i);
		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
		if (error) {
			int	j;

			/* Free any vectors already requested */
			for (j = 0; j < i; j++)
				free_irq(bfad->msix_tab[j].msix.vector,
						&bfad->msix_tab[j]);

			return 1;
		}
	}

	return 0;
}
/*
 * Setup MSIX based interrupt.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
	int	error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
	struct pci_dev *pdev = bfad->pcidev;

	/* Call BFA to get the msix map for this PCI function. */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
	    (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {

		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * Only 'error' vectors are available. We don't
			 * have a mechanism to map multiple interrupts
			 * into one vector, so even if we could request
			 * fewer vectors, we don't know how to associate
			 * interrupt events with vectors. Linux doesn't
			 * duplicate vectors in the MSI-X table for this
			 * case.
			 */

			printk(KERN_WARNING "bfad%d: "
				"pci_enable_msix failed (%d),"
				" use line based.\n", bfad->inst_no, error);

			goto line_based;
		}

		/* Save the vectors */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

line_based:
	if ((request_irq(bfad->pcidev->irq, (irq_handler_t) bfad_intx,
			 BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad)) != 0) {
		/* Enable interrupt handler failed */
		return 1;
	}

	return error;
}
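
/*
 * On this kernel generation pci_enable_msix() returns 0 on success, a
 * negative errno on failure, and a positive count when fewer vectors are
 * available than requested; the code above treats anything nonzero as
 * "fall back to INTx". A sketch of the same policy on kernels that
 * provide pci_alloc_irq_vectors() (hypothetical port, not part of this
 * driver):
 *
 *	int nvec = pci_alloc_irq_vectors(pdev, bfad->nvec, bfad->nvec,
 *					 PCI_IRQ_MSIX);
 *	if (nvec < 0)
 *		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
 */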
void
bfad_remove_intr(struct bfad_s *bfad)
{
	int	i;

	if (bfad->bfad_flags & BFAD_MSIX_ON) {
		for (i = 0; i < bfad->nvec; i++)
			free_irq(bfad->msix_tab[i].msix.vector,
					&bfad->msix_tab[i]);

		pci_disable_msix(bfad->pcidev);
		bfad->bfad_flags &= ~BFAD_MSIX_ON;
	} else
		free_irq(bfad->pcidev->irq, bfad);
}
/*
 * PCI probe entry.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s	*bfad;
	int		error = -ENODEV, retval;

	/* For single port cards - only claim function 0 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
		(PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/* Trace init */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	if (!(bfad_load_fwimg(pdev))) {
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		goto out_pci_init_failure;
	}

	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	/* Initializing the state machine: State set to uninit */
	bfa_sm_set_state(bfad, bfad_sm_uninit);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;
	INIT_LIST_HEAD(&bfad->pbc_vport_list);

	/* Setup the debugfs node for this bfad */
	if (bfa_debugfs_enable)
		bfad_debugfs_init(&bfad->pport);

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;

	bfa_sm_send_event(bfad, BFAD_E_CREATE);

	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
		goto out_bfad_sm_failure;

	return 0;

out_bfad_sm_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_drv_init_failure:
	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}
/*
 * PCI remove entry.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s	      *bfad = pci_get_drvdata(pdev);
	unsigned long	flags;

	bfa_trc(bfad, bfad->inst_no);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->bfad_tsk != NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		kthread_stop(bfad->bfad_tsk);
	} else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	/* Send Event BFAD_E_STOP */
	bfa_sm_send_event(bfad, BFAD_E_STOP);

	/* Driver detach and dealloc mem */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);

	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);

	/* Cleaning the BFAD instance */
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}
struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},
};
MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};
/*
 * Driver module init.
 */
static int __init
bfad_init(void)
{
	int	error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
			BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_im_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_im_module_init failure\n");
		goto ext;
	}

	if (strcmp(FCPI_NAME, " fcpim") == 0)
		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;

	bfa_auto_recover = ioc_auto_recover;
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);

	error = pci_register_driver(&bfad_pci_driver);
	if (error) {
		printk(KERN_WARNING "pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_im_module_exit();
	return error;
}
/*
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_im_module_exit();
	bfad_free_fwimg();
}
/* Firmware handling */
static void
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
		*bfi_image = NULL;
		goto out;
	}

	*bfi_image = vmalloc(fw->size);
	if (NULL == *bfi_image) {
		printk(KERN_ALERT "Failed to allocate buffer for fw image "
			"size=%x!\n", (u32) fw->size);
		goto out;
	}

	memcpy(*bfi_image, fw->data, fw->size);
	*bfi_image_size = fw->size/sizeof(u32);
out:
	release_firmware(fw);
}
static u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
	if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
		if (bfi_image_ct_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_fc,
				&bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
		return bfi_image_ct_fc;
	} else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
		if (bfi_image_ct_cna_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_cna,
				&bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
		return bfi_image_ct_cna;
	} else {
		if (bfi_image_cb_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_cb_fc,
				&bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
		return bfi_image_cb_fc;
	}
}
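
/*
 * Note that bfad_load_fwimg() caches each image in the module-global
 * bfi_image_* buffers, so request_firmware() hits the filesystem only on
 * the first probe of each ASIC family; subsequent probes reuse the cached
 * copy until bfad_free_fwimg() releases it at module exit.
 */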
static void
bfad_free_fwimg(void)
{
	if (bfi_image_ct_fc_size && bfi_image_ct_fc)
		vfree(bfi_image_ct_fc);
	if (bfi_image_ct_cna_size && bfi_image_ct_cna)
		vfree(bfi_image_ct_cna);
	if (bfi_image_cb_fc_size && bfi_image_cb_fc)
		vfree(bfi_image_cb_fc);
}
module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);