/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		2000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
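
/*
 * Layout example (illustrative): with two PCI functions the per-function
 * firmware trace buffers sit back-to-back in SMEM -- function 0 at
 * BFI_IOC_TRC_OFF and function 1 at BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN,
 * i.e. BFA_DBG_FWTRC_OFF(1).
 */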

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_fwimg_get_chunk(__ioc, __off)		\
		((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
#define bfa_ioc_fwimg_get_size(__ioc)			\
		((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
			     enum bfa_ioc_aen_event event);
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);

/*
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/* IOC enable request */
	IOC_E_DISABLE		= 2,	/* IOC disable request */
	IOC_E_TIMEOUT		= 3,	/* f/w response timeout */
	IOC_E_FWREADY		= 4,	/* f/w initialization done */
	IOC_E_FWRSP_GETATTR	= 5,	/* IOC get attribute response */
	IOC_E_FWRSP_ENABLE	= 6,	/* enable f/w response */
	IOC_E_FWRSP_DISABLE	= 7,	/* disable f/w response */
	IOC_E_HBFAIL		= 8,	/* heartbeat failure */
	IOC_E_HWERROR		= 9,	/* hardware error interrupt */
	IOC_E_SEMLOCKED		= 10,	/* h/w semaphore is locked */
	IOC_E_DETACH		= 11,	/* driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
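
/*
 * Note: bfa_fsm_state_decl() (from the bfa_sm framework) is assumed to
 * expand to the prototypes for a state handler pair, roughly:
 *
 *	static void bfa_ioc_sm_<state>(struct bfa_ioc_s *ioc,
 *				       enum ioc_event event);
 *	static void bfa_ioc_sm_<state>_entry(struct bfa_ioc_s *ioc);
 *
 * so each state below contributes an entry action plus an event handler.
 */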

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
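
/*
 * The table above maps a state handler function back to its external
 * enum bfa_ioc_state value; bfa_ioc_get_state() below resolves the
 * current state with bfa_sm_to_state(ioc_sm_table, ioc->fsm).
 */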

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/*
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/*
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/*
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/*
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/*
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/*
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HBFAIL:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

static void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

bfa_boolean_t
bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = bfa_reg_read(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		bfa_os_udelay(2);
		r32 = bfa_reg_read(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_HWSEM_TOV);
}
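
/*
 * Hardware semaphore usage sketch (illustrative): the register itself
 * serializes owners, so no read-modify-write cycle is needed --
 *
 *	if (bfa_reg_read(sem_reg) == 0)	   read returns 0 only to the winner
 *		... critical section ...
 *	bfa_reg_write(sem_reg, 1);	   writing 1 releases the semaphore
 *
 * Losers read 1 and retry from the BFA_IOC_HWSEM_TOV timer armed above.
 */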

void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/*
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		bfa_trc(ioc, ioc_fwstate);

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32	*msgp = (u32 *) ioc_msg;
	u32	i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32	hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
			hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
			ioc, BFA_IOC_HB_TOV);
}
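
/*
 * Heartbeat scheme: firmware keeps incrementing ioc_regs.heartbeat, and the
 * host samples it every BFA_IOC_HB_TOV msecs. Two consecutive samples with
 * the same value mean firmware has stopped making progress, so recovery is
 * started.
 */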

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}

/*
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32	*fwimg;
	u32	pgnum, pgoff;
	u32	loff = 0;
	u32	chunkno = 0;
	u32	i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/*
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_ioc_fwimg_get_chunk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
		      bfa_os_swap32(boot_param));
}

static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32				stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t	rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}

/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/*
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/*
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}

u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}

/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
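
/*
 * Typical caller pattern (illustrative, not from this file): build the
 * message in cmd->msg with bfi_h2i_set() and hand the struct
 * bfa_mbox_cmd_s to bfa_ioc_mbox_queue(); the command either goes out
 * immediately or is replayed later by bfa_ioc_mbox_poll().
 */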

/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	bfa_ioc_msgget(ioc, &m);

	/*
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	/* >= guards the mbhdlr[] bound: mc == BFI_MC_MAX is out of range */
	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

#ifndef BFA_BIOS_BUILD

/*
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32		ioc_state;
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	return BFA_TRUE;
}

/*
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}

#define BFA_MFG_NAME "Brocade"

void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
}

enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	bfa_os_memcpy((void *)serial_num,
		      (void *)ioc->attr->brcd_serialnum,
		      BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	bfa_assert(chip_rev);

	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;
	u8			nports;
	u8			max_speed;

	bfa_assert(model);
	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	nports = bfa_ioc_get_nports(ioc);
	max_speed = bfa_ioc_speed_sup(ioc);

	/*
	 * model name
	 */
	if (max_speed == 10) {
		strcpy(model, "BR-10?0");
		model[5] = '0' + nports;
	} else {
		strcpy(model, "Brocade-??5");
		model[8] = '0' + max_speed;
		model[9] = '0' + nports;
	}
}

enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t	wwn;
		u8	byte[sizeof(wwn_t)];
	}
	w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	return w.wwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t	wwn;
		u8	byte[sizeof(wwn_t)];
	}
	w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	w.byte[0] = 0x20;

	return w.wwn;
}

wwn_t
bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
{
	union {
		wwn_t	wwn;
		u8	byte[sizeof(wwn_t)];
	}
	w, w5;

	bfa_trc(ioc, inst);

	w.wwn = ioc->attr->mfg_wwn;
	w5.byte[0] = 0x50 | w.byte[2] >> 4;
	w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
	w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
	w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
	w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
	w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
	w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
	w5.byte[7] = (inst & 0xff);

	return w5.wwn;
}
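
/*
 * Worked example (illustrative values): mfg_wwn 10:00:00:05:1e:8b:cd:ef
 * with inst 1 yields the NAA-5 WWN 50:00:51:e8:bc:de:f0:01 -- the 0x5
 * NAA nibble up front, the low 48 bits of the mfg WWN shifted up a
 * nibble, and the 12-bit instance number in the low bits.
 */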

u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_wwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	mac_t	mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
}

/*
 * Send AEN notification
 */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	union bfa_aen_data_u	aen_data;
	struct bfa_log_mod_s	*logmod = ioc->logm;
	s32			inst_num = 0;
	enum bfa_ioc_type_e	ioc_type;

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);

	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
		break;
	}
	aen_data.ioc.ioc_type = ioc_type;
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/*
 * Clear saved firmware trace
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32	pgnum;
	u32	loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int	i, tlen;
	u32	*tbuf = trcdata, r32;

	bfa_trc(ioc, *trclen);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	loff = bfa_ioc_smem_pgoff(ioc, loff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
		return BFA_STATUS_FAILED;

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;
	tlen /= sizeof(u32);

	bfa_trc(ioc, tlen);

	for (i = 0; i < tlen; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		tbuf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);

	*trclen = tlen * sizeof(u32);
	return BFA_STATUS_OK;
}

/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int	tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

#else

static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
}

static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_assert(0);
}

#endif