/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_mbox_cmd_pending(__ioc)				\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) ||	\
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/*
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}
/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
}
/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
}
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
}
static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}
/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	bfa_ioc_ownership_reset(iocpf->ioc);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}
/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);

	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}
/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}
/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = cpu_to_be16(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
/*
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
			swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
			swab32(boot_env));
}
/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
	attr->mfg_year	= be16_to_cpu(attr->mfg_year);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32				stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);
	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
			ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
}
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
}
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * Initialize LMEM
	 */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return BFA_FALSE;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return BFA_TRUE;
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		ioc->port_mode = ioc->port_mode_cfg =
				(enum bfa_mode_s)msg->fw_event.port_mode;
		ioc->ad_cap_bm = msg->fw_event.cap_bm;
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
	INIT_LIST_HEAD(&ioc->notify_q);
}
/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		enum bfi_pcifn_class clscode)
{
	ioc->clscode	= clscode;
	ioc->pcidev	= *pcidev;

	/*
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case BFA_PCI_DEVICE_ID_FC_8G1P:
	case BFA_PCI_DEVICE_ID_FC_8G2P:
		ioc->asic_gen = BFI_ASIC_GEN_CB;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT_FC:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
			ioc->fcmode = BFA_TRUE;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		WARN_ON(1);
	}

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
		bfa_ioc_set_cb_hwif(ioc);
	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_ioc_set_ct2_hwif(ioc);
		bfa_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
void
bfa_ioc_suspend(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
}
/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/*
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/*
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
}
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}
/*
 * Reset IOC fwstate registers.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
	ad_attr->mfg_day = ioc_attr->mfg_day;
	ad_attr->mfg_month = ioc_attr->mfg_month;
	ad_attr->mfg_year = ioc_attr->mfg_year;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	WARN_ON(!chip_rev);

	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}
*ioc
, char *manufacturer
)
2559 memset((void *)manufacturer
, 0, BFA_ADAPTER_MFG_NAME_LEN
);
2560 memcpy(manufacturer
, BFA_MFG_NAME
, BFA_ADAPTER_MFG_NAME_LEN
);
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	WARN_ON(!model);
	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}

	return ioc_st;
}
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;
	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
2638 bfa_ioc_get_mac(struct bfa_ioc_s
*ioc
)
2641 * Check the IOC type and return the appropriate MAC
2643 if (bfa_ioc_get_type(ioc
) == BFA_IOC_TYPE_FCoE
)
2644 return ioc
->attr
->fcoe_mac
;
2646 return ioc
->attr
->mac
;
2650 bfa_ioc_get_mfg_mac(struct bfa_ioc_s
*ioc
)
2654 m
= ioc
->attr
->mfg_mac
;
2655 if (bfa_mfg_is_old_wwn_mac_model(ioc
->attr
->card_type
))
2656 m
.mac
[MAC_ADDRLEN
- 1] += bfa_ioc_pcifn(ioc
);
2658 bfa_mfg_increment_wwn_mac(&(m
.mac
[MAC_ADDRLEN
-3]),
2659 bfa_ioc_pcifn(ioc
));
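/*
 * Editorial note (not from the original source): the manufactured MAC is
 * made unique per PCI function.  On old WWN/MAC models only the last byte
 * is bumped by the function number; on newer models
 * bfa_mfg_increment_wwn_mac() is assumed to increment the 24-bit value in
 * the last three bytes, so the per-function increment can carry instead
 * of silently wrapping a single byte.
 */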
/*
 * Send AEN notification
 */
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;
	enum bfa_ioc_type_e ioc_type;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
		break;
	}

	/* Send the AEN notification */
	aen_entry->aen_data.ioc.ioc_type = ioc_type;
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_IOC, event);
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;
	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/*
 * Retrieve the current firmware trace from IOC shared memory.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}

static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->clscode = cpu_to_be16(ioc->clscode);
	bfa_ioc_mbox_queue(ioc, &cmd);
}

static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}

/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
				u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
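/*
 * Usage sketch (editorial, hypothetical caller): bfa_ioc_debug_fwcore()
 * is meant to be called repeatedly.  It syncs smem only for the first
 * chunk (*offset == 0) and winds *offset back to 0 once all of
 * BFA_IOC_FW_SMEM_SIZE(ioc) has been handed out:
 *
 *	u32 off = 0;
 *	int len;
 *	do {
 *		len = sizeof(chunk);
 *		if (bfa_ioc_debug_fwcore(ioc, chunk, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		consume 'len' bytes from 'chunk'
 *	} while (off != 0);
 */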
/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

/*
 * Save firmware trace if configured.
 */
void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/*
 *  BFA IOC PF private functions
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	bfa_trc(ioc, fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
		bfa_iocpf_timeout(ioc);
	else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		bfa_iocpf_poll_timer_start(ioc);
	}
}

static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_poll_fwinit(ioc);
}

/*
 *  bfa timer function
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}

/*
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		    void (*timercb) (void *), void *arg, unsigned int timeout)
{

	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}

/*
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}
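/*
 * Usage sketch (editorial, hypothetical caller): the timer module is a
 * plain tick list with no locking of its own -- the driver is expected to
 * call bfa_timer_beat() every BFA_TIMER_FREQ msecs and to hold its own
 * lock around begin/stop, per the comments above:
 *
 *	bfa_timer_begin(ioc->timer_mod, &my_timer, my_timeout_cb,
 *			my_arg, 2 * BFA_TIMER_FREQ);
 *	...
 *	bfa_timer_stop(&my_timer);	(only while it is still queued)
 */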
/*
 *	ASIC block related
 */
static void
bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
{
	struct bfa_ablk_cfg_inst_s *cfg_inst;
	int i, j;
	u16	be16;

	for (i = 0; i < BFA_ABLK_MAX; i++) {
		cfg_inst = &cfg->inst[i];
		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
			be16 = cfg_inst->pf_cfg[j].pers;
			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_qpairs;
			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].num_vectors;
			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].bw_min;
			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
			be16 = cfg_inst->pf_cfg[j].bw_max;
			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
		}
	}
}

static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
	bfa_ablk_cbfn_t cbfn;

	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
	bfa_trc(ablk->ioc, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_ABLK_I2H_QUERY:
		if (rsp->status == BFA_STATUS_OK) {
			memcpy(ablk->cfg, ablk->dma_addr.kva,
				sizeof(struct bfa_ablk_cfg_s));
			bfa_ablk_config_swap(ablk->cfg);
			ablk->cfg = NULL;
		}
		break;

	case BFI_ABLK_I2H_ADPT_CONFIG:
	case BFI_ABLK_I2H_PORT_CONFIG:
		/* update config port mode */
		ablk->ioc->port_mode_cfg = rsp->port_mode;
		break;

	case BFI_ABLK_I2H_PF_DELETE:
	case BFI_ABLK_I2H_PF_UPDATE:
	case BFI_ABLK_I2H_OPTROM_ENABLE:
	case BFI_ABLK_I2H_OPTROM_DISABLE:
		/* No-op */
		break;

	case BFI_ABLK_I2H_PF_CREATE:
		*(ablk->pcifn) = rsp->pcifn;
		ablk->pcifn = NULL;
		break;

	default:
		WARN_ON(1);
	}

	ablk->busy = BFA_FALSE;
	if (ablk->cbfn) {
		cbfn = ablk->cbfn;
		ablk->cbfn = NULL;
		cbfn(ablk->cbarg, rsp->status);
	}
}

static void
bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;

	bfa_trc(ablk->ioc, event);

	switch (event) {
	case BFA_IOC_E_ENABLED:
		WARN_ON(ablk->busy != BFA_FALSE);
		break;

	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* Fail any pending requests */
		ablk->pcifn = NULL;
		if (ablk->busy) {
			if (ablk->cbfn)
				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
			ablk->cbfn = NULL;
			ablk->busy = BFA_FALSE;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}
}

u32
bfa_ablk_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
{
	ablk->dma_addr.kva = dma_kva;
	ablk->dma_addr.pa = dma_pa;
}

void
bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
{
	ablk->ioc = ioc;

	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
	bfa_q_qe_init(&ablk->ioc_notify);
	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
}

bfa_status_t
bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_query_s *m;

	WARN_ON(!ablk_cfg);

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cfg = ablk_cfg;
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
		    bfa_ioc_portid(ablk->ioc));
	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
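/*
 * Usage sketch (editorial, hypothetical caller): every ASIC-block request
 * follows the same single-outstanding-command protocol.  BFA_STATUS_OK
 * only means the mailbox command was queued; ablk->busy stays set until
 * bfa_ablk_isr() sees the firmware response and invokes the callback:
 *
 *	static void my_query_done(void *cbarg, enum bfa_status status);
 *
 *	bfa_ablk_attach(ablk, ioc);
 *	bfa_ablk_memclaim(ablk, dma_kva, dma_pa);
 *	if (bfa_ablk_query(ablk, &my_cfg, my_query_done, NULL) !=
 *	    BFA_STATUS_OK)
 *		the IOC is down or a command is already in flight
 */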
bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
		u8 port, enum bfi_pcifn_class personality,
		u16 bw_min, u16 bw_max,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->pcifn = pcifn;
	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
		    bfa_ioc_portid(ablk->ioc));
	m->pers = cpu_to_be16((u16)personality);
	m->bw_min = cpu_to_be16(bw_min);
	m->bw_max = cpu_to_be16(bw_max);
	m->port = port;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
		bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
		    bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
		    bfa_ioc_portid(ablk->ioc));
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_cfg_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
		bfa_ioc_portid(ablk->ioc));
	m->port = (u8)port;
	m->mode = (u8)mode;
	m->max_pf = (u8)max_pf;
	m->max_vf = (u8)max_vf;
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_pf_req_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
		bfa_ioc_portid(ablk->ioc));
	m->pcifn = (u8)pcifn;
	m->bw_min = cpu_to_be16(bw_min);
	m->bw_max = cpu_to_be16(bw_max);
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
		bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
{
	struct bfi_ablk_h2i_optrom_s *m;

	if (!bfa_ioc_is_operational(ablk->ioc)) {
		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (ablk->busy) {
		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	ablk->cbfn = cbfn;
	ablk->cbarg = cbarg;
	ablk->busy = BFA_TRUE;

	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
		bfa_ioc_portid(ablk->ioc));
	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);

	return BFA_STATUS_OK;
}
/*
 *	SFP module specific
 */

/* forward declarations */
static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
				enum bfa_port_speed portspeed);

static void
bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->lock);
	if (sfp->cbfn)
		sfp->cbfn(sfp->cbarg, sfp->status);
	sfp->lock = 0;
	sfp->data_valid = 0;
}

static void
bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
{
	bfa_trc(sfp, sfp->portspeed);
	if (sfp->media) {
		bfa_sfp_media_get(sfp);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->media = NULL;
	}

	if (sfp->portspeed) {
		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
		if (sfp->state_query_cbfn)
			sfp->state_query_cbfn(sfp->state_query_cbarg,
					sfp->status);
		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	}

	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
}

/*
 *	IOC event handler.
 */
static void
bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
{
	struct bfa_sfp_s *sfp = sfp_arg;

	bfa_trc(sfp, event);
	bfa_trc(sfp, sfp->lock);
	bfa_trc(sfp, sfp->state_query_lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (sfp->lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_show(sfp);
		}

		if (sfp->state_query_lock) {
			sfp->status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_sfp_state_query(sfp);
		}
		break;

	default:
		break;
	}
}

/*
 * SFP's State Change Notification post to AEN
 */
static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
{
	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;
	enum bfa_port_aen_event aen_evt = 0;

	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
		      ((u64)rsp->event));

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		aen_evt = BFA_PORT_AEN_SFP_INSERT;
		break;
	case BFA_SFP_SCN_REMOVED:
		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
		break;
	case BFA_SFP_SCN_FAILED:
		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
		break;
	case BFA_SFP_SCN_POM:
		aen_evt = BFA_PORT_AEN_SFP_POM;
		aen_entry->aen_data.port.level = rsp->pomlvl;
		break;
	default:
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
				  BFA_AEN_CAT_PORT, aen_evt);
}

static void
bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	bfa_trc(sfp, req->memtype);

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
			bfa_ioc_portid(sfp->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
}

/*
 *	SFP is valid, read sfp data
 */
static void
bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	WARN_ON(sfp->lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->lock = 1;
	sfp->memtype = memtype;
	req->memtype = memtype;

	/* Setup SG list */
	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);

	bfa_sfp_getdata_send(sfp);
}

/*
 *	SFP scn handler
 */
static void
bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;

	switch (rsp->event) {
	case BFA_SFP_SCN_INSERTED:
		sfp->state = BFA_SFP_STATE_INSERTED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_REMOVED:
		sfp->state = BFA_SFP_STATE_REMOVED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_FAILED:
		sfp->state = BFA_SFP_STATE_FAILED;
		sfp->data_valid = 0;
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_UNSUPPORT:
		sfp->state = BFA_SFP_STATE_UNSUPPORT;
		bfa_sfp_scn_aen_post(sfp, rsp);
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	case BFA_SFP_SCN_POM:
		bfa_sfp_scn_aen_post(sfp, rsp);
		break;
	case BFA_SFP_SCN_VALID:
		sfp->state = BFA_SFP_STATE_VALID;
		if (!sfp->lock)
			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
		break;
	default:
		bfa_trc(sfp, rsp->event);
		WARN_ON(1);
	}
}

/*
 *	SFP show complete
 */
static void
bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
{
	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;

	if (!sfp->lock) {
		/*
		 * receiving response after ioc failure
		 */
		bfa_trc(sfp, sfp->lock);
		return;
	}

	bfa_trc(sfp, rsp->status);
	if (rsp->status == BFA_STATUS_OK) {
		sfp->data_valid = 1;
		if (sfp->state == BFA_SFP_STATE_VALID)
			sfp->status = BFA_STATUS_OK;
		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
			sfp->status = BFA_STATUS_SFP_UNSUPP;
		else
			bfa_trc(sfp, sfp->state);
	} else {
		sfp->data_valid = 0;
		sfp->status = rsp->status;
		/* sfpshow shouldn't change sfp state */
	}

	bfa_trc(sfp, sfp->memtype);
	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
		bfa_trc(sfp, sfp->data_valid);
		if (sfp->data_valid) {
			u32	size = sizeof(struct sfp_mem_s);
			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
			memcpy(des, sfp->dbuf_kva, size);
		}
		/*
		 * Queue completion callback.
		 */
		bfa_cb_sfp_show(sfp);
	} else
		sfp->lock = 0;

	bfa_trc(sfp, sfp->state_query_lock);
	if (sfp->state_query_lock) {
		sfp->state = rsp->state;
		/* Complete callback */
		bfa_cb_sfp_state_query(sfp);
	}
}

/*
 *	SFP query fw sfp state
 */
static void
bfa_sfp_state_query(struct bfa_sfp_s *sfp)
{
	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;

	/* Should not be doing query if not in _INIT state */
	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
	WARN_ON(sfp->state_query_lock != 0);
	bfa_trc(sfp, sfp->state);

	sfp->state_query_lock = 1;
	req->memtype = 0;

	if (!sfp->lock)
		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
}

static void
bfa_sfp_media_get(struct bfa_sfp_s *sfp)
{
	enum bfa_defs_sfp_media_e *media = sfp->media;

	*media = BFA_SFP_MEDIA_UNKNOWN;

	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
		*media = BFA_SFP_MEDIA_UNSUPPORT;
	else if (sfp->state == BFA_SFP_STATE_VALID) {
		union sfp_xcvr_e10g_code_u e10g;
		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
				(sfpmem->srlid_base.xcvr[5] >> 1);

		e10g.b = sfpmem->srlid_base.xcvr[0];
		bfa_trc(sfp, e10g.b);
		bfa_trc(sfp, xmtr_tech);
		/* check fc transmitter tech */
		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
		    (xmtr_tech & SFP_XMTR_TECH_CA))
			*media = BFA_SFP_MEDIA_CU;
		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
			*media = BFA_SFP_MEDIA_EL;
		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
			 (xmtr_tech & SFP_XMTR_TECH_LC))
			*media = BFA_SFP_MEDIA_LW;
		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
			 (xmtr_tech & SFP_XMTR_TECH_SA))
			*media = BFA_SFP_MEDIA_SW;
		/* Check 10G Ethernet Compliance code */
		else if (e10g.r.e10g_sr)
			*media = BFA_SFP_MEDIA_SW;
		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
			*media = BFA_SFP_MEDIA_LW;
		else if (e10g.r.e10g_unall)
			*media = BFA_SFP_MEDIA_UNKNOWN;
		else
			bfa_trc(sfp, 0);
	} else
		bfa_trc(sfp, sfp->state);
}
static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
{
	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;

	if (portspeed == BFA_PORT_SPEED_10GBPS) {
		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
			return BFA_STATUS_OK;
		else {
			bfa_trc(sfp, e10g.b);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
		return BFA_STATUS_OK;
	else {
		bfa_trc(sfp, portspeed);
		bfa_trc(sfp, fc3.b);
		bfa_trc(sfp, e10g.b);
		return BFA_STATUS_UNSUPP_SPEED;
	}
}

/*
 *	SFP hmbox handler
 */
void
bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
{
	struct bfa_sfp_s *sfp = sfparg;

	switch (msg->mh.msg_id) {
	case BFI_SFP_I2H_SHOW:
		bfa_sfp_show_comp(sfp, msg);
		break;

	case BFI_SFP_I2H_SCN:
		bfa_sfp_scn(sfp, msg);
		break;

	default:
		bfa_trc(sfp, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 *	Return DMA memory needed by sfp module.
 */
u32
bfa_sfp_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}

/*
 *	Attach virtual and physical memory for SFP.
 */
void
bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod)
{
	sfp->dev = dev;
	sfp->ioc = ioc;
	sfp->trcmod = trcmod;

	sfp->cbfn = NULL;
	sfp->cbarg = NULL;
	sfp->sfpmem = NULL;
	sfp->lock = 0;
	sfp->data_valid = 0;
	sfp->state = BFA_SFP_STATE_INIT;
	sfp->state_query_lock = 0;
	sfp->state_query_cbfn = NULL;
	sfp->state_query_cbarg = NULL;
	sfp->media = NULL;
	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
	sfp->is_elb = BFA_FALSE;

	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
	bfa_q_qe_init(&sfp->ioc_notify);
	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
}

/*
 *	Claim Memory for SFP
 */
void
bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
{
	sfp->dbuf_kva = dm_kva;
	sfp->dbuf_pa = dm_pa;
	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));

	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
}

/*
 * Show SFP eeprom content
 *
 * @param[in] sfp   - bfa sfp module
 *
 * @param[out] sfpmem - sfp eeprom data
 *
 */
bfa_status_t
bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
		bfa_cb_sfp_t cbfn, void *cbarg)
{

	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	if (sfp->lock) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_DEVBUSY;
	}

	sfp->cbfn = cbfn;
	sfp->cbarg = cbarg;
	sfp->sfpmem = sfpmem;

	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
	return BFA_STATUS_OK;
}
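/*
 * Usage sketch (editorial, hypothetical caller): sfpshow is asynchronous.
 * BFA_STATUS_OK above only means the mailbox command was queued; the
 * eeprom contents are copied into 'sfpmem' just before 'cbfn' runs from
 * bfa_sfp_show_comp():
 *
 *	static void my_sfp_done(void *cbarg, bfa_status_t status);
 *
 *	if (bfa_sfp_show(sfp, &my_sfpmem, my_sfp_done, NULL) ==
 *	    BFA_STATUS_OK)
 *		wait for my_sfp_done() before reading my_sfpmem
 */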
/*
 * Return SFP Media type
 *
 * @param[in] sfp   - bfa sfp module
 *
 * @param[out] media - media type of the inserted SFP
 *
 */
bfa_status_t
bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	if (!bfa_ioc_is_operational(sfp->ioc)) {
		bfa_trc(sfp, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	sfp->media = media;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	bfa_sfp_media_get(sfp);
	return BFA_STATUS_OK;
}

/*
 * Check if user set port speed is allowed by the SFP
 *
 * @param[in] sfp   - bfa sfp module
 * @param[in] portspeed - port speed from user
 *
 */
bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
		bfa_cb_sfp_t cbfn, void *cbarg)
{
	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);

	if (!bfa_ioc_is_operational(sfp->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* For Mezz card, all speed is allowed */
	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
		return BFA_STATUS_OK;

	/* Check SFP state */
	sfp->portspeed = portspeed;
	if (sfp->state == BFA_SFP_STATE_INIT) {
		if (sfp->state_query_lock) {
			bfa_trc(sfp, 0);
			return BFA_STATUS_DEVBUSY;
		} else {
			sfp->state_query_cbfn = cbfn;
			sfp->state_query_cbarg = cbarg;
			bfa_sfp_state_query(sfp);
			return BFA_STATUS_SFP_NOT_READY;
		}
	}

	if (sfp->state == BFA_SFP_STATE_REMOVED ||
	    sfp->state == BFA_SFP_STATE_FAILED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_NO_SFP_DEV;
	}

	if (sfp->state == BFA_SFP_STATE_INSERTED) {
		bfa_trc(sfp, sfp->state);
		return BFA_STATUS_DEVBUSY;	/* sfp is reading data */
	}

	/* For eloopback, all speed is allowed */
	if (sfp->is_elb)
		return BFA_STATUS_OK;

	return bfa_sfp_speed_valid(sfp, portspeed);
}
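/*
 * Editorial note: bfa_sfp_speed() is effectively tri-state.  With cached
 * SFP data it answers synchronously (OK or UNSUPP_SPEED via
 * bfa_sfp_speed_valid()); from the INIT state it kicks off a state query
 * and returns BFA_STATUS_SFP_NOT_READY, with the verdict delivered later
 * through cbfn; DEVBUSY and NO_SFP_DEV cover the transient and
 * missing-module cases.
 */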
/*
 *	Flash module specific
 */

/*
 * FLASH DMA buffer should be big enough to hold both MFG block and
 * asic block(64k) at the same time and also should be 2k aligned to
 * avoid write segment to cross sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
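/*
 * Editorial worked example: assuming a 512-byte bfa_mfg_block_s, the
 * buffer size comes to BFA_ROUNDUP(0x10000 + 512, 2048) = 0x10800 bytes,
 * i.e. the 64k asic block plus the MFG block padded up to the next 2k
 * segment, so no write segment straddles a sector boundary.
 */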
static void
bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
			int inst, int type)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
	aen_entry->aen_data.audit.partition_inst = inst;
	aen_entry->aen_data.audit.partition_type = type;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
				  BFA_AEN_CAT_AUDIT, event);
}

static void
bfa_flash_cb(struct bfa_flash_s *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
{
	struct bfa_flash_s	*flash = cbarg;

	bfa_trc(flash, event);
	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;

	default:
		break;
	}
}

/*
 * Send flash attribute query request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_query_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_query_req_s *msg =
			(struct bfi_flash_query_req_s *) flash->mb.msg;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
		flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}

/*
 * Send flash write request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_write_send(struct bfa_flash_s *flash)
{
	struct bfi_flash_write_req_s *msg =
			(struct bfi_flash_write_req_s *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
		flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);

	flash->residue -= len;
	flash->offset += len;
}

/*
 * Send flash read request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_read_req_s *msg =
			(struct bfi_flash_read_req_s *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
		flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}

/*
 * Send flash erase request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_flash_erase_send(void *cbarg)
{
	struct bfa_flash_s *flash = cbarg;
	struct bfi_flash_erase_req_s *msg =
			(struct bfi_flash_erase_req_s *) flash->mb.msg;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
		bfa_ioc_portid(flash->ioc));
	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
}

/*
 * Process flash response messages upon receiving interrupts.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
{
	struct bfa_flash_s *flash = flasharg;
	u32	status;

	union {
		struct bfi_flash_query_rsp_s *query;
		struct bfi_flash_erase_rsp_s *erase;
		struct bfi_flash_write_rsp_s *write;
		struct bfi_flash_read_rsp_s *read;
		struct bfi_flash_event_s *event;
		struct bfi_mbmsg_s   *msg;
	} m;

	m.msg = msg;
	bfa_trc(flash, msg->mh.msg_id);

	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
		/* receiving response after ioc failure */
		bfa_trc(flash, 0x9999);
		return;
	}

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr_s *attr, *f;

			attr = (struct bfa_flash_attr_s *) flash->ubuf;
			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			bfa_trc(flash, attr->status);
			bfa_trc(flash, attr->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_ERASE_RSP:
		status = be32_to_cpu(m.erase->status);
		bfa_trc(flash, status);
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			bfa_trc(flash, flash->offset);
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		bfa_trc(flash, status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);
			bfa_trc(flash, flash->offset);
			bfa_trc(flash, len);
			memcpy(flash->ubuf + flash->offset,
				flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
		break;
	case BFI_FLASH_I2H_EVENT:
		status = be32_to_cpu(m.event->status);
		bfa_trc(flash, status);
		if (status == BFA_STATUS_BAD_FWCFG)
			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
		else if (status == BFA_STATUS_INVALID_VENDOR) {
			u32 param;
			param = be32_to_cpu(m.event->param);
			bfa_trc(flash, param);
			bfa_ioc_aen_post(flash->ioc,
				BFA_IOC_AEN_INVALID_VENDOR);
		}
		break;

	default:
		WARN_ON(1);
	}
}
/*
 * Flash memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_flash_meminfo(bfa_boolean_t mincfg)
{
	/* min driver doesn't need flash */
	if (mincfg)
		return 0;
	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/*
 * Flash attach API.
 *
 * @param[in] flash - flash structure
 * @param[in] ioc  - ioc structure
 * @param[in] dev  - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
	flash->ioc = ioc;
	flash->trcmod = trcmod;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);

	/* min driver doesn't need flash */
	if (mincfg) {
		flash->dbuf_kva = NULL;
		flash->dbuf_pa = 0;
	}
}

/*
 * Claim memory for flash
 *
 * @param[in] flash - flash structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
		bfa_boolean_t mincfg)
{
	if (mincfg)
		return;

	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/*
 * Get flash attribute.
 *
 * @param[in] flash - flash structure
 * @param[in] attr - flash attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;
	bfa_flash_query_send(flash);

	return BFA_STATUS_OK;
}

/*
 * Erase flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;

	bfa_flash_erase_send(flash);
	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
				instance, type);
	return BFA_STATUS_OK;
}

/*
 * Update flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);
	return BFA_STATUS_OK;
}
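/*
 * Usage sketch (editorial; partition type chosen only for illustration):
 * updates must satisfy the checks above -- 'len' a multiple of 4,
 * 'offset' a multiple of 16k, and any partition except MFG:
 *
 *	if (bfa_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				  img_buf, img_len, 0,
 *				  my_flash_done, NULL) == BFA_STATUS_OK)
 *		per-chunk progress and the final callback are driven
 *		entirely from bfa_flash_intr() write responses
 */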
/*
 * Read flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
bfa_status_t
bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		u8 instance, void *buf, u32 len, u32 offset,
		bfa_cb_flash_t cbfn, void *cbarg)
{
	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
	bfa_trc(flash, type);
	bfa_trc(flash, instance);
	bfa_trc(flash, len);
	bfa_trc(flash, offset);

	if (!bfa_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 * 'offset' must be in sector (16kb) boundary
	 */
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy) {
		bfa_trc(flash, flash->op_busy);
		return BFA_STATUS_DEVBUSY;
	}

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}

/*
 *	DIAG module specific
 */

#define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
#define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
/* IOC event handler */
static void
bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
{
	struct bfa_diag_s *diag = diag_arg;

	bfa_trc(diag, event);
	bfa_trc(diag, diag->block);
	bfa_trc(diag, diag->fwping.lock);
	bfa_trc(diag, diag->tsensor.lock);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (diag->fwping.lock) {
			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
		}

		if (diag->tsensor.lock) {
			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
			diag->tsensor.cbfn(diag->tsensor.cbarg,
					   diag->tsensor.status);
			diag->tsensor.lock = 0;
		}

		if (diag->block) {
			if (diag->timer_active) {
				bfa_timer_stop(&diag->timer);
				diag->timer_active = 0;
			}

			diag->status = BFA_STATUS_IOC_FAILURE;
			diag->cbfn(diag->cbarg, diag->status);
			diag->block = 0;
		}
		break;

	default:
		break;
	}
}

static void
bfa_diag_memtest_done(void *cbarg)
{
	struct bfa_diag_s *diag = cbarg;
	struct bfa_ioc_s  *ioc = diag->ioc;
	struct bfa_diag_memtest_result *res = diag->result;
	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
	u32	pgnum, pgoff, i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
			 sizeof(u32)); i++) {
		/* read test result from smem */
		*((u32 *) res + i) =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}

	/* Reset IOC fwstates to BFI_IOC_UNINIT */
	bfa_ioc_reset_fwstate(ioc);

	res->status = swab32(res->status);
	bfa_trc(diag, res->status);

	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
		diag->status = BFA_STATUS_OK;
	else {
		diag->status = BFA_STATUS_MEMTEST_FAILED;
		res->addr = swab32(res->addr);
		res->exp = swab32(res->exp);
		res->act = swab32(res->act);
		res->err_status = swab32(res->err_status);
		res->err_status1 = swab32(res->err_status1);
		res->err_addr = swab32(res->err_addr);
		bfa_trc(diag, res->addr);
		bfa_trc(diag, res->exp);
		bfa_trc(diag, res->act);
		bfa_trc(diag, res->err_status);
		bfa_trc(diag, res->err_status1);
		bfa_trc(diag, res->err_addr);
	}
	diag->timer_active = 0;
	diag->cbfn(diag->cbarg, diag->status);
	diag->block = 0;
}

/*
 * Firmware ping
 */

/*
 * Perform DMA test directly
 */
static void
diag_fwping_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_fwping_req_s *fwping_req;
	u32	i;

	bfa_trc(diag, diag->fwping.dbuf_pa);

	/* fill DMA area with pattern */
	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;

	/* get host msg command */
	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;

	/* set up DMA sg list */
	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
			diag->fwping.dbuf_pa);
	/* Set up dma count */
	fwping_req->count = cpu_to_be32(diag->fwping.count);
	/* Set up data pattern */
	fwping_req->data = diag->fwping.data;

	/* build host command */
	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
			bfa_ioc_portid(diag->ioc));

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
}

static void
diag_fwping_comp(struct bfa_diag_s *diag,
		 struct bfi_diag_fwping_rsp_s *diag_rsp)
{
	u32	rsp_data = diag_rsp->data;
	u8	rsp_dma_status = diag_rsp->dma_status;

	bfa_trc(diag, rsp_data);
	bfa_trc(diag, rsp_dma_status);

	if (rsp_dma_status == BFA_STATUS_OK) {
		u32	i, pat;
		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
			diag->fwping.data;
		/* Check mbox data */
		if (diag->fwping.data != rsp_data) {
			bfa_trc(diag, rsp_data);
			diag->fwping.result->dmastatus =
					BFA_STATUS_DATACORRUPTED;
			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
			diag->fwping.cbfn(diag->fwping.cbarg,
					diag->fwping.status);
			diag->fwping.lock = 0;
			return;
		}
		/* Check dma pattern */
		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
				bfa_trc(diag, i);
				bfa_trc(diag, pat);
				bfa_trc(diag,
					*((u32 *)diag->fwping.dbuf_kva + i));
				diag->fwping.result->dmastatus =
						BFA_STATUS_DATACORRUPTED;
				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
				diag->fwping.cbfn(diag->fwping.cbarg,
						diag->fwping.status);
				diag->fwping.lock = 0;
				return;
			}
		}
		diag->fwping.result->dmastatus = BFA_STATUS_OK;
		diag->fwping.status = BFA_STATUS_OK;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	} else {
		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
		diag->fwping.lock = 0;
	}
}
/*
 * Temperature Sensor
 */

static void
diag_tempsensor_send(struct bfa_diag_s *diag)
{
	struct bfi_diag_ts_req_s *msg;

	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
	bfa_trc(diag, msg->temp);
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
		bfa_ioc_portid(diag->ioc));
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
}

static void
diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
{
	if (!diag->tsensor.lock) {
		/* receiving response after ioc failure */
		bfa_trc(diag, diag->tsensor.lock);
		return;
	}

	/*
	 * ASIC junction tempsensor is a reg read operation
	 * it will always return OK
	 */
	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
	diag->tsensor.temp->ts_junc = rsp->ts_junc;
	diag->tsensor.temp->ts_brd = rsp->ts_brd;

	if (rsp->ts_brd) {
		/* tsensor.temp->status is brd_temp status */
		diag->tsensor.temp->status = rsp->status;
		if (rsp->status == BFA_STATUS_OK) {
			diag->tsensor.temp->brd_temp =
				be16_to_cpu(rsp->brd_temp);
		} else
			diag->tsensor.temp->brd_temp = 0;
	}

	bfa_trc(diag, rsp->status);
	bfa_trc(diag, rsp->ts_junc);
	bfa_trc(diag, rsp->temp);
	bfa_trc(diag, rsp->ts_brd);
	bfa_trc(diag, rsp->brd_temp);

	/* tsensor status is always good because we always have junction temp */
	diag->tsensor.status = BFA_STATUS_OK;
	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
	diag->tsensor.lock = 0;
}

/*
 *	LED Test command
 */
static void
diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
{
	struct bfi_diag_ledtest_req_s  *msg;

	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
			bfa_ioc_portid(diag->ioc));

	/*
	 * convert the freq from N blinks per 10 sec to
	 * crossbow ontime value.  We do it here because division is needed.
	 */
	if (ledtest->freq)
		ledtest->freq = 500 / ledtest->freq;

	if (ledtest->freq == 0)
		ledtest->freq = 1;

	bfa_trc(diag, ledtest->freq);
	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
	msg->cmd = (u8) ledtest->cmd;
	msg->color = (u8) ledtest->color;
	msg->portid = bfa_ioc_portid(diag->ioc);
	msg->led = ledtest->led;
	msg->freq = cpu_to_be16(ledtest->freq);

	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
}

static void
diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
{
	bfa_trc(diag, diag->ledtest.lock);
	diag->ledtest.lock = BFA_FALSE;
	/* no bfa_cb_queue is needed because driver is not waiting */
}
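/*
 * Editorial worked example for the freq conversion above: a request of
 * 10 blinks per 10 seconds becomes an ontime value of 500 / 10 = 50;
 * anything above 500 blinks per 10 seconds divides down to 0, which the
 * clamp bumps back to 1 so the LED still blinks at the fastest rate the
 * hardware supports.
 */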
/*
 * Port beaconing
 */
static void
diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
{
	struct bfi_diag_portbeacon_req_s *msg;

	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
	/* build host command */
	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
		bfa_ioc_portid(diag->ioc));
	msg->beacon = beacon;
	msg->period = cpu_to_be32(sec);
	/* send mbox cmd */
	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
}

static void
diag_portbeacon_comp(struct bfa_diag_s *diag)
{
	bfa_trc(diag, diag->beacon.state);
	diag->beacon.state = BFA_FALSE;
	if (diag->cbfn_beacon)
		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
}

/*
 *	Diag hmbox handler
 */
static void
bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_diag_s *diag = diagarg;

	switch (msg->mh.msg_id) {
	case BFI_DIAG_I2H_PORTBEACON:
		diag_portbeacon_comp(diag);
		break;
	case BFI_DIAG_I2H_FWPING:
		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_TEMPSENSOR:
		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
		break;
	case BFI_DIAG_I2H_LEDTEST:
		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
		break;
	default:
		bfa_trc(diag, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 * DIAG memtest command
 *
 *   @param[in] *diag           - diag data struct
 *   @param[in] *memtest        - mem test params input from upper layer,
 *   @param[in] pattern         - mem test pattern
 *   @param[in] *result         - mem test result
 *   @param[in] cbfn            - mem test callback function
 *   @param[in] cbarg           - callback function arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
		u32 pattern, struct bfa_diag_memtest_result *result,
		bfa_cb_diag_t cbfn, void *cbarg)
{
	u32	memtest_tov;

	bfa_trc(diag, pattern);

	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
		return BFA_STATUS_ADAPTER_ENABLED;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block) {
		bfa_trc(diag, diag->block);
		return BFA_STATUS_DEVBUSY;
	} else
		diag->block = 1;

	diag->result = result;
	diag->cbfn = cbfn;
	diag->cbarg = cbarg;

	/* download memtest code and take LPU0 out of reset */
	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);

	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
			bfa_diag_memtest_done, diag, memtest_tov);
	diag->timer_active = 1;
	return BFA_STATUS_OK;
}
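/*
 * Editorial note: memtest is destructive.  The adapter must already be
 * disabled (else BFA_STATUS_ADAPTER_ENABLED) and the IOC is rebooted into
 * the BFI_FWBOOT_TYPE_MEMTEST image.  The timeout chosen above -- 50s for
 * the original ASICs, 4.5 minutes for CT2 -- is simply how long the
 * driver waits before bfa_diag_memtest_done() harvests the result block
 * from smem.
 */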
/*
 * DIAG firmware ping command
 *
 *   @param[in] *diag           - diag data struct
 *   @param[in] cnt             - dma loop count for testing PCIE
 *   @param[in] data            - data pattern to pass in fw
 *   @param[in] *result         - pt to bfa_diag_fwping_result_t data struct
 *   @param[in] cbfn            - callback function
 *   @param[in] *cbarg          - callback function arg
 *
 *   @param[out]
 */
bfa_status_t
bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	bfa_trc(diag, cnt);
	bfa_trc(diag, data);

	if (!bfa_ioc_is_operational(diag->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
		return BFA_STATUS_CMD_NOTSUPP;

	/* check to see if there is another destructive diag cmd running */
	if (diag->block || diag->fwping.lock) {
		bfa_trc(diag, diag->block);
		bfa_trc(diag, diag->fwping.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	diag->fwping.lock = 1;
	diag->fwping.cbfn = cbfn;
	diag->fwping.cbarg = cbarg;
	diag->fwping.result = result;
	diag->fwping.data = data;
	diag->fwping.count = cnt;

	/* Init test results */
	diag->fwping.result->data = 0;
	diag->fwping.result->status = BFA_STATUS_OK;

	/* kick off the first ping */
	diag_fwping_send(diag);
	return BFA_STATUS_OK;
}
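/*
 * Usage sketch (editorial, hypothetical caller): each ping has the
 * firmware DMA a BFI_DIAG_DMA_BUF_SZ buffer filled with 'data' for 'cnt'
 * iterations; diag_fwping_comp() then checks both the mailbox echo and
 * the DMA pattern:
 *
 *	if (bfa_diag_fwping(diag, 16, 0xdeadbeef, &my_result,
 *			    my_ping_done, NULL) == BFA_STATUS_OK)
 *		my_ping_done() later reports OK, DATACORRUPTED or
 *		HDMA_FAILED
 */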
4908 * Read Temperature Sensor
4910 * @param[in] *diag - diag data struct
4911 * @param[in] *result - pt to bfa_diag_temp_t data struct
4912 * @param[in] cbfn - callback function
4913 * @param[in] *cbarg - callback functioin arg
4918 bfa_diag_tsensor_query(struct bfa_diag_s
*diag
,
4919 struct bfa_diag_results_tempsensor_s
*result
,
4920 bfa_cb_diag_t cbfn
, void *cbarg
)
4922 /* check to see if there is a destructive diag cmd running */
4923 if (diag
->block
|| diag
->tsensor
.lock
) {
4924 bfa_trc(diag
, diag
->block
);
4925 bfa_trc(diag
, diag
->tsensor
.lock
);
4926 return BFA_STATUS_DEVBUSY
;
4929 if (!bfa_ioc_is_operational(diag
->ioc
))
4930 return BFA_STATUS_IOC_NON_OP
;
4932 /* Init diag mod params */
4933 diag
->tsensor
.lock
= 1;
4934 diag
->tsensor
.temp
= result
;
4935 diag
->tsensor
.cbfn
= cbfn
;
4936 diag
->tsensor
.cbarg
= cbarg
;
4937 diag
->tsensor
.status
= BFA_STATUS_OK
;
4939 /* Send msg to fw */
4940 diag_tempsensor_send(diag
);
4942 return BFA_STATUS_OK
;
4948 * @param[in] *diag - diag data struct
4949 * @param[in] *ledtest - pt to ledtest data structure
4954 bfa_diag_ledtest(struct bfa_diag_s
*diag
, struct bfa_diag_ledtest_s
*ledtest
)
4956 bfa_trc(diag
, ledtest
->cmd
);
4958 if (!bfa_ioc_is_operational(diag
->ioc
))
4959 return BFA_STATUS_IOC_NON_OP
;
4961 if (diag
->beacon
.state
)
4962 return BFA_STATUS_BEACON_ON
;
4964 if (diag
->ledtest
.lock
)
4965 return BFA_STATUS_LEDTEST_OP
;
4967 /* Send msg to fw */
4968 diag
->ledtest
.lock
= BFA_TRUE
;
4969 diag_ledtest_send(diag
, ledtest
);
4971 return BFA_STATUS_OK
;
4975 * Port beaconing command
4977 * @param[in] *diag - diag data struct
4978 * @param[in] beacon - port beaconing 1:ON 0:OFF
4979 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4980 * @param[in] sec - beaconing duration in seconds
4985 bfa_diag_beacon_port(struct bfa_diag_s
*diag
, bfa_boolean_t beacon
,
4986 bfa_boolean_t link_e2e_beacon
, uint32_t sec
)
4988 bfa_trc(diag
, beacon
);
4989 bfa_trc(diag
, link_e2e_beacon
);
4992 if (!bfa_ioc_is_operational(diag
->ioc
))
4993 return BFA_STATUS_IOC_NON_OP
;
4995 if (diag
->ledtest
.lock
)
4996 return BFA_STATUS_LEDTEST_OP
;
4998 if (diag
->beacon
.state
&& beacon
) /* beacon alread on */
4999 return BFA_STATUS_BEACON_ON
;
5001 diag
->beacon
.state
= beacon
;
5002 diag
->beacon
.link_e2e
= link_e2e_beacon
;
5003 if (diag
->cbfn_beacon
)
5004 diag
->cbfn_beacon(diag
->dev
, beacon
, link_e2e_beacon
);
5006 /* Send msg to fw */
5007 diag_portbeacon_send(diag
, beacon
, sec
);
5009 return BFA_STATUS_OK
;
/*
 * Return DMA memory needed by diag module.
 */
u32
bfa_diag_meminfo(void)
{
        return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/*
 * Attach virtual and physical memory for Diag.
 */
void
bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
        bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
{
        diag->dev = dev;
        diag->ioc = ioc;
        diag->trcmod = trcmod;

        diag->block = 0;
        diag->cbfn = NULL;
        diag->cbarg = NULL;
        diag->result = NULL;
        diag->cbfn_beacon = cbfn_beacon;

        bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
        bfa_q_qe_init(&diag->ioc_notify);
        bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
        list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
}

void
bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
{
        diag->fwping.dbuf_kva = dm_kva;
        diag->fwping.dbuf_pa = dm_pa;
        memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
}
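
/*
 * Illustrative sketch (not part of the driver): meminfo/memclaim pair up.
 * The framework first asks how much DMA memory diag needs, carves that
 * much out of one coherent allocation, then hands the region to memclaim.
 * The setup function name is hypothetical.
 */
#if 0
static void
example_diag_setup(struct bfa_diag_s *diag, u8 *dma_kva, u64 dma_pa)
{
        /* dma_kva/dma_pa must cover bfa_diag_meminfo() bytes */
        bfa_diag_memclaim(diag, dma_kva, dma_pa);
}
#endif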
/*
 * PHY module specific
 */
#define BFA_PHY_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
#define BFA_PHY_LOCK_STATUS     0x018878        /* phy semaphore status reg */

static void
bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
{
        int i, m = sz >> 2;

        for (i = 0; i < m; i++)
                obuf[i] = be32_to_cpu(ibuf[i]);
}

static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s *phy)
{
        return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
}

static void
bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
{
        struct bfa_phy_s *phy = cbarg;

        bfa_trc(phy, event);

        switch (event) {
        case BFA_IOC_E_DISABLED:
        case BFA_IOC_E_FAILED:
                if (phy->op_busy) {
                        phy->status = BFA_STATUS_IOC_FAILURE;
                        phy->cbfn(phy->cbarg, phy->status);
                        phy->op_busy = 0;
                }
                break;

        default:
                break;
        }
}

/*
 * Send phy attribute query request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_query_send(void *cbarg)
{
        struct bfa_phy_s *phy = cbarg;
        struct bfi_phy_query_req_s *msg =
                        (struct bfi_phy_query_req_s *) phy->mb.msg;

        msg->instance = phy->instance;
        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
                bfa_ioc_portid(phy->ioc));
        bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}

/*
 * Send phy write request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_write_send(void *cbarg)
{
        struct bfa_phy_s *phy = cbarg;
        struct bfi_phy_write_req_s *msg =
                        (struct bfi_phy_write_req_s *) phy->mb.msg;
        u32     len;
        u16     *buf, *dbuf;
        int     i, sz;

        msg->instance = phy->instance;
        msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
        len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
                        phy->residue : BFA_PHY_DMA_BUF_SZ;
        msg->length = cpu_to_be32(len);

        /* indicate if it's the last msg of the whole write operation */
        msg->last = (len == phy->residue) ? 1 : 0;

        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
                bfa_ioc_portid(phy->ioc));
        bfa_alen_set(&msg->alen, len, phy->dbuf_pa);

        buf = (u16 *) (phy->ubuf + phy->offset);
        dbuf = (u16 *)phy->dbuf_kva;
        sz = len >> 1;
        for (i = 0; i < sz; i++)
                buf[i] = cpu_to_be16(dbuf[i]);

        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);

        phy->residue -= len;
        phy->offset += len;
}
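
/*
 * Illustrative arithmetic (not part of the driver): the write path moves
 * data in BFA_PHY_DMA_BUF_SZ (8k) chunks.  A 20k image therefore goes out
 * as 8k + 8k + 4k messages, and msg->last is set only on the final one.
 */
#if 0
static void
example_chunking(void)
{
        u32 residue = 0x5000;   /* 20k total */
        u32 len;

        while (residue) {
                len = (residue < BFA_PHY_DMA_BUF_SZ) ?
                                residue : BFA_PHY_DMA_BUF_SZ;
                /* 'last' flag corresponds to len == residue */
                residue -= len;
        }
}
#endif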
/*
 * Send phy read request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_read_send(void *cbarg)
{
        struct bfa_phy_s *phy = cbarg;
        struct bfi_phy_read_req_s *msg =
                        (struct bfi_phy_read_req_s *) phy->mb.msg;
        u32     len;

        msg->instance = phy->instance;
        msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
        len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
                        phy->residue : BFA_PHY_DMA_BUF_SZ;
        msg->length = cpu_to_be32(len);
        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
                bfa_ioc_portid(phy->ioc));
        bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}

/*
 * Send phy stats request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_phy_stats_send(void *cbarg)
{
        struct bfa_phy_s *phy = cbarg;
        struct bfi_phy_stats_req_s *msg =
                        (struct bfi_phy_stats_req_s *) phy->mb.msg;

        msg->instance = phy->instance;
        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
                bfa_ioc_portid(phy->ioc));
        bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
}
/*
 * Phy memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_phy_meminfo(bfa_boolean_t mincfg)
{
        /* min driver doesn't need phy */
        if (mincfg)
                return 0;

        return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * Phy attach API.
 *
 * @param[in] phy - phy structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
                struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
        phy->ioc = ioc;
        phy->trcmod = trcmod;
        phy->cbfn = NULL;
        phy->cbarg = NULL;
        phy->op_busy = 0;

        bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
        bfa_q_qe_init(&phy->ioc_notify);
        bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
        list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);

        /* min driver doesn't need phy */
        if (mincfg) {
                phy->dbuf_kva = NULL;
                phy->dbuf_pa = 0;
        }
}
/*
 * Claim memory for phy
 *
 * @param[in] phy - phy structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
                bfa_boolean_t mincfg)
{
        if (mincfg)
                return;

        phy->dbuf_kva = dm_kva;
        phy->dbuf_pa = dm_pa;
        memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
        dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
        dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

bfa_boolean_t
bfa_phy_busy(struct bfa_ioc_s *ioc)
{
        void __iomem *rb;

        rb = bfa_ioc_bar0(ioc);
        return readl(rb + BFA_PHY_LOCK_STATUS);
}
/*
 * Get phy attribute.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] attr - phy attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
                struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
{
        bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
        bfa_trc(phy, instance);

        if (!bfa_phy_present(phy))
                return BFA_STATUS_PHY_NOT_PRESENT;

        if (!bfa_ioc_is_operational(phy->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
                bfa_trc(phy, phy->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        phy->op_busy = 1;
        phy->cbfn = cbfn;
        phy->cbarg = cbarg;
        phy->instance = instance;
        phy->ubuf = (uint8_t *) attr;
        bfa_phy_query_send(phy);

        return BFA_STATUS_OK;
}
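
/*
 * Illustrative sketch (not part of the driver): the attribute query is
 * asynchronous, so the caller's buffer must stay valid until the callback
 * runs.  The callback and wrapper names here are hypothetical.
 */
#if 0
static void
example_attr_done(void *cbarg, bfa_status_t status)
{
        struct bfa_phy_attr_s *attr = cbarg;
        /* 'attr' is byte-swapped and valid when status == BFA_STATUS_OK */
}

static bfa_status_t
example_get_attr(struct bfa_phy_s *phy, struct bfa_phy_attr_s *attr)
{
        return bfa_phy_get_attr(phy, 0 /* instance */, attr,
                        example_attr_done, attr);
}
#endif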
/*
 * Get phy stats.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] stats - pointer to phy stats
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
                struct bfa_phy_stats_s *stats,
                bfa_cb_phy_t cbfn, void *cbarg)
{
        bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
        bfa_trc(phy, instance);

        if (!bfa_phy_present(phy))
                return BFA_STATUS_PHY_NOT_PRESENT;

        if (!bfa_ioc_is_operational(phy->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
                bfa_trc(phy, phy->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        phy->op_busy = 1;
        phy->cbfn = cbfn;
        phy->cbarg = cbarg;
        phy->instance = instance;
        phy->ubuf = (u8 *) stats;
        bfa_phy_stats_send(phy);

        return BFA_STATUS_OK;
}

/*
 * Update phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
                void *buf, u32 len, u32 offset,
                bfa_cb_phy_t cbfn, void *cbarg)
{
        bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
        bfa_trc(phy, instance);
        bfa_trc(phy, len);
        bfa_trc(phy, offset);

        if (!bfa_phy_present(phy))
                return BFA_STATUS_PHY_NOT_PRESENT;

        if (!bfa_ioc_is_operational(phy->ioc))
                return BFA_STATUS_IOC_NON_OP;

        /* 'len' must be in word (4-byte) boundary */
        if (!len || (len & 0x03))
                return BFA_STATUS_FAILED;

        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
                bfa_trc(phy, phy->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        phy->op_busy = 1;
        phy->cbfn = cbfn;
        phy->cbarg = cbarg;
        phy->instance = instance;
        phy->residue = len;
        phy->offset = 0;
        phy->addr_off = offset;
        phy->ubuf = buf;

        bfa_phy_write_send(phy);
        return BFA_STATUS_OK;
}
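
/*
 * Illustrative sketch (not part of the driver): 'len' must be non-zero
 * and 4-byte aligned or the update is rejected up front.  The wrapper
 * name is hypothetical.
 */
#if 0
static bfa_status_t
example_phy_update(struct bfa_phy_s *phy, void *img, u32 img_len,
                bfa_cb_phy_t cbfn, void *cbarg)
{
        if (!img_len || (img_len & 0x03))
                return BFA_STATUS_FAILED;       /* would be rejected anyway */

        /* whole image to offset 0 of instance 0 */
        return bfa_phy_update(phy, 0, img, img_len, 0, cbfn, cbarg);
}
#endif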
/*
 * Read phy image.
 *
 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
                void *buf, u32 len, u32 offset,
                bfa_cb_phy_t cbfn, void *cbarg)
{
        bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
        bfa_trc(phy, instance);
        bfa_trc(phy, len);
        bfa_trc(phy, offset);

        if (!bfa_phy_present(phy))
                return BFA_STATUS_PHY_NOT_PRESENT;

        if (!bfa_ioc_is_operational(phy->ioc))
                return BFA_STATUS_IOC_NON_OP;

        /* 'len' must be in word (4-byte) boundary */
        if (!len || (len & 0x03))
                return BFA_STATUS_FAILED;

        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
                bfa_trc(phy, phy->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        phy->op_busy = 1;
        phy->cbfn = cbfn;
        phy->cbarg = cbarg;
        phy->instance = instance;
        phy->residue = len;
        phy->offset = 0;
        phy->addr_off = offset;
        phy->ubuf = buf;

        bfa_phy_read_send(phy);

        return BFA_STATUS_OK;
}
/*
 * Process phy response messages upon receiving interrupts.
 *
 * @param[in] phyarg - phy structure
 * @param[in] msg - message structure
 */
void
bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
{
        struct bfa_phy_s *phy = phyarg;
        u32     status;

        union {
                struct bfi_phy_query_rsp_s *query;
                struct bfi_phy_stats_rsp_s *stats;
                struct bfi_phy_write_rsp_s *write;
                struct bfi_phy_read_rsp_s *read;
                struct bfi_mbmsg_s   *msg;
        } m;

        m.msg = msg;
        bfa_trc(phy, msg->mh.msg_id);

        if (!phy->op_busy) {
                /* receiving response after ioc failure */
                bfa_trc(phy, 0x9999);
                return;
        }

        switch (msg->mh.msg_id) {
        case BFI_PHY_I2H_QUERY_RSP:
                status = be32_to_cpu(m.query->status);
                bfa_trc(phy, status);

                if (status == BFA_STATUS_OK) {
                        struct bfa_phy_attr_s *attr =
                                (struct bfa_phy_attr_s *) phy->ubuf;
                        bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
                                        sizeof(struct bfa_phy_attr_s));
                        bfa_trc(phy, attr->status);
                        bfa_trc(phy, attr->length);
                }

                phy->status = status;
                phy->op_busy = 0;
                if (phy->cbfn)
                        phy->cbfn(phy->cbarg, phy->status);
                break;
        case BFI_PHY_I2H_STATS_RSP:
                status = be32_to_cpu(m.stats->status);
                bfa_trc(phy, status);

                if (status == BFA_STATUS_OK) {
                        struct bfa_phy_stats_s *stats =
                                (struct bfa_phy_stats_s *) phy->ubuf;
                        bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
                                sizeof(struct bfa_phy_stats_s));
                        bfa_trc(phy, stats->status);
                }

                phy->status = status;
                phy->op_busy = 0;
                if (phy->cbfn)
                        phy->cbfn(phy->cbarg, phy->status);
                break;
        case BFI_PHY_I2H_WRITE_RSP:
                status = be32_to_cpu(m.write->status);
                bfa_trc(phy, status);

                if (status != BFA_STATUS_OK || phy->residue == 0) {
                        phy->status = status;
                        phy->op_busy = 0;
                        if (phy->cbfn)
                                phy->cbfn(phy->cbarg, phy->status);
                } else {
                        bfa_trc(phy, phy->offset);
                        bfa_phy_write_send(phy);
                }
                break;
        case BFI_PHY_I2H_READ_RSP:
                status = be32_to_cpu(m.read->status);
                bfa_trc(phy, status);

                if (status != BFA_STATUS_OK) {
                        phy->status = status;
                        phy->op_busy = 0;
                        if (phy->cbfn)
                                phy->cbfn(phy->cbarg, phy->status);
                } else {
                        u32 len = be32_to_cpu(m.read->length);
                        u16 *buf = (u16 *)(phy->ubuf + phy->offset);
                        u16 *dbuf = (u16 *)phy->dbuf_kva;
                        int i, sz = len >> 1;

                        bfa_trc(phy, phy->offset);
                        bfa_trc(phy, len);

                        for (i = 0; i < sz; i++)
                                buf[i] = be16_to_cpu(dbuf[i]);

                        phy->residue -= len;
                        phy->offset += len;

                        if (phy->residue == 0) {
                                phy->status = status;
                                phy->op_busy = 0;
                                if (phy->cbfn)
                                        phy->cbfn(phy->cbarg, phy->status);
                        } else
                                bfa_phy_read_send(phy);
                }
                break;
        default:
                WARN_ON(1);
        }
}
/*
 * DCONF module specific
 */

/*
 * DCONF state machine events
 */
enum bfa_dconf_event {
        BFA_DCONF_SM_INIT               = 1,    /* dconf Init */
        BFA_DCONF_SM_FLASH_COMP         = 2,    /* read/write to flash */
        BFA_DCONF_SM_WR                 = 3,    /* binding change, map */
        BFA_DCONF_SM_TIMEOUT            = 4,    /* Start timer */
        BFA_DCONF_SM_EXIT               = 5,    /* exit dconf module */
        BFA_DCONF_SM_IOCDISABLE         = 6,    /* IOC disable event */
};

/* forward declaration of DCONF state machine */
static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
                                enum bfa_dconf_event event);
static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
                                enum bfa_dconf_event event);
static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
                                enum bfa_dconf_event event);
static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
                                enum bfa_dconf_event event);
static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
                                enum bfa_dconf_event event);
static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
                                enum bfa_dconf_event event);
static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
                                enum bfa_dconf_event event);

static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
static void bfa_dconf_timer(void *cbarg);
static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
/*
 * Beginning state of dconf module. Waiting for an event to start.
 */
static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
        bfa_status_t bfa_status;
        bfa_trc(dconf->bfa, event);

        switch (event) {
        case BFA_DCONF_SM_INIT:
                if (dconf->min_cfg) {
                        bfa_trc(dconf->bfa, dconf->min_cfg);
                        bfa_fsm_send_event(&dconf->bfa->iocfc,
                                        IOCFC_E_DCONF_DONE);
                        return;
                }
                bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
                bfa_timer_start(dconf->bfa, &dconf->timer,
                        bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
                bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
                                        BFA_FLASH_PART_DRV, dconf->instance,
                                        dconf->dconf,
                                        sizeof(struct bfa_dconf_s), 0,
                                        bfa_dconf_init_cb, dconf->bfa);
                if (bfa_status != BFA_STATUS_OK) {
                        bfa_timer_stop(&dconf->timer);
                        bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
                        bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                        return;
                }
                break;
        case BFA_DCONF_SM_EXIT:
                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                /* fall through */
        case BFA_DCONF_SM_IOCDISABLE:
        case BFA_DCONF_SM_WR:
        case BFA_DCONF_SM_FLASH_COMP:
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
        }
}
/*
 * Read flash for dconf entries and make a call back to the driver once done.
 */
static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
                        enum bfa_dconf_event event)
{
        bfa_trc(dconf->bfa, event);

        switch (event) {
        case BFA_DCONF_SM_FLASH_COMP:
                bfa_timer_stop(&dconf->timer);
                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
                break;
        case BFA_DCONF_SM_TIMEOUT:
                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
                bfa_ioc_suspend(&dconf->bfa->ioc);
                break;
        case BFA_DCONF_SM_EXIT:
                bfa_timer_stop(&dconf->timer);
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                break;
        case BFA_DCONF_SM_IOCDISABLE:
                bfa_timer_stop(&dconf->timer);
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
        }
}

/*
 * DCONF Module is in ready state. Has completed the initialization.
 */
static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
        bfa_trc(dconf->bfa, event);

        switch (event) {
        case BFA_DCONF_SM_WR:
                bfa_timer_start(dconf->bfa, &dconf->timer,
                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
                break;
        case BFA_DCONF_SM_EXIT:
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                break;
        case BFA_DCONF_SM_INIT:
        case BFA_DCONF_SM_IOCDISABLE:
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
        }
}

/*
 * entries are dirty, write back to the flash.
 */
static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
        bfa_trc(dconf->bfa, event);

        switch (event) {
        case BFA_DCONF_SM_TIMEOUT:
                bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
                bfa_dconf_flash_write(dconf);
                break;
        case BFA_DCONF_SM_WR:
                bfa_timer_stop(&dconf->timer);
                bfa_timer_start(dconf->bfa, &dconf->timer,
                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
                break;
        case BFA_DCONF_SM_EXIT:
                bfa_timer_stop(&dconf->timer);
                bfa_timer_start(dconf->bfa, &dconf->timer,
                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
                bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
                bfa_dconf_flash_write(dconf);
                break;
        case BFA_DCONF_SM_FLASH_COMP:
                break;
        case BFA_DCONF_SM_IOCDISABLE:
                bfa_timer_stop(&dconf->timer);
                bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
        }
}

/*
 * Sync the dconf entries to the flash.
 */
static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
                        enum bfa_dconf_event event)
{
        bfa_trc(dconf->bfa, event);

        switch (event) {
        case BFA_DCONF_SM_IOCDISABLE:
        case BFA_DCONF_SM_FLASH_COMP:
                bfa_timer_stop(&dconf->timer);
                /* fall through */
        case BFA_DCONF_SM_TIMEOUT:
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
        }
}

static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
{
        bfa_trc(dconf->bfa, event);

        switch (event) {
        case BFA_DCONF_SM_FLASH_COMP:
                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
                break;
        case BFA_DCONF_SM_WR:
                bfa_timer_start(dconf->bfa, &dconf->timer,
                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
                break;
        case BFA_DCONF_SM_EXIT:
                bfa_timer_start(dconf->bfa, &dconf->timer,
                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
                bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
                break;
        case BFA_DCONF_SM_IOCDISABLE:
                bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
        }
}

static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
                        enum bfa_dconf_event event)
{
        bfa_trc(dconf->bfa, event);

        switch (event) {
        case BFA_DCONF_SM_INIT:
                bfa_timer_start(dconf->bfa, &dconf->timer,
                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
                break;
        case BFA_DCONF_SM_EXIT:
                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
                break;
        case BFA_DCONF_SM_IOCDISABLE:
                break;
        default:
                bfa_sm_fault(dconf->bfa, event);
        }
}
/*
 * Compute and return memory needed by DRV_CFG module.
 */
static void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
                  struct bfa_s *bfa)
{
        struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);

        if (cfg->drvcfg.min_cfg)
                bfa_mem_kva_setup(meminfo, dconf_kva,
                                sizeof(struct bfa_dconf_hdr_s));
        else
                bfa_mem_kva_setup(meminfo, dconf_kva,
                                sizeof(struct bfa_dconf_s));
}

static void
bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_pcidev_s *pcidev)
{
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

        dconf->bfad = bfad;
        dconf->bfa = bfa;
        dconf->instance = bfa->ioc.port_id;
        bfa_trc(bfa, dconf->instance);

        dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
        if (cfg->drvcfg.min_cfg) {
                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
                dconf->min_cfg = BFA_TRUE;
        } else {
                dconf->min_cfg = BFA_FALSE;
                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
        }

        bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
        bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
}

static void
bfa_dconf_init_cb(void *arg, bfa_status_t status)
{
        struct bfa_s *bfa = arg;
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);

        if (status == BFA_STATUS_OK) {
                bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
                if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
                        dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
                if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
                        dconf->dconf->hdr.version = BFI_DCONF_VERSION;
        }
        bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
}

void
bfa_dconf_modinit(struct bfa_s *bfa)
{
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
        bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
}

static void
bfa_dconf_start(struct bfa_s *bfa)
{
}

static void
bfa_dconf_stop(struct bfa_s *bfa)
{
}

static void bfa_dconf_timer(void *cbarg)
{
        struct bfa_dconf_mod_s *dconf = cbarg;
        bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
}

static void
bfa_dconf_iocdisable(struct bfa_s *bfa)
{
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
        bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
}

static void
bfa_dconf_detach(struct bfa_s *bfa)
{
}

static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
{
        bfa_status_t bfa_status;
        bfa_trc(dconf->bfa, 0);

        bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
                                BFA_FLASH_PART_DRV, dconf->instance,
                                dconf->dconf, sizeof(struct bfa_dconf_s), 0,
                                bfa_dconf_cbfn, dconf);
        if (bfa_status != BFA_STATUS_OK)
                WARN_ON(bfa_status);
        bfa_trc(dconf->bfa, bfa_status);

        return bfa_status;
}

bfa_status_t
bfa_dconf_update(struct bfa_s *bfa)
{
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
        bfa_trc(dconf->bfa, 0);
        if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
                return BFA_STATUS_FAILED;

        if (dconf->min_cfg) {
                bfa_trc(dconf->bfa, dconf->min_cfg);
                return BFA_STATUS_FAILED;
        }

        bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
        return BFA_STATUS_OK;
}
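
/*
 * Illustrative sketch (not part of the driver): bfa_dconf_update() only
 * marks the config dirty and arms the BFA_DCONF_UPDATE_TOV timer; the
 * state machine batches repeated updates into a single flash write.
 * The wrapper name is hypothetical.
 */
#if 0
static void
example_dconf_touch(struct bfa_s *bfa)
{
        /* fails while a previous sync is stranded by an IOC disable */
        if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
                bfa_trc(bfa, 0);        /* nothing to do; retry later */
}
#endif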
static void
bfa_dconf_cbfn(void *arg, bfa_status_t status)
{
        struct bfa_dconf_mod_s *dconf = arg;

        bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
}

void
bfa_dconf_modexit(struct bfa_s *bfa)
{
        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
        bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
}

/*
 * FRU specific functions
 */
#define BFA_FRU_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
#define BFA_FRU_CHINOOK_MAX_SIZE        0x10000
#define BFA_FRU_LIGHTNING_MAX_SIZE      0x200

static void
bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
{
        struct bfa_fru_s *fru = cbarg;

        bfa_trc(fru, event);

        switch (event) {
        case BFA_IOC_E_DISABLED:
        case BFA_IOC_E_FAILED:
                if (fru->op_busy) {
                        fru->status = BFA_STATUS_IOC_FAILURE;
                        fru->cbfn(fru->cbarg, fru->status);
                        fru->op_busy = 0;
                }
                break;

        default:
                break;
        }
}
/*
 * Send fru write request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
{
        struct bfa_fru_s *fru = cbarg;
        struct bfi_fru_write_req_s *msg =
                        (struct bfi_fru_write_req_s *) fru->mb.msg;
        u32 len;

        msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
        len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
                                fru->residue : BFA_FRU_DMA_BUF_SZ;
        msg->length = cpu_to_be32(len);

        /*
         * indicate if it's the last msg of the whole write operation
         */
        msg->last = (len == fru->residue) ? 1 : 0;

        bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
        bfa_alen_set(&msg->alen, len, fru->dbuf_pa);

        memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
        bfa_ioc_mbox_queue(fru->ioc, &fru->mb);

        fru->residue -= len;
        fru->offset += len;
}

/*
 * Send fru read request.
 *
 * @param[in] cbarg - callback argument
 */
static void
bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
{
        struct bfa_fru_s *fru = cbarg;
        struct bfi_fru_read_req_s *msg =
                        (struct bfi_fru_read_req_s *) fru->mb.msg;
        u32 len;

        msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
        len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
                                fru->residue : BFA_FRU_DMA_BUF_SZ;
        msg->length = cpu_to_be32(len);
        bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
        bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
        bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
}
/*
 * FRU memory info API.
 *
 * @param[in] mincfg - minimal cfg variable
 */
u32
bfa_fru_meminfo(bfa_boolean_t mincfg)
{
        /* min driver doesn't need fru */
        if (mincfg)
                return 0;

        return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * FRU attach API.
 *
 * @param[in] fru - fru structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 * @param[in] trcmod - trace module
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
        struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
{
        fru->ioc = ioc;
        fru->trcmod = trcmod;
        fru->cbfn = NULL;
        fru->cbarg = NULL;
        fru->op_busy = 0;

        bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
        bfa_q_qe_init(&fru->ioc_notify);
        bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
        list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);

        /* min driver doesn't need fru */
        if (mincfg) {
                fru->dbuf_kva = NULL;
                fru->dbuf_pa = 0;
        }
}

/*
 * Claim memory for fru
 *
 * @param[in] fru - fru structure
 * @param[in] dm_kva - pointer to virtual memory address
 * @param[in] dm_pa - physical memory address
 * @param[in] mincfg - minimal cfg variable
 */
void
bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
        bfa_boolean_t mincfg)
{
        if (mincfg)
                return;

        fru->dbuf_kva = dm_kva;
        fru->dbuf_pa = dm_pa;
        memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
        dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
        dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * Update fru vpd image.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
                bfa_cb_fru_t cbfn, void *cbarg)
{
        bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
        bfa_trc(fru, len);
        bfa_trc(fru, offset);

        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
                return BFA_STATUS_FRU_NOT_PRESENT;

        if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
                return BFA_STATUS_CMD_NOTSUPP;

        if (!bfa_ioc_is_operational(fru->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (fru->op_busy) {
                bfa_trc(fru, fru->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        fru->op_busy = 1;

        fru->cbfn = cbfn;
        fru->cbarg = cbarg;
        fru->residue = len;
        fru->offset = 0;
        fru->addr_off = offset;
        fru->ubuf = buf;

        bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);

        return BFA_STATUS_OK;
}
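
/*
 * Illustrative sketch (not part of the driver): FRU VPD update is only
 * valid on CT2 ASICs with a Chinook card, so callers should expect
 * BFA_STATUS_FRU_NOT_PRESENT or BFA_STATUS_CMD_NOTSUPP elsewhere.
 * The wrapper name is hypothetical.
 */
#if 0
static bfa_status_t
example_vpd_write(struct bfa_fru_s *fru, void *vpd, u32 vpd_len,
                bfa_cb_fru_t cbfn, void *cbarg)
{
        return bfa_fruvpd_update(fru, vpd, vpd_len, 0 /* offset */,
                        cbfn, cbarg);
}
#endif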
/*
 * Read fru vpd image.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
                bfa_cb_fru_t cbfn, void *cbarg)
{
        bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
        bfa_trc(fru, len);
        bfa_trc(fru, offset);

        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
                return BFA_STATUS_FRU_NOT_PRESENT;

        if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
                return BFA_STATUS_CMD_NOTSUPP;

        if (!bfa_ioc_is_operational(fru->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (fru->op_busy) {
                bfa_trc(fru, fru->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        fru->op_busy = 1;

        fru->cbfn = cbfn;
        fru->cbarg = cbarg;
        fru->residue = len;
        fru->offset = 0;
        fru->addr_off = offset;
        fru->ubuf = buf;

        bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);

        return BFA_STATUS_OK;
}

/*
 * Get maximum size fru vpd image.
 *
 * @param[in] fru - fru structure
 * @param[out] max_size - maximum size of fru vpd data
 */
bfa_status_t
bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
{
        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
                return BFA_STATUS_FRU_NOT_PRESENT;

        if (!bfa_ioc_is_operational(fru->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK)
                *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
        else
                return BFA_STATUS_CMD_NOTSUPP;
        return BFA_STATUS_OK;
}
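
/*
 * Illustrative sketch (not part of the driver): size the read buffer from
 * bfa_fruvpd_get_max_size() before pulling the whole VPD image.  The
 * wrapper name is hypothetical.
 */
#if 0
static bfa_status_t
example_vpd_read_all(struct bfa_fru_s *fru, void *buf,
                bfa_cb_fru_t cbfn, void *cbarg)
{
        u32 max_size;
        bfa_status_t status;

        status = bfa_fruvpd_get_max_size(fru, &max_size);
        if (status != BFA_STATUS_OK)
                return status;

        /* 'buf' must be at least max_size bytes */
        return bfa_fruvpd_read(fru, buf, max_size, 0, cbfn, cbarg);
}
#endif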
/*
 * tfru write.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
                bfa_cb_fru_t cbfn, void *cbarg)
{
        bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
        bfa_trc(fru, len);
        bfa_trc(fru, offset);
        bfa_trc(fru, *((u8 *) buf));

        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
                return BFA_STATUS_FRU_NOT_PRESENT;

        if (!bfa_ioc_is_operational(fru->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (fru->op_busy) {
                bfa_trc(fru, fru->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        fru->op_busy = 1;

        fru->cbfn = cbfn;
        fru->cbarg = cbarg;
        fru->residue = len;
        fru->offset = 0;
        fru->addr_off = offset;
        fru->ubuf = buf;

        bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);

        return BFA_STATUS_OK;
}

/*
 * tfru read.
 *
 * @param[in] fru - fru structure
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 */
bfa_status_t
bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
                bfa_cb_fru_t cbfn, void *cbarg)
{
        bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
        bfa_trc(fru, len);
        bfa_trc(fru, offset);

        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
                return BFA_STATUS_FRU_NOT_PRESENT;

        if (!bfa_ioc_is_operational(fru->ioc))
                return BFA_STATUS_IOC_NON_OP;

        if (fru->op_busy) {
                bfa_trc(fru, fru->op_busy);
                return BFA_STATUS_DEVBUSY;
        }

        fru->op_busy = 1;

        fru->cbfn = cbfn;
        fru->cbarg = cbarg;
        fru->residue = len;
        fru->offset = 0;
        fru->addr_off = offset;
        fru->ubuf = buf;

        bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);

        return BFA_STATUS_OK;
}
/*
 * Process fru response messages upon receiving interrupts.
 *
 * @param[in] fruarg - fru structure
 * @param[in] msg - message structure
 */
void
bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
{
        struct bfa_fru_s *fru = fruarg;
        struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
        u32 status;

        bfa_trc(fru, msg->mh.msg_id);

        if (!fru->op_busy) {
                /*
                 * receiving response after ioc failure
                 */
                bfa_trc(fru, 0x9999);
                return;
        }

        switch (msg->mh.msg_id) {
        case BFI_FRUVPD_I2H_WRITE_RSP:
        case BFI_TFRU_I2H_WRITE_RSP:
                status = be32_to_cpu(rsp->status);
                bfa_trc(fru, status);

                if (status != BFA_STATUS_OK || fru->residue == 0) {
                        fru->status = status;
                        fru->op_busy = 0;
                        if (fru->cbfn)
                                fru->cbfn(fru->cbarg, fru->status);
                } else {
                        bfa_trc(fru, fru->offset);
                        if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
                                bfa_fru_write_send(fru,
                                        BFI_FRUVPD_H2I_WRITE_REQ);
                        else
                                bfa_fru_write_send(fru,
                                        BFI_TFRU_H2I_WRITE_REQ);
                }
                break;
        case BFI_FRUVPD_I2H_READ_RSP:
        case BFI_TFRU_I2H_READ_RSP:
                status = be32_to_cpu(rsp->status);
                bfa_trc(fru, status);

                if (status != BFA_STATUS_OK) {
                        fru->status = status;
                        fru->op_busy = 0;
                        if (fru->cbfn)
                                fru->cbfn(fru->cbarg, fru->status);
                } else {
                        u32 len = be32_to_cpu(rsp->length);

                        bfa_trc(fru, fru->offset);
                        bfa_trc(fru, len);

                        memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
                        fru->residue -= len;
                        fru->offset += len;

                        if (fru->residue == 0) {
                                fru->status = status;
                                fru->op_busy = 0;
                                if (fru->cbfn)
                                        fru->cbfn(fru->cbarg, fru->status);
                        } else {
                                if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
                                        bfa_fru_read_send(fru,
                                                BFI_FRUVPD_H2I_READ_REQ);
                                else
                                        bfa_fru_read_send(fru,
                                                BFI_TFRU_H2I_READ_REQ);