/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	5
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_INITFAILED	= 8,	/* failure notice by iocpf sm	*/
	IOC_E_PFFAILED		= 9,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 10,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 11,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 12,	/* timeout			*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		bfa_ioc_fail_notify(ioc);

		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_INITFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				iocpf->retry_count = 0;
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	case IOCPF_E_FWREADY:
		if (bfa_ioc_is_operational(ioc)) {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		} else {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		}
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_ack(ioc);
		iocpf->retry_count++;
		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	WARN_ON(cnt >= BFA_SEM_SPINCNT);
	return BFA_FALSE;
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
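/*
 * Editor's note: an illustrative sketch (not driver code) of the
 * read-to-acquire / write-to-release semaphore protocol described above,
 * using a bounded busy-wait instead of the event-driven retry timer that
 * the state machine uses. The helper name is hypothetical.
 */
#if 0
static bfa_boolean_t
hw_sem_try_example(struct bfa_ioc_s *ioc, int max_tries)
{
	int attempt;

	for (attempt = 0; attempt < max_tries; attempt++) {
		/* a read of 0 means this caller now owns the semaphore */
		if (readl(ioc->ioc_regs.ioc_sem_reg) == 0)
			return BFA_TRUE;
		udelay(2);
	}
	/* the owner eventually releases with writel(1, ...ioc_sem_reg) */
	return BFA_FALSE;
}
#endif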
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
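/*
 * Editor's note: a hedged usage sketch (not driver code) showing how
 * bfa_ioc_fwver_get() and bfa_ioc_fwver_cmp() combine to decide whether
 * the firmware already running in SMEM matches the image linked into
 * the driver; bfa_ioc_fwver_valid() below does this for real, adding
 * signature and boot-env checks. The helper name is hypothetical.
 */
#if 0
static bfa_boolean_t
fw_matches_driver_example(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s hdr;

	bfa_ioc_fwver_get(ioc, &hdr);		/* header of running f/w */
	return bfa_ioc_fwver_cmp(ioc, &hdr);	/* md5 compare vs driver image */
}
#endif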
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
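/*
 * Editor's note: a hedged illustration (not driver code) of the h2i
 * mailbox protocol implemented above -- build a bfi request with a
 * proper message header, then hand it to bfa_ioc_mbox_send(), which
 * copies it into the mailbox window, zero-pads up to BFI_IOC_MSGLEN_MAX,
 * and rings the CMD doorbell (the trailing readl() flushes the posted
 * write). Modeled on bfa_ioc_send_disable() below; the helper name is
 * hypothetical.
 */
#if 0
static void
mbox_send_example(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s req;

	bfi_h2i_set(req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &req, sizeof(req));
}
#endif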
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
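/*
 * Editor's note: the heartbeat scheme above in a nutshell -- firmware
 * increments the heartbeat register as it runs; every BFA_IOC_HB_TOV
 * msecs the driver samples it, and an unchanged count means no forward
 * progress for a whole interval, so bfa_ioc_recover() is invoked. A
 * hedged sketch of the same check, with a hypothetical helper name:
 */
#if 0
static bfa_boolean_t
fw_alive_example(struct bfa_ioc_s *ioc)
{
	u32 now = readl(ioc->ioc_regs.heartbeat);
	bfa_boolean_t alive = (now != ioc->hb_count);

	ioc->hb_count = now;	/* remember the sample for the next check */
	return alive;
}
#endif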
/*
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
		      swab32(boot_env));
}
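/*
 * Editor's note: the download loop above moves the image one 32-bit word
 * at a time through two sliding windows -- the source image is consumed
 * in flash chunks (BFA_IOC_FLASH_CHUNK_* macros) and written through a
 * paged SMEM aperture (PSS_SMEM_PG* macros) that must be re-pointed
 * whenever the page offset wraps to 0. A hedged sketch of the indexing
 * only, with CHUNK_WORDS/PAGE_WORDS standing in for the real window
 * sizes:
 */
#if 0
static void
window_walk_example(u32 total_words)
{
	u32 i;

	for (i = 0; i < total_words; i++) {
		u32 chunk  = i / CHUNK_WORDS;	/* which image chunk */
		u32 within = i % CHUNK_WORDS;	/* word within that chunk */
		u32 page   = i / PAGE_WORDS;	/* which SMEM page to map */
		u32 pgoff  = (i % PAGE_WORDS) * sizeof(u32);

		(void) chunk; (void) within; (void) page; (void) pgoff;
	}
}
#endif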
/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
	attr->card_type    = be32_to_cpu(attr->card_type);
	attr->maxfrsize    = be16_to_cpu(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32				stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
}
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
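/*
 * Editor's note: typical usage of bfa_ioc_mbox_queue(), modeled on
 * bfa_ioc_send_fwsync() later in this file -- build the request inside
 * the command's embedded message buffer, then queue it; it is sent
 * immediately if the mailbox is free, otherwise later from the poll
 * timer. The helper name is hypothetical.
 */
#if 0
static void
mbox_queue_example(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;	/* must stay valid while queued */
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_queue(ioc, &cmd);
}
#endif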
/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	bfa_ioc_msgget(ioc, &m);

	/*
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
/*
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;
	void __iomem *rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}
/*
 * Reset IOC fwstate registers.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
				 !ad_attr->is_mezz;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		WARN_ON(ioc->ioc_mc != BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	WARN_ON(!chip_rev);

	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	WARN_ON(!model);
	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/*
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}

	return ioc_st;
}
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Check the IOC type and return the appropriate MAC
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return ioc->attr->fcoe_mac;
	else
		return ioc->attr->mac;
}
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
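/*
 * Illustration (hypothetical values): with a manufactured base MAC of
 * 00:05:1e:00:00:00, PCI function 2 yields 00:05:1e:00:00:02 -- old
 * WWN/MAC model cards bump only the last byte, while newer cards derive
 * the per-function address over the low three bytes via
 * bfa_mfg_increment_wwn_mac().
 */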
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}
/*
 * Retrieve the current firmware trace, read directly from firmware
 * shared memory (smem).
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}
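/*
 * Usage sketch (not compiled): both bfa_ioc_debug_fwsave() and
 * bfa_ioc_debug_fwtrc() take *trclen as the buffer capacity on entry
 * and return the number of bytes actually copied in the same variable.
 * The wrapper name and buffer below are hypothetical.
 */
#if 0
static bfa_status_t
example_fetch_fwtrc(struct bfa_ioc_s *ioc, void *buf, int bufsz)
{
	int tlen = bufsz;	/* in: capacity, out: bytes copied */
	bfa_status_t status;

	status = bfa_ioc_debug_fwtrc(ioc, buf, &tlen);
	if (status == BFA_STATUS_OK)
		bfa_trc(ioc, tlen);	/* tlen <= min(bufsz, BFA_DBG_FWTRC_LEN) */
	return status;
}
#endif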
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);
}
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending the fw sync mbox command, wait for it to take
	 * effect. We do not wait for a response because:
	 *    1. the fw_sync mbox command has no response;
	 *    2. even if it did, interrupts might not be enabled when
	 *	 this function is called.
	 * So just keep polling until no mbox command is pending, and give
	 * up after a bounded number of iterations -- the firmware may have
	 * crashed and the command may never be acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
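/*
 * Usage sketch (not compiled): bfa_ioc_debug_fwcore() is a cursor-style
 * API -- the caller passes the running *offset back in on each call, and
 * the routine resets *offset to 0 once all of smem has been read. The
 * wrapper name, chunk buffer, and size below are hypothetical.
 */
#if 0
static bfa_status_t
example_dump_fwcore(struct bfa_ioc_s *ioc, void *chunk, int chunksz)
{
	u32 offset = 0;
	int buflen;
	bfa_status_t status;

	do {
		buflen = chunksz;
		status = bfa_ioc_debug_fwcore(ioc, chunk, &offset, &buflen);
		if (status != BFA_STATUS_OK)
			return status;
		/* consume buflen bytes from chunk here */
	} while (offset != 0);	/* offset wraps to 0 at end of smem */

	return BFA_STATUS_OK;
}
#endif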
/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF +
		   BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF +
		   BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
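/*
 * Usage sketch (not compiled): the stats_busy flag serializes smem
 * access, so a concurrent get/clear returns BFA_STATUS_DEVBUSY and the
 * caller is expected to retry later. The wrapper name and retry policy
 * below are hypothetical.
 */
#if 0
static bfa_status_t
example_sample_and_reset_stats(struct bfa_ioc_s *ioc,
			       struct bfa_fw_stats_s *stats)
{
	bfa_status_t status;

	status = bfa_ioc_fw_stats_get(ioc, stats);
	if (status == BFA_STATUS_DEVBUSY)
		return status;	/* another reader is active; retry later */
	if (status != BFA_STATUS_OK)
		return status;

	return bfa_ioc_fw_stats_clear(ioc);
}
#endif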
/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}
/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
/*
 * BFA IOC PF private functions
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * bfa timer function
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
/*
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}
/*
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}