bna: Brocade 1860 HW Enablement
drivers/net/ethernet/brocade/bna/bfa_ioc.c
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
/**
 * IOC local definitions
 */

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
		((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
		((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)			\
		(!list_empty(&((__ioc)->mbox_mod.cmd_q)) ||	\
		readl((__ioc)->ioc_regs.hfn_mbox_cmd))
static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
/**
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
/**
 * IOCPF state machine definitions/declarations
 */

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
/**
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};
/**
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/**
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}
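/**
 * IOC is being enabled; start the IOCPF state machine.
 */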
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}
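/**
 * IOC is operational; notify enable completion and registered modules.
 */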
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
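/**
 * IOC is disabled; it can be re-enabled, disabled again, or detached.
 */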
static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/**
 * IOC hardware failure.
 */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Notify enable completion callback
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (iocpf->fw_mismatch_notified == false)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
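/**
 * Semaphore is held; reset the IOC h/w and begin firmware boot.
 */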
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, 0);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}
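/**
 * Firmware is ready; send the IOC enable request and arm the f/w
 * response timer.
 */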
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/**
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}
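/**
 * IOCPF is enabled; notify the IOC state machine.
 */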
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}
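/**
 * Re-acquire the h/w semaphore before leaving the sync group.
 */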
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
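/**
 * Hold the h/w semaphore while synchronizing the init-failure state.
 */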
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/**
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/**
 * @brief
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * BFA IOC private functions
 */

/**
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
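/**
 * Spin on the given h/w semaphore register; returns true if the
 * semaphore was acquired within BFA_SEM_SPINCNT attempts.
 */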
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}
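/**
 * If the firmware state is not uninit and the running image was not
 * booted in the normal execution context, force the state back to
 * uninit and clear any stale h/w semaphore.
 */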
static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_UNINIT)
		return;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		return;

	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}
/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
			return false;
	}

	return true;
}
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}
void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
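/**
 * Copy a message into the host-to-fw mailbox registers, zero-pad it to
 * BFI_IOC_MSGLEN_MAX, and ring the doorbell.
 */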
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
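/**
 * Send an IOC enable request to firmware, stamped with the PCI class
 * code and the current time.
 */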
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
/**
 * @brief
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
		ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
					ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
}
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * BFA ioc enable reply by firmware
 */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
			u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/**
 * @brief
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/*
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}
/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
/**
 * IOC public
 */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
		u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}
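/**
 * Read a pending fw-to-host mailbox message, ack the interrupt, and
 * return true if a message was fetched.
 */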
static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return true;
}
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->fcmode = false;
	ioc->pllinit = false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
}
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		enum bfi_pcifn_class clscode)
{
	ioc->clscode = clscode;
	ioc->pcidev = *pcidev;

	/**
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case PCI_DEVICE_ID_BROCADE_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
			pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode = BFI_ASIC_MODE_FC16;
			ioc->fcmode = true;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		BUG_ON(1);
	}

	/**
	 * Set asic specific interfaces.
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_nw_ioc_set_ct_hwif(ioc);
	else
		bfa_nw_ioc_set_ct2_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
			bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	cmd->cbfn = cbfn;
	cmd->cbarg = cbarg;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	return false;
}
/**
 * Handle mailbox interrupts
 */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/**
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/**
	 * Try to send pending mailbox commands
	 */
	bfa_ioc_mbox_poll(ioc);
}
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/**
 * return true if IOC is disabled
 */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}

#define BFA_MFG_NAME "Brocade"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}
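/**
 * Derive the IOC type from the PCI class code and configured port mode.
 */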
static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy(serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
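/**
 * Return the current IOC state, refined by the IOCPF state when the IOC
 * is enabling or has failed.
 */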
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
/**
 * WWN public
 */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}
/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
/**
 * @dg hal_iocpf_pvt BFA IOC PF private functions
 * @{
 */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
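/**
 * Poll for firmware initialization to complete; fire the IOCPF timeout
 * once poll_time exceeds BFA_IOC_TOV.
 */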
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_nw_iocpf_timeout(ioc);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}