/* linux-2.6.36/drivers/scsi/bfa/bfa_ioc.c (tomato.git, RT-AC56 3.0.0.4.374.37) */
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(CNA, IOC);
/**
 * IOC local definitions
 */
#define BFA_IOC_TOV		2000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
		((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
		((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
#define bfa_ioc_is_optrom(__ioc)	\
	(bfi_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)

bfa_boolean_t bfa_auto_recover = BFA_TRUE;
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
/**
 *  bfa_ioc_sm
 */

/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/* IOC enable request */
	IOC_E_DISABLE		= 2,	/* IOC disable request */
	IOC_E_TIMEOUT		= 3,	/* f/w response timeout */
	IOC_E_FWREADY		= 4,	/* f/w initialization done */
	IOC_E_FWRSP_GETATTR	= 5,	/* IOC get attribute response */
	IOC_E_FWRSP_ENABLE	= 6,	/* enable f/w response */
	IOC_E_FWRSP_DISABLE	= 7,	/* disable f/w response */
	IOC_E_HBFAIL		= 8,	/* heartbeat failure */
	IOC_E_HWERROR		= 9,	/* hardware error interrupt */
	IOC_E_SEMLOCKED		= 10,	/* h/w semaphore is locked */
	IOC_E_DETACH		= 11,	/* driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
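/*
 * Illustrative sketch (not compiled, not part of the driver): how the FSM
 * plumbing above is typically exercised. bfa_fsm_set_state() runs the new
 * state's _entry action, bfa_fsm_send_event() dispatches an event to the
 * current state handler, and ioc_sm_table maps the internal handler back to
 * the external bfa_ioc_state enum. example_ioc_bringup() is hypothetical;
 * only the bfa_* names declared above come from this file.
 */
#if 0
static void
example_ioc_bringup(struct bfa_ioc_s *ioc)
{
	enum bfa_ioc_state st;

	/* Start from reset; the entry action clears retry_count */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);

	/* An enable request moves the IOC into the fwcheck state */
	bfa_fsm_send_event(ioc, IOC_E_ENABLE);

	/* The current handler can be mapped to an external state enum */
	st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}
#endif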
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/**
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 *  bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
bfa_boolean_t
bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = bfa_reg_read(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		bfa_os_udelay(2);
		r32 = bfa_reg_read(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_HWSEM_TOV);
}
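/*
 * Illustrative sketch (not compiled, not part of the driver): the hardware
 * semaphore pairing used above. A read that returns 0 acquires the
 * semaphore; a write of 1 releases it; on contention the acquire is retried
 * from the BFA_IOC_HWSEM_TOV timer via bfa_ioc_sem_timeout().
 * example_critical_section() is hypothetical.
 */
#if 0
static void
example_sem_usage(struct bfa_ioc_s *ioc)
{
	/* read-to-acquire: 0 means we now own the semaphore */
	if (bfa_reg_read(ioc->ioc_regs.ioc_sem_reg) == 0) {
		example_critical_section(ioc);	/* hypothetical */
		bfa_ioc_hw_sem_release(ioc);	/* write 1 to release */
	}
}
#endif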
void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}
/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
/**
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32 r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32 hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
			ioc, BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}
/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	if (bfa_ioc_is_optrom(ioc))
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfi_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
		      bfa_os_swap32(boot_param));
}
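/*
 * Illustrative sketch (not compiled, not part of the driver): the SMEM
 * paging arithmetic the download loop above relies on. The host reaches
 * shared memory through a page window; whenever the in-page offset wraps
 * back to 0, the page number register must be advanced before the next
 * access. example_smem_walk() and nwords are hypothetical.
 */
#if 0
static void
example_smem_walk(struct bfa_ioc_s *ioc, u32 nwords)
{
	u32 pgnum = bfa_ioc_smem_pgnum(ioc, 0);
	u32 loff = 0;
	u32 i;

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
	for (i = 0; i < nwords; i++) {
		loff += sizeof(u32);
		loff = PSS_SMEM_PGOFF(loff);	/* mask to in-page offset */
		if (loff == 0) {		/* crossed a page boundary */
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
}
#endif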
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->card_type = bfa_os_ntohl(attr->card_type);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/**
 *  bfa_ioc_public
 */

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/**
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/**
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
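/*
 * Illustrative sketch (not compiled, not part of the driver): the
 * attach-time ordering the comments above prescribe -- attach first, then
 * size and claim the firmware-trace buffer, then enable. example_drv_init()
 * and example_alloc() are hypothetical; the bfa_ioc_* calls are the ones
 * defined in this file.
 */
#if 0
static void
example_drv_init(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
		 struct bfa_timer_mod_s *tmod)
{
	int trcsz;

	bfa_ioc_attach(ioc, bfa, cbfn, tmod, NULL, NULL, NULL);
	trcsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
	bfa_ioc_debug_memclaim(ioc, example_alloc(trcsz));	/* hypothetical */
	bfa_ioc_enable(ioc);
}
#endif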
u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}

/**
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
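/*
 * Illustrative sketch (not compiled, not part of the driver): queueing a
 * command through bfa_ioc_mbox_queue(). A caller builds its request inside
 * cmd->msg and relies on bfa_ioc_mbox_poll() to drain the queue when the
 * mailbox is busy. example_send_cmd() and the choice of request layout are
 * hypothetical; bfi_h2i_set() and bfa_ioc_portid() are used as in this file.
 */
#if 0
static void
example_send_cmd(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *)cmd->msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_queue(ioc, cmd);	/* sends now, or queues for poll */
}
#endif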
/**
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;
	int mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	/* mbhdlr[] has BFI_MC_MAX entries; reject out-of-range classes */
	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
#ifndef BFA_BIOS_BUILD

/**
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/**
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
/**
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32 ioc_state;
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	return BFA_TRUE;
}

/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	bfa_os_memcpy((void *)serial_num,
		      (void *)ioc->attr->brcd_serialnum,
		      BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	bfa_assert(chip_rev);

	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s *ioc_attr;

	bfa_assert(model);
	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}

enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
/**
 *  bfa_wwn_public
 */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->pwwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->nwwn;
}

wwn_t
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return bfa_ioc_get_mfg_mac(ioc);
	else
		return ioc->attr->mac;
}

wwn_t
bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

wwn_t
bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_nwwn;
}

mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
/**
 * Send AEN notification
 */
void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = ioc->logm;
	s32 inst_num = 0;
	enum bfa_ioc_type_e ioc_type;

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);

	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
		break;
	}
	aen_data.ioc.ioc_type = ioc_type;
}
/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/**
 * Clear saved firmware trace
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
}

/**
 * Retrieve current firmware trace from IOC shared memory.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 pgnum;
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int i, tlen;
	u32 *tbuf = trcdata, r32;

	bfa_trc(ioc, *trclen);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	loff = bfa_ioc_smem_pgoff(ioc, loff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
		return BFA_STATUS_FAILED;

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;
	tlen /= sizeof(u32);

	bfa_trc(ioc, tlen);

	for (i = 0; i < tlen; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		tbuf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);

	*trclen = tlen * sizeof(u32);
	return BFA_STATUS_OK;
}
/**
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;

	if (ioc->attr->nwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
	if (ioc->attr->pwwn == 0)
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
}

#endif