drivers/scsi/bfa/bfa_ioc.c
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(CNA, IOC);

/**
 * IOC local definitions
 */
#define BFA_IOC_TOV		2000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
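/*
 * Note: BFA_DBG_FWTRC_LEN is the size of one per-function firmware trace
 * region: the bfa_trc_mod_s header (its full size minus the BFA_TRC_MAX
 * entries it normally embeds) plus BFA_DBG_FWTRC_ENTS trace entries. A
 * hedged sketch of the same computation, written out for readability:
 *
 *	size_t hdr = sizeof(struct bfa_trc_mod_s)
 *		   - BFA_TRC_MAX * sizeof(struct bfa_trc_s);
 *	size_t len = hdr + BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s);
 *	size_t off = BFI_IOC_TRC_OFF + len * fn;  - fn: PCI function number
 */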
/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_fwimg_get_chunk(__ioc, __off)		\
			((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
#define bfa_ioc_fwimg_get_size(__ioc)			\
			((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
			     enum bfa_ioc_aen_event event);
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);

/**
 * bfa_ioc_sm
 */

/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/* IOC enable request */
	IOC_E_DISABLE		= 2,	/* IOC disable request */
	IOC_E_TIMEOUT		= 3,	/* f/w response timeout */
	IOC_E_FWREADY		= 4,	/* f/w initialization done */
	IOC_E_FWRSP_GETATTR	= 5,	/* IOC get attribute response */
	IOC_E_FWRSP_ENABLE	= 6,	/* enable f/w response */
	IOC_E_FWRSP_DISABLE	= 7,	/* disable f/w response */
	IOC_E_HBFAIL		= 8,	/* heartbeat failure */
	IOC_E_HWERROR		= 9,	/* hardware error interrupt */
	IOC_E_SEMLOCKED		= 10,	/* h/w semaphore is locked */
	IOC_E_DETACH		= 11,	/* driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
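/*
 * The table above pairs each state-machine handler with the externally
 * visible enum bfa_ioc_state. A hedged sketch of how a lookup such as
 * bfa_sm_to_state() (used by bfa_ioc_get_state() below) can walk it,
 * assuming bfa_sm_table_s has 'sm' and 'state' members as the
 * initializers suggest:
 *
 *	int i = 0;
 *	while (ioc_sm_table[i].sm && ioc_sm_table[i].sm != ioc->fsm)
 *		i++;
 *	return ioc_sm_table[i].state;
 */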
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/**
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

bfa_boolean_t
bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = bfa_reg_read(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		bfa_os_udelay(2);
		r32 = bfa_reg_read(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}
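/*
 * Usage sketch (illustrative only): bfa_ioc_sem_get()/bfa_ioc_sem_release()
 * form a busy-wait acquire/release pair around a hardware semaphore
 * register -- reading 0 means "acquired", writing 1 releases. This mirrors
 * how bfa_ioc_debug_fwtrc() below serializes against PLL init:
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == BFA_FALSE)
 *		return BFA_STATUS_FAILED;
 *	... access shared memory ...
 *	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 */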
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_HWSEM_TOV);
}

void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}

/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32 r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/**
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/**
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); /* readback flushes the posted doorbell write */
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32 hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
			hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
			ioc, BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}
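/*
 * Heartbeat monitoring, in short: bfa_ioc_hb_monitor() samples the firmware
 * heartbeat counter and arms a BFA_IOC_HB_TOV timer; on every expiry,
 * bfa_ioc_hb_check() compares the counter against the last sample -- if it
 * has not advanced the firmware is presumed dead and bfa_ioc_recover() runs,
 * otherwise the sample is updated, pending mailbox commands are re-polled,
 * and the timer is re-armed. Illustrative timeline (500 ms TOV):
 *
 *	t = 0		hb_count = N		(monitor starts)
 *	t = 500 ms	hb_count = N + k, k > 0	-> re-arm timer
 *	t = 1000 ms	hb_count unchanged	-> recovery (IOC_E_HBFAIL)
 */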
/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_ioc_fwimg_get_chunk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/**
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
		      bfa_os_swap32(boot_param));
}
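/*
 * The download loop above walks the firmware image one 32-bit word at a
 * time; BFA_IOC_FLASH_CHUNK_NO() / BFA_IOC_FLASH_OFFSET_IN_CHUNK() split
 * the flat word index i into a chunk number and an offset within the
 * current chunk, so a fresh chunk is fetched only on a chunk boundary. A
 * hedged sketch of the equivalent arithmetic, assuming a chunk holds
 * CHUNK_WORDS 32-bit words (the real size comes from the macros):
 *
 *	u32 chunk = i / CHUNK_WORDS;	- BFA_IOC_FLASH_CHUNK_NO(i)
 *	u32 off   = i % CHUNK_WORDS;	- BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)
 */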
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * bfa_ioc_public
 */

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/**
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/**
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to set up this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
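/*
 * Attach-time usage sketch (illustrative; the allocation scheme is up to
 * the caller): size the crash-trace buffer, then hand it to the IOC before
 * enabling it.
 *
 *	int trcsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
 *	void *fwsave = trcsz ? kzalloc(trcsz, GFP_KERNEL) : NULL;
 *
 *	bfa_ioc_debug_memclaim(ioc, fwsave);
 *	bfa_ioc_enable(ioc);
 */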
u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}

/**
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
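/*
 * Caller-side sketch (illustrative): a module builds a message in a
 * bfa_mbox_cmd_s it owns and queues it; delivery happens immediately if
 * the mailbox is free, otherwise on a later bfa_ioc_mbox_poll() heartbeat
 * tick. The cmd storage must stay valid until the command is sent.
 *
 *	struct bfa_mbox_cmd_s *cmd = ...;	- owned by the caller
 *	struct bfi_ioc_ctrl_req_s *req = (void *)cmd->msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, cmd);
 */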
/**
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;
	int mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

#ifndef BFA_BIOS_BUILD

/**
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/**
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
		|| bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/**
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32 ioc_state;
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	return BFA_TRUE;
}

/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
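/*
 * Registration sketch (illustrative): a common module embeds a
 * bfa_ioc_hbfail_notify_s, points it at its callback, and registers it;
 * the callback then runs from bfa_ioc_sm_hbfail_entry() on heartbeat loss.
 * Field names follow how the notify struct is consumed above; the callback
 * and argument below are hypothetical.
 *
 *	static struct bfa_ioc_hbfail_notify_s port_hbfail;
 *
 *	port_hbfail.cbfn = port_iocdisable;	- module callback
 *	port_hbfail.cbarg = port;
 *	bfa_ioc_hbfail_register(ioc, &port_hbfail);
 */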
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
}

enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	bfa_os_memcpy((void *)serial_num,
		      (void *)ioc->attr->brcd_serialnum,
		      BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	bfa_assert(chip_rev);

	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s *ioc_attr;
	u8 nports;
	u8 max_speed;

	bfa_assert(model);
	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	nports = bfa_ioc_get_nports(ioc);
	max_speed = bfa_ioc_speed_sup(ioc);

	/**
	 * model name
	 */
	if (max_speed == 10) {
		strcpy(model, "BR-10?0");
		model[5] = '0' + nports;
	} else {
		strcpy(model, "Brocade-??5");
		model[8] = '0' + max_speed;
		model[9] = '0' + nports;
	}
}

enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/**
 * hal_wwn_public
 */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t wwn;
		u8 byte[sizeof(wwn_t)];
	} w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	return w.wwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	union {
		wwn_t wwn;
		u8 byte[sizeof(wwn_t)];
	} w;

	w.wwn = ioc->attr->mfg_wwn;

	if (bfa_ioc_portid(ioc) == 1)
		w.byte[7]++;

	w.byte[0] = 0x20;

	return w.wwn;
}

wwn_t
bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
{
	union {
		wwn_t wwn;
		u8 byte[sizeof(wwn_t)];
	} w, w5;

	bfa_trc(ioc, inst);

	w.wwn = ioc->attr->mfg_wwn;
	w5.byte[0] = 0x50 | w.byte[2] >> 4;
	w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
	w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
	w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
	w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
	w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
	w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
	w5.byte[7] = (inst & 0xff);

	return w5.wwn;
}
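/*
 * bfa_ioc_get_wwn_naa5() builds an NAA-5 format WWN: the leading nibble
 * 0x5, then the low 48 bits of the manufacturing WWN (bytes 2..7) shifted
 * up by one nibble, then a 12-bit instance number. Worked example with a
 * hypothetical mfg WWN, purely to illustrate the nibble packing:
 *
 *	mfg_wwn = 20:00:00:05:1e:8a:bc:de, inst = 0x123
 *	w5      = 50:00:51:e8:ab:cd:e1:23
 */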
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_wwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	mac_t mac;

	mac = ioc->attr->mfg_mac;
	mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

	return mac;
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
}

/**
 * Send AEN notification
 */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = ioc->logm;
	s32 inst_num = 0;
	enum bfa_ioc_type_e ioc_type;

	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);

	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
	ioc_type = bfa_ioc_get_type(ioc);
	switch (ioc_type) {
	case BFA_IOC_TYPE_FC:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		break;
	case BFA_IOC_TYPE_FCoE:
		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	case BFA_IOC_TYPE_LL:
		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
		break;
	default:
		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
		break;
	}
	aen_data.ioc.ioc_type = ioc_type;
}

/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/**
 * Clear saved firmware trace
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
}

/**
 * Retrieve the current firmware trace, read live from IOC shared memory.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 pgnum;
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int i, tlen;
	u32 *tbuf = trcdata, r32;

	bfa_trc(ioc, *trclen);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	loff = bfa_ioc_smem_pgoff(ioc, loff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
		return BFA_STATUS_FAILED;

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;
	tlen /= sizeof(u32);

	bfa_trc(ioc, tlen);

	for (i = 0; i < tlen; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		tbuf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);

	*trclen = tlen * sizeof(u32);
	return BFA_STATUS_OK;
}
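/*
 * Caller sketch (illustrative): read the live firmware trace into a local
 * buffer. *trclen is in/out -- pass the buffer size in, and get back the
 * number of bytes actually copied.
 *
 *	char buf[BFA_DBG_FWTRC_LEN];
 *	int len = sizeof(buf);
 *
 *	if (bfa_ioc_debug_fwtrc(ioc, buf, &len) == BFA_STATUS_OK)
 *		... len bytes of trace data are now in buf ...
 */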
/**
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

#else

static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
}

static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_assert(0);
}

#endif