Merge branch 'nfs-for-2.6.37' of git://git.linux-nfs.org/projects/trondmy/nfs-2.6
[linux-2.6.git] / drivers / net / bna / bfa_ioc.c
blobe94e5aa975150ba742909f70ef39129f1c48799a
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
19 #include "bfa_ioc.h"
20 #include "cna.h"
21 #include "bfi.h"
22 #include "bfi_ctreg.h"
23 #include "bfa_defs.h"
/**
 * IOC local definitions
 */

/* (Re)arm / stop the IOC state-machine timer (f/w response timeout). */
#define bfa_ioc_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_TOV))
#define bfa_ioc_timer_stop(__ioc)	del_timer(&(__ioc)->ioc_timer)

/* Arm the IOC timer with the longer auto-recovery delay. */
#define bfa_ioc_recovery_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_TOV_RECOVER))

/* (Re)arm / stop the h/w semaphore retry timer. */
#define bfa_sem_timer_start(__ioc) \
	mod_timer(&(__ioc)->sem_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)

/* (Re)arm / stop the firmware heartbeat monitor timer. */
#define bfa_hb_timer_start(__ioc) \
	mod_timer(&(__ioc)->hb_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_HB_TOV))
#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

/* True when the image bundled with the driver is too small to be a real
 * f/w image, i.e. boot comes from flash / option ROM. */
#define bfa_ioc_is_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)

/* A mailbox command is pending if one is queued by the driver or the
 * previous one has not yet been fetched by firmware. */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
/* Module-wide default for IOC auto-recovery; copied into each IOC on reset
 * entry, changeable via bfa_nw_ioc_auto_recover(). */
static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
			char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
			char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
			char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
			char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
			char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/*!< IOC enable request */
	IOC_E_DISABLE		= 2,	/*!< IOC disable request */
	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout */
	IOC_E_FWREADY		= 4,	/*!< f/w initialization done */
	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response */
	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response */
	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response */
	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure */
	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt */
	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked */
	IOC_E_DETACH		= 11,	/*!< driver detach cleanup */
};
/* Declare one handler/entry-function pair per IOC state. */
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);

/* Map state-machine handler functions to externally visible IOC states. */
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	ioc->retry_count = 0;
	/* latch the module-wide auto-recover setting for this IOC */
	ioc->auto_recover = bfa_nw_auto_recover;
}
/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		/* nothing running yet -- just complete the disable */
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			/* version matches -- proceed to h/w init */
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}
/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_TIMEOUT:
		/* periodically retry the version check */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}
/**
 * Awaiting semaphore for h/w initialzation.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Start the response timer and kick off (non-forced) hardware init. */
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, false);
}
/**
 * @brief
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		/* retry a bounded number of times with a forced reset */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, true);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Start the response timer and send the enable request to firmware. */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		/* retry full h/w init a bounded number of times */
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT,
				      ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		/* f/w restarted underneath us -- re-send enable */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Start the response timer and request IOC attributes from firmware. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
/**
 * @brief
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* IOC is operational: report success and begin heartbeat monitoring. */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
}
/* Operational state event handler. */
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		/* already enabled -- nothing to do */
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Start the response timer and send the disable request to firmware. */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}
/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		/* no response -- force the f/w state and give up */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
/* Disabled state event handler. */
static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		/* already disabled -- just acknowledge */
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Report the enable failure and arm a timer for a later retry. */
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}
/**
 * @brief
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		/* retry: re-acquire the semaphore and re-init */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Heartbeat-failure entry: fail the f/w, notify everybody, flush the
 * mailbox, and optionally schedule auto-recovery. */
static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover)
		mod_timer(&ioc->ioc_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
}
/**
 * @brief
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		/* stop the recovery timer only if it was armed */
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		/* auto-recovery delay expired -- retry init */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}
661 * BFA IOC private functions
664 static void
665 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
667 struct list_head *qe;
668 struct bfa_ioc_hbfail_notify *notify;
670 ioc->cbfn->disable_cbfn(ioc->bfa);
673 * Notify common modules registered for notification.
675 list_for_each(qe, &ioc->hb_notify_q) {
676 notify = (struct bfa_ioc_hbfail_notify *) qe;
677 notify->cbfn(notify->cbarg);
/* Semaphore retry-timer callback: try to acquire the h/w semaphore again. */
void
bfa_nw_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * Busy-wait acquisition of a h/w semaphore register. A read of 0 means
 * the semaphore was grabbed. Spins up to BFA_SEM_SPINCNT times (2us apart);
 * NOTE: if the spin count is exhausted BUG_ON() fires, so the final
 * "return false" is effectively unreachable.
 */
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return true;

	BUG_ON(!(cnt < BFA_SEM_SPINCNT));
	return false;
}
/* Release a h/w semaphore by writing 1 to its register. */
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}
/* Try to acquire the IOC h/w semaphore; on success post IOC_E_SEMLOCKED,
 * otherwise arm the semaphore timer for a retry. */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}
/* Release the IOC h/w semaphore. */
void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
/* Cancel a pending semaphore acquisition retry. */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}
/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Start the local processing unit by taking LPU0 out of reset. */
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Stop the firmware by putting both LPUs into reset. */
static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/**
 * Get driver and firmware versions.
 *
 * Reads the f/w image header out of SMEM (word by word, byte-swapped)
 * into @fwhdr.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}
840 * Returns TRUE if same.
842 bool
843 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
845 struct bfi_ioc_image_hdr *drv_fwhdr;
846 int i;
848 drv_fwhdr = (struct bfi_ioc_image_hdr *)
849 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
851 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
852 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
853 return false;
856 return true;
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_optrom(ioc))
		return true;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (fwhdr.exec != drv_fwhdr->exec)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		/* acknowledge (and thereby discard) the pending message */
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
/**
 * @img ioc_init_logic.jpg
 *
 * Decide how to (re)initialize the IOC based on the current f/w state:
 * boot fresh firmware, wait for an init started by the other function,
 * or simply re-enable an already-running matching firmware.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
/* IOC timer callback: forward the timeout to the state machine. */
void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
/* Copy a message into the host-to-firmware mailbox registers (zero-padding
 * to BFI_IOC_MSGLEN_MAX) and ring the doorbell. */
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			      ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	/* read back flushes the posted write */
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
/* Build and send the IOC enable request, stamping it with the current
 * time-of-day seconds (byte-swapped to network order). */
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
/* Build and send the IOC disable request to firmware. */
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
/* Request IOC attributes; firmware DMAs them to ioc->attr_dma. */
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
1028 void
1029 bfa_nw_ioc_hb_check(void *cbarg)
1031 struct bfa_ioc *ioc = cbarg;
1032 u32 hb_count;
1034 hb_count = readl(ioc->ioc_regs.heartbeat);
1035 if (ioc->hb_count == hb_count) {
1036 pr_crit("Firmware heartbeat failure at %d", hb_count);
1037 bfa_ioc_recover(ioc);
1038 return;
1039 } else {
1040 ioc->hb_count = hb_count;
1043 bfa_ioc_mbox_poll(ioc);
1044 mod_timer(&ioc->hb_timer, jiffies +
1045 msecs_to_jiffies(BFA_IOC_HB_TOV));
/* Seed the heartbeat counter and arm the heartbeat timer. */
static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}
/* Stop heartbeat monitoring. */
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
1063 * @brief
1064 * Initiate a full firmware download.
1066 static void
1067 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1068 u32 boot_param)
1070 u32 *fwimg;
1071 u32 pgnum, pgoff;
1072 u32 loff = 0;
1073 u32 chunkno = 0;
1074 u32 i;
1077 * Initialize LMEM first before code download
1079 bfa_ioc_lmem_init(ioc);
1082 * Flash based firmware boot
1084 if (bfa_ioc_is_optrom(ioc))
1085 boot_type = BFI_BOOT_TYPE_FLASH;
1086 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1088 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1089 pgoff = bfa_ioc_smem_pgoff(ioc, loff);
1091 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1093 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1094 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1095 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1096 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1097 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1101 * write smem
1103 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1104 ((ioc->ioc_regs.smem_page_start) + (loff)));
1106 loff += sizeof(u32);
1109 * handle page offset wrap around
1111 loff = PSS_SMEM_PGOFF(loff);
1112 if (loff == 0) {
1113 pgnum++;
1114 writel(pgnum,
1115 ioc->ioc_regs.host_page_num_fn);
1119 writel(bfa_ioc_smem_pgnum(ioc, 0),
1120 ioc->ioc_regs.host_page_num_fn);
1123 * Set boot type and boot param at the end.
1125 writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
1126 + (BFI_BOOT_TYPE_OFF)));
1127 writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
1128 + (BFI_BOOT_PARAM_OFF)));
/* Thin wrapper around the hardware init decision logic. */
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}
/**
 * @brief
 * Update BFA configuration from firmware configuration.
 *
 * Byte-swaps the DMA'd attribute fields to host order, then advances
 * the state machine.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop  = ntohl(attr->adapter_prop);
	attr->card_type     = ntohl(attr->card_type);
	attr->maxfrsize	    = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
1154 * Attach time initialization of mbox logic.
1156 static void
1157 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1159 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1160 int mc;
1162 INIT_LIST_HEAD(&mod->cmd_q);
1163 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1164 mod->mbhdlr[mc].cbfn = NULL;
1165 mod->mbhdlr[mc].cbarg = ioc->bfa;
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	u32			stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/**
 * Cleanup any pending requests.
 *
 * Drains the mailbox command queue after a heartbeat failure; queued
 * commands are simply dropped.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/**
 * IOC public
 */

/* Run the ASIC-specific PLL initialization under the chip-wide init
 * semaphore. NOTE(review): the bfa_nw_ioc_sem_get() result is ignored and
 * BFA_STATUS_OK is always returned -- acquisition failure would BUG() inside
 * the helper rather than surface here. */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 *  Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;
	/*
	 *  release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 *
 * Takes effect for IOCs that enter the reset state afterwards.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}
/* Read a complete firmware-to-host message out of the LPU mailbox
 * registers and acknowledge it (clears the mailbox interrupt). */
static void
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int		i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			      i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
/* Dispatch an IOC-class firmware message to the state machine. */
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u	*msg;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		/* unknown message id from firmware is a driver bug */
		BUG_ON(1);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->fcmode	= false;
	ioc->pllinit	= false;
	ioc->dbg_fwsave_once = true;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}
/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	/* this driver supports Catapult (CT) hardware only */
	bfa_nw_ioc_set_ct_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
1405 * Return size of dma memory required.
1408 bfa_nw_ioc_meminfo(void)
1410 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1413 void
1414 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
1416 bfa_ioc_stats(ioc, ioc_enables);
1417 ioc->dbg_fwsave_once = true;
1419 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1422 void
1423 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
1425 bfa_ioc_stats(ioc, ioc_disables);
1426 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1429 static u32
1430 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1432 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1435 static u32
1436 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1438 return PSS_SMEM_PGOFF(fmaddr);
/**
 * Register mailbox message handler function, to be called by common modules
 *
 * @param[in]	mc	message class the handler services
 * @param[in]	cbfn	callback invoked for received messages of class mc
 * @param[in]	cbarg	opaque argument passed back to cbfn
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
1455 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
1456 * Responsibility of caller to serialize
1458 * @param[in] ioc IOC instance
1459 * @param[i] cmd Mailbox command
1461 void
1462 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
1464 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1465 u32 stat;
1468 * If a previous command is pending, queue new command
1470 if (!list_empty(&mod->cmd_q)) {
1471 list_add_tail(&cmd->qe, &mod->cmd_q);
1472 return;
1476 * If mailbox is busy, queue command for poll timer
1478 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1479 if (stat) {
1480 list_add_tail(&cmd->qe, &mod->cmd_q);
1481 return;
1485 * mailbox is free -- queue command to firmware
1487 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1491 * Handle mailbox interrupts
1493 void
1494 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
1496 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1497 struct bfi_mbmsg m;
1498 int mc;
1500 bfa_ioc_msgget(ioc, &m);
1503 * Treat IOC message class as special.
1505 mc = m.mh.msg_class;
1506 if (mc == BFI_MC_IOC) {
1507 bfa_ioc_isr(ioc, &m);
1508 return;
1511 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1512 return;
1514 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
/* Forward a detected hardware error to the IOC state machine. */
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 *
 * @param[in]	notify	caller-owned entry; must stay valid while queued
 */
void
bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
			struct bfa_ioc_hbfail_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
1534 #define BFA_MFG_NAME "Brocade"
/*
 * Fill in adapter attributes from the firmware attribute block and
 * locally-derived IOC state.
 */
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	/* String attributes are copied out via dedicated helpers. */
	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	/* Prototype boards are flagged in the adapter properties word. */
	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_nw_ioc_get_mac(ioc);

	/* PCIe link properties as reported by firmware. */
	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	/* Trunking applies only to multi-port, non-CNA adapters. */
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}
1579 static enum bfa_ioc_type
1580 bfa_ioc_get_type(struct bfa_ioc *ioc)
1582 if (!ioc->ctdev || ioc->fcmode)
1583 return BFA_IOC_TYPE_FC;
1584 else if (ioc->ioc_mc == BFI_MC_IOCFC)
1585 return BFA_IOC_TYPE_FCoE;
1586 else if (ioc->ioc_mc == BFI_MC_LL)
1587 return BFA_IOC_TYPE_LL;
1588 else {
1589 BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
1590 return BFA_IOC_TYPE_LL;
1594 static void
1595 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
1597 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
1598 memcpy(serial_num,
1599 (void *)ioc->attr->brcd_serialnum,
1600 BFA_ADAPTER_SERIAL_NUM_LEN);
1603 static void
1604 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
1606 memset(fw_ver, 0, BFA_VERSION_LEN);
1607 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
1610 static void
1611 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
1613 BUG_ON(!(chip_rev));
1615 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
1617 chip_rev[0] = 'R';
1618 chip_rev[1] = 'e';
1619 chip_rev[2] = 'v';
1620 chip_rev[3] = '-';
1621 chip_rev[4] = ioc->attr->asic_rev;
1622 chip_rev[5] = '\0';
1625 static void
1626 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
1628 memset(optrom_ver, 0, BFA_VERSION_LEN);
1629 memcpy(optrom_ver, ioc->attr->optrom_version,
1630 BFA_VERSION_LEN);
1633 static void
1634 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
1636 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
1637 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
1640 static void
1641 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
1643 struct bfi_ioc_attr *ioc_attr;
1645 BUG_ON(!(model));
1646 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
1648 ioc_attr = ioc->attr;
1651 * model name
1653 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
1654 BFA_MFG_NAME, ioc_attr->card_type);
1657 static enum bfa_ioc_state
1658 bfa_ioc_get_state(struct bfa_ioc *ioc)
1660 return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
1663 void
1664 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
1666 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
1668 ioc_attr->state = bfa_ioc_get_state(ioc);
1669 ioc_attr->port_id = ioc->port_id;
1671 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
1673 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
1675 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
1676 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
1677 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
/**
 * WWN public
 *
 * Return the port WWN from the firmware attribute block.
 */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}
1689 mac_t
1690 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
1693 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1695 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1696 return bfa_ioc_get_mfg_mac(ioc);
1697 else
1698 return ioc->attr->mac;
1701 static mac_t
1702 bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
1704 mac_t m;
1706 m = ioc->attr->mfg_mac;
1707 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
1708 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
1709 else
1710 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
1711 bfa_ioc_pcifn(ioc));
1713 return m;
/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
1726 static void
1727 bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
1729 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
1730 return;