/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CT);
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
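
/*
 * The macros above treat the ioc_fail_sync register as two 16-bit
 * bitmasks with one bit per PCI function: bits [15:0] hold the sync
 * "ackd" mask and bits [31:16] the sync "reqd" mask.
 */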
/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;
/*
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/*
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/*
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/*
	 * decrement usage count
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	WARN_ON(usecnt <= 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_trc(ioc, usecnt);

	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}
/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}
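
/*
 * Two notification paths above: on a CNA, both LPUs are halted through
 * ll_halt/alt_ll_halt and the readl()s flush the posted writes; in FC
 * mode the err_set register is used instead (see the err_set setup in
 * bfa_ioc_ct_reg_init() below).
 */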
/*
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};
static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT },
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT },
};
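
/*
 * Unlike the per-PCI-function ct_fnreg/ct_p0reg/ct_p1reg tables above,
 * ct2_reg carries one entry per port; bfa_ioc_ct2_reg_init() below
 * indexes it with bfa_ioc_portid(ioc).
 */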
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
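/*
 * Each PCI function owns an 8-bit field in FNC_PERS_REG; e.g.
 * FNC_PERS_FN_SHIFT(2) == 16, so fn 2 is decoded from bits 23..16.
 */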
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
/*
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}
static bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return BFA_TRUE;
	}

	return BFA_FALSE;
}
/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_usage_reg);
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
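
/*
 * Note the read-before-write above: reading the sem reg appears to
 * acquire the semaphore when it is free, so the read guarantees the
 * semaphore is locked before the write of 1 clears it.
 */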
static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time.  If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
/*
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
					bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
		ioc->ioc_regs.ioc_fail_sync);
}
static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}
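
/*
 * Worked example: if PCI fns 0 and 2 have joined, the reqd half of
 * ioc_fail_sync holds 0x5.  Each fn acks via bfa_ioc_ct_sync_ack();
 * once sync_ackd == sync_reqd == 0x5, the last fn to reach here clears
 * the ackd bits, marks both IOCs failed and returns TRUE so that
 * reinitialization can proceed.
 */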
/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
}
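
/*
 * The ops set above are shared by CT and CT2; the two variants below
 * differ only in the pll/reg init, port mapping and isr/lpu-read-stat
 * handlers they install.
 */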
/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}
/*
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}
/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
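/*
 * Encoding example for the defines above:
 * __MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) == 63 << 11 == 0x0001f800,
 * which fits the __MSIX_VT_NUMVT__MK mask; the low 11 bits
 * (__MSIX_VT_OFST_) carry the function's first vector, e.g.
 * HOSTFN_MSIX_DEFAULT * 2 == 128 for PCI fn 2.
 */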
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
static bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32	pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
		__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32	r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	u32	r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(0));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		rb + CT2_CSI_MAC_CONTROL_REG(1));
}
#define CT2_NFC_MAX_DELAY	1000
static bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32	wgn, r32;
	int	i;

	/*
	 * Initialize PLL if not already done by NFC
	 */
	wgn = readl(rb + CT2_WGN_STATUS);
	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
		writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts.
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	}
	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	}

	bfa_ioc_ct2_mac_reset(rb);
	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * Announce flash device presence, if flash was corrupted.
	 */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}