/* drivers/net/bna/bfa_ioc_ct.c */
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
19 #include "bfa_ioc.h"
20 #include "cna.h"
21 #include "bfi.h"
22 #include "bfi_ctreg.h"
23 #include "bfa_defs.h"
26 * forward declarations
28 static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
29 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
30 static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
31 static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
32 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
33 static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
34 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
35 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
37 static struct bfa_ioc_hwif nw_hwif_ct;
39 /**
40 * Called from bfa_ioc_attach() to map asic specific calls.
42 void
43 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
45 nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
46 nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
47 nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
48 nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
49 nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
50 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
51 nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
52 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
54 ioc->ioc_hwif = &nw_hwif_ct;
57 /**
58 * Return true if firmware of current driver matches the running firmware.
60 static bool
61 bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
63 enum bfi_ioc_state ioc_fwstate;
64 u32 usecnt;
65 struct bfi_ioc_image_hdr fwhdr;
67 /**
68 * Firmware match check is relevant only for CNA.
70 if (!ioc->cna)
71 return true;
73 /**
74 * If bios boot (flash based) -- do not increment usage count
76 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
77 BFA_IOC_FWIMG_MINSZ)
78 return true;
80 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
81 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
83 /**
84 * If usage count is 0, always return TRUE.
86 if (usecnt == 0) {
87 writel(1, ioc->ioc_regs.ioc_usage_reg);
88 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
89 return true;
92 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
94 /**
95 * Use count cannot be non-zero and chip in uninitialized state.
97 BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
99 /**
100 * Check if another driver with a different firmware is active
102 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
103 if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
104 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
105 return false;
109 * Same firmware version. Increment the reference count.
111 usecnt++;
112 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
113 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
114 return true;
117 static void
118 bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
120 u32 usecnt;
123 * Firmware lock is relevant only for CNA.
125 if (!ioc->cna)
126 return;
129 * If bios boot (flash based) -- do not decrement usage count
131 if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
132 BFA_IOC_FWIMG_MINSZ)
133 return;
136 * decrement usage count
138 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
139 usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
140 BUG_ON(!(usecnt > 0));
142 usecnt--;
143 writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
145 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
149 * Notify other functions on HB failure.
151 static void
152 bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
154 if (ioc->cna) {
155 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
156 /* Wait for halt to take effect */
157 readl(ioc->ioc_regs.ll_halt);
158 } else {
159 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
160 readl(ioc->ioc_regs.err_set);
165 * Host to LPU mailbox message addresses
167 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
168 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
169 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
170 { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
171 { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
175 * Host <-> LPU mailbox command/status registers - port 0
177 static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
178 { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
179 { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
180 { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
181 { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
185 * Host <-> LPU mailbox command/status registers - port 1
187 static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
188 { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
189 { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
190 { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
191 { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
194 static void
195 bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
197 void __iomem *rb;
198 int pcifn = bfa_ioc_pcifn(ioc);
200 rb = bfa_ioc_bar0(ioc);
202 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
203 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
204 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
206 if (ioc->port_id == 0) {
207 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
208 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
209 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
210 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
211 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
212 } else {
213 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
214 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
215 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
216 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
217 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
221 * PSS control registers
223 ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
224 ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
225 ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
226 ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
229 * IOC semaphore registers and serialization
231 ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
232 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
233 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
234 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
237 * sram memory access
239 ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
240 ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
243 * err set reg : for notification of hb failure in fcmode
245 ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
249 * Initialize IOC to port mapping.
252 #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
253 static void
254 bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
256 void __iomem *rb = ioc->pcidev.pci_bar_kva;
257 u32 r32;
260 * For catapult, base port id on personality register and IOC type
262 r32 = readl(rb + FNC_PERS_REG);
263 r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
264 ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
269 * Set interrupt mode for a function: INTX or MSIX
271 static void
272 bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
274 void __iomem *rb = ioc->pcidev.pci_bar_kva;
275 u32 r32, mode;
277 r32 = readl(rb + FNC_PERS_REG);
279 mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
280 __F0_INTX_STATUS;
283 * If already in desired mode, do not change anything
285 if (!msix && mode)
286 return;
288 if (msix)
289 mode = __F0_INTX_STATUS_MSIX;
290 else
291 mode = __F0_INTX_STATUS_INTA;
293 r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
294 r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
296 writel(r32, rb + FNC_PERS_REG);
300 * Cleanup hw semaphore and usecnt registers
302 static void
303 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
305 if (ioc->cna) {
306 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
307 writel(0, ioc->ioc_regs.ioc_usage_reg);
308 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
312 * Read the hw sem reg to make sure that it is locked
313 * before we clear it. If it is not locked, writing 1
314 * will lock it instead of clearing it.
316 readl(ioc->ioc_regs.ioc_sem_reg);
317 bfa_nw_ioc_hw_sem_release(ioc);
320 static enum bfa_status
321 bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
323 u32 pll_sclk, pll_fclk, r32;
325 pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
326 __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
327 __APP_PLL_312_JITLMT0_1(3U) |
328 __APP_PLL_312_CNTLMT0_1(1U);
329 pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
330 __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
331 __APP_PLL_425_JITLMT0_1(3U) |
332 __APP_PLL_425_CNTLMT0_1(1U);
333 if (fcmode) {
334 writel(0, (rb + OP_MODE));
335 writel(__APP_EMS_CMLCKSEL |
336 __APP_EMS_REFCKBUFEN2 |
337 __APP_EMS_CHANNEL_SEL,
338 (rb + ETH_MAC_SER_REG));
339 } else {
340 writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
341 writel(__APP_EMS_REFCKBUFEN1,
342 (rb + ETH_MAC_SER_REG));
344 writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
345 writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
346 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
347 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
348 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
349 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
350 writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
351 writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
352 writel(pll_sclk |
353 __APP_PLL_312_LOGIC_SOFT_RESET,
354 rb + APP_PLL_312_CTL_REG);
355 writel(pll_fclk |
356 __APP_PLL_425_LOGIC_SOFT_RESET,
357 rb + APP_PLL_425_CTL_REG);
358 writel(pll_sclk |
359 __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
360 rb + APP_PLL_312_CTL_REG);
361 writel(pll_fclk |
362 __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
363 rb + APP_PLL_425_CTL_REG);
364 readl(rb + HOSTFN0_INT_MSK);
365 udelay(2000);
366 writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
367 writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
368 writel(pll_sclk |
369 __APP_PLL_312_ENABLE,
370 rb + APP_PLL_312_CTL_REG);
371 writel(pll_fclk |
372 __APP_PLL_425_ENABLE,
373 rb + APP_PLL_425_CTL_REG);
374 if (!fcmode) {
375 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
376 writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
378 r32 = readl((rb + PSS_CTL_REG));
379 r32 &= ~__PSS_LMEM_RESET;
380 writel(r32, (rb + PSS_CTL_REG));
381 udelay(1000);
382 if (!fcmode) {
383 writel(0, (rb + PMM_1T_RESET_REG_P0));
384 writel(0, (rb + PMM_1T_RESET_REG_P1));
387 writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
388 udelay(1000);
389 r32 = readl((rb + MBIST_STAT_REG));
390 writel(0, (rb + MBIST_CTL_REG));
391 return BFA_STATUS_OK;