/*
 * GUI: Fix Tomato RAF theme for all builds. Compilation typo.
 * [tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / scsi / bfa / bfa_intr.c
 * blob 68ef5fa956bf29550cbc6472e733d740dca76847
 */
1 /*
2 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
17 #include <bfa.h>
18 #include <bfi/bfi_ctreg.h>
19 #include <bfa_port_priv.h>
20 #include <bfa_intr_priv.h>
21 #include <cs/bfa_debug.h>
23 BFA_TRC_FILE(HAL, INTR);
/**
 * Error-interrupt handler: forward to the IOC error ISR.
 * The raw @intr status is accepted for signature symmetry with the
 * other vector handlers but is not consumed here.
 */
static void
bfa_msix_errint(struct bfa_s *bfa, u32 intr)
{
	bfa_ioc_error_isr(&bfa->ioc);
}
/**
 * LPU mailbox interrupt handler: forward to the IOC mailbox ISR.
 */
static void
bfa_msix_lpu(struct bfa_s *bfa)
{
	bfa_ioc_mbox_isr(&bfa->ioc);
}
37 static void
38 bfa_reqq_resume(struct bfa_s *bfa, int qid)
40 struct list_head *waitq, *qe, *qen;
41 struct bfa_reqq_wait_s *wqe;
43 waitq = bfa_reqq(bfa, qid);
44 list_for_each_safe(qe, qen, waitq) {
45 /**
46 * Callback only as long as there is room in request queue
48 if (bfa_reqq_full(bfa, qid))
49 break;
51 list_del(qe);
52 wqe = (struct bfa_reqq_wait_s *) qe;
53 wqe->qresume(wqe->cbarg);
/**
 * Catch-all MSI-X vector handler: service every pending source via
 * the INTx-style dispatch path. @vec is unused — all vectors are
 * handled identically here.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
63 /**
64 * hal_intr_api
66 bfa_boolean_t
67 bfa_intx(struct bfa_s *bfa)
69 u32 intr, qintr;
70 int queue;
72 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
73 if (!intr)
74 return BFA_FALSE;
76 /**
77 * RME completion queue interrupt
79 qintr = intr & __HFN_INT_RME_MASK;
80 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
82 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
83 if (intr & (__HFN_INT_RME_Q0 << queue))
84 bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
86 intr &= ~qintr;
87 if (!intr)
88 return BFA_TRUE;
90 /**
91 * CPE completion queue interrupt
93 qintr = intr & __HFN_INT_CPE_MASK;
94 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
96 for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
97 if (intr & (__HFN_INT_CPE_Q0 << queue))
98 bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
100 intr &= ~qintr;
101 if (!intr)
102 return BFA_TRUE;
104 bfa_msix_lpu_err(bfa, intr);
106 return BFA_TRUE;
109 void
110 bfa_isr_enable(struct bfa_s *bfa)
112 u32 intr_unmask;
113 int pci_func = bfa_ioc_pcifn(&bfa->ioc);
115 bfa_trc(bfa, pci_func);
117 bfa_msix_install(bfa);
118 intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
119 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
120 __HFN_INT_LL_HALT);
122 if (pci_func == 0)
123 intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
124 __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
125 __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
126 __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
127 __HFN_INT_MBOX_LPU0);
128 else
129 intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
130 __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
131 __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
132 __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
133 __HFN_INT_MBOX_LPU1);
135 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
136 bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
137 bfa->iocfc.intr_mask = ~intr_unmask;
138 bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
/**
 * Disable adapter interrupts: drop back to INTx mode, mask every
 * interrupt source, then tear down the MSI-X vector handlers.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);	/* all ones = mask all */
	bfa_msix_uninstall(bfa);
}
149 void
150 bfa_msix_reqq(struct bfa_s *bfa, int qid)
152 struct list_head *waitq;
154 qid &= (BFI_IOC_MAX_CQS - 1);
156 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
159 * Resume any pending requests in the corresponding reqq.
161 waitq = bfa_reqq(bfa, qid);
162 if (!list_empty(waitq))
163 bfa_reqq_resume(bfa, qid);
/**
 * Handler of last resort for a message class with no registered ISR.
 * Traces the offending message header fields, asserts, and freezes
 * the trace buffer so the evidence is preserved for debugging.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	bfa_assert(0);
	bfa_trc_stop(bfa->trcmod);
}
176 void
177 bfa_msix_rspq(struct bfa_s *bfa, int qid)
179 struct bfi_msg_s *m;
180 u32 pi, ci;
181 struct list_head *waitq;
183 bfa_trc_fp(bfa, qid);
185 qid &= (BFI_IOC_MAX_CQS - 1);
187 bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
189 ci = bfa_rspq_ci(bfa, qid);
190 pi = bfa_rspq_pi(bfa, qid);
192 bfa_trc_fp(bfa, ci);
193 bfa_trc_fp(bfa, pi);
195 if (bfa->rme_process) {
196 while (ci != pi) {
197 m = bfa_rspq_elem(bfa, qid, ci);
198 bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
200 bfa_isrs[m->mhdr.msg_class] (bfa, m);
202 CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
207 * update CI
209 bfa_rspq_ci(bfa, qid) = pi;
210 bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
211 bfa_os_mmiowb();
214 * Resume any pending requests in the corresponding reqq.
216 waitq = bfa_reqq(bfa, qid);
217 if (!list_empty(waitq))
218 bfa_reqq_resume(bfa, qid);
221 void
222 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
224 u32 intr, curr_value;
226 intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
228 if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
229 bfa_msix_lpu(bfa);
231 intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
232 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
234 if (intr) {
235 if (intr & __HFN_INT_LL_HALT) {
237 * If LL_HALT bit is set then FW Init Halt LL Port
238 * Register needs to be cleared as well so Interrupt
239 * Status Register will be cleared.
241 curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
242 curr_value &= ~__FW_INIT_HALT_P;
243 bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
246 if (intr & __HFN_INT_ERR_PSS) {
248 * ERR_PSS bit needs to be cleared as well in case
249 * interrups are shared so driver's interrupt handler is
250 * still called eventhough it is already masked out.
252 curr_value = bfa_reg_read(
253 bfa->ioc.ioc_regs.pss_err_status_reg);
254 curr_value &= __PSS_ERR_STATUS_SET;
255 bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
256 curr_value);
259 bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
260 bfa_msix_errint(bfa, intr);
/**
 * Register @isr_func as the handler for all messages of class @mc
 * in the global bfa_isrs[] dispatch table.
 */
void
bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
{
	bfa_isrs[mc] = isr_func;
}