/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
18 #include <bfi/bfi_ctreg.h>
19 #include <bfa_port_priv.h>
20 #include <bfa_intr_priv.h>
21 #include <cs/bfa_debug.h>
/* Register this file with the BFA trace facility (module HAL, file id INTR). */
BFA_TRC_FILE(HAL, INTR);
26 bfa_msix_errint(struct bfa_s
*bfa
, u32 intr
)
28 bfa_ioc_error_isr(&bfa
->ioc
);
32 bfa_msix_lpu(struct bfa_s
*bfa
)
34 bfa_ioc_mbox_isr(&bfa
->ioc
);
38 bfa_reqq_resume(struct bfa_s
*bfa
, int qid
)
40 struct list_head
*waitq
, *qe
, *qen
;
41 struct bfa_reqq_wait_s
*wqe
;
43 waitq
= bfa_reqq(bfa
, qid
);
44 list_for_each_safe(qe
, qen
, waitq
) {
46 * Callback only as long as there is room in request queue
48 if (bfa_reqq_full(bfa
, qid
))
52 wqe
= (struct bfa_reqq_wait_s
*) qe
;
53 wqe
->qresume(wqe
->cbarg
);
/**
 * Single-vector MSI-X handler: all interrupt sources share one vector,
 * so delegate to the INTx-style dispatcher.
 *
 * NOTE(review): body was dropped by the extraction; restored from the
 * upstream driver — confirm it is only `bfa_intx(bfa);`.
 *
 * @param[in] bfa	BFA instance
 * @param[in] vec	vector number (unused; there is only one)
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
67 bfa_intx(struct bfa_s
*bfa
)
72 intr
= bfa_reg_read(bfa
->iocfc
.bfa_regs
.intr_status
);
77 * RME completion queue interrupt
79 qintr
= intr
& __HFN_INT_RME_MASK
;
80 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, qintr
);
82 for (queue
= 0; queue
< BFI_IOC_MAX_CQS_ASIC
; queue
++) {
83 if (intr
& (__HFN_INT_RME_Q0
<< queue
))
84 bfa_msix_rspq(bfa
, queue
& (BFI_IOC_MAX_CQS
- 1));
91 * CPE completion queue interrupt
93 qintr
= intr
& __HFN_INT_CPE_MASK
;
94 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, qintr
);
96 for (queue
= 0; queue
< BFI_IOC_MAX_CQS_ASIC
; queue
++) {
97 if (intr
& (__HFN_INT_CPE_Q0
<< queue
))
98 bfa_msix_reqq(bfa
, queue
& (BFI_IOC_MAX_CQS
- 1));
104 bfa_msix_lpu_err(bfa
, intr
);
110 bfa_isr_enable(struct bfa_s
*bfa
)
113 int pci_func
= bfa_ioc_pcifn(&bfa
->ioc
);
115 bfa_trc(bfa
, pci_func
);
117 bfa_msix_install(bfa
);
118 intr_unmask
= (__HFN_INT_ERR_EMC
| __HFN_INT_ERR_LPU0
|
119 __HFN_INT_ERR_LPU1
| __HFN_INT_ERR_PSS
|
123 intr_unmask
|= (__HFN_INT_CPE_Q0
| __HFN_INT_CPE_Q1
|
124 __HFN_INT_CPE_Q2
| __HFN_INT_CPE_Q3
|
125 __HFN_INT_RME_Q0
| __HFN_INT_RME_Q1
|
126 __HFN_INT_RME_Q2
| __HFN_INT_RME_Q3
|
127 __HFN_INT_MBOX_LPU0
);
129 intr_unmask
|= (__HFN_INT_CPE_Q4
| __HFN_INT_CPE_Q5
|
130 __HFN_INT_CPE_Q6
| __HFN_INT_CPE_Q7
|
131 __HFN_INT_RME_Q4
| __HFN_INT_RME_Q5
|
132 __HFN_INT_RME_Q6
| __HFN_INT_RME_Q7
|
133 __HFN_INT_MBOX_LPU1
);
135 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, intr_unmask
);
136 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_mask
, ~intr_unmask
);
137 bfa
->iocfc
.intr_mask
= ~intr_unmask
;
138 bfa_isr_mode_set(bfa
, bfa
->msix
.nvecs
!= 0);
142 bfa_isr_disable(struct bfa_s
*bfa
)
144 bfa_isr_mode_set(bfa
, BFA_FALSE
);
145 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_mask
, -1L);
146 bfa_msix_uninstall(bfa
);
150 bfa_msix_reqq(struct bfa_s
*bfa
, int qid
)
152 struct list_head
*waitq
;
154 qid
&= (BFI_IOC_MAX_CQS
- 1);
156 bfa
->iocfc
.hwif
.hw_reqq_ack(bfa
, qid
);
159 * Resume any pending requests in the corresponding reqq.
161 waitq
= bfa_reqq(bfa
, qid
);
162 if (!list_empty(waitq
))
163 bfa_reqq_resume(bfa
, qid
);
167 bfa_isr_unhandled(struct bfa_s
*bfa
, struct bfi_msg_s
*m
)
169 bfa_trc(bfa
, m
->mhdr
.msg_class
);
170 bfa_trc(bfa
, m
->mhdr
.msg_id
);
171 bfa_trc(bfa
, m
->mhdr
.mtag
.i2htok
);
173 bfa_trc_stop(bfa
->trcmod
);
177 bfa_msix_rspq(struct bfa_s
*bfa
, int qid
)
181 struct list_head
*waitq
;
183 bfa_trc_fp(bfa
, qid
);
185 qid
&= (BFI_IOC_MAX_CQS
- 1);
187 bfa
->iocfc
.hwif
.hw_rspq_ack(bfa
, qid
);
189 ci
= bfa_rspq_ci(bfa
, qid
);
190 pi
= bfa_rspq_pi(bfa
, qid
);
195 if (bfa
->rme_process
) {
197 m
= bfa_rspq_elem(bfa
, qid
, ci
);
198 bfa_assert_fp(m
->mhdr
.msg_class
< BFI_MC_MAX
);
200 bfa_isrs
[m
->mhdr
.msg_class
] (bfa
, m
);
202 CQ_INCR(ci
, bfa
->iocfc
.cfg
.drvcfg
.num_rspq_elems
);
209 bfa_rspq_ci(bfa
, qid
) = pi
;
210 bfa_reg_write(bfa
->iocfc
.bfa_regs
.rme_q_ci
[qid
], pi
);
214 * Resume any pending requests in the corresponding reqq.
216 waitq
= bfa_reqq(bfa
, qid
);
217 if (!list_empty(waitq
))
218 bfa_reqq_resume(bfa
, qid
);
222 bfa_msix_lpu_err(struct bfa_s
*bfa
, int vec
)
224 u32 intr
, curr_value
;
226 intr
= bfa_reg_read(bfa
->iocfc
.bfa_regs
.intr_status
);
228 if (intr
& (__HFN_INT_MBOX_LPU0
| __HFN_INT_MBOX_LPU1
))
231 intr
&= (__HFN_INT_ERR_EMC
| __HFN_INT_ERR_LPU0
|
232 __HFN_INT_ERR_LPU1
| __HFN_INT_ERR_PSS
| __HFN_INT_LL_HALT
);
235 if (intr
& __HFN_INT_LL_HALT
) {
237 * If LL_HALT bit is set then FW Init Halt LL Port
238 * Register needs to be cleared as well so Interrupt
239 * Status Register will be cleared.
241 curr_value
= bfa_reg_read(bfa
->ioc
.ioc_regs
.ll_halt
);
242 curr_value
&= ~__FW_INIT_HALT_P
;
243 bfa_reg_write(bfa
->ioc
.ioc_regs
.ll_halt
, curr_value
);
246 if (intr
& __HFN_INT_ERR_PSS
) {
248 * ERR_PSS bit needs to be cleared as well in case
249 * interrups are shared so driver's interrupt handler is
250 * still called eventhough it is already masked out.
252 curr_value
= bfa_reg_read(
253 bfa
->ioc
.ioc_regs
.pss_err_status_reg
);
254 curr_value
&= __PSS_ERR_STATUS_SET
;
255 bfa_reg_write(bfa
->ioc
.ioc_regs
.pss_err_status_reg
,
259 bfa_reg_write(bfa
->iocfc
.bfa_regs
.intr_status
, intr
);
260 bfa_msix_errint(bfa
, intr
);
265 bfa_isr_bind(enum bfi_mclass mc
, bfa_isr_func_t isr_func
)
267 bfa_isrs
[mc
] = isr_func
;