/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
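/*
 * Control-path objects for the BNA driver: the LL (link-layer) port, the
 * port and the device are each driven by a bfa_fsm state machine, and all
 * firmware interaction goes through the mailbox command queue defined below.
 */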
static void bna_device_cb_port_stopped(void *arg,
				enum bna_cb_status status);

bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
	port->llport.link_status = BNA_LINK_UP;
	port->llport.link_status = BNA_CEE_UP;

	/* Compute the priority */
	prio_map = aen->prio_map;
	for (i = 0; i < 8; i++) {
		if ((prio_map >> i) & 0x1)

	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
	port->link_cbfn(port->bna->bnad, port->llport.link_status);
bna_port_cb_link_down(struct bna_port *port, int status)
	port->llport.link_status = BNA_LINK_DOWN;

	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);

llport_can_be_up(struct bna_llport *llport)
	if (llport->type == BNA_PORT_T_REGULAR)
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));

#define llport_is_up llport_can_be_up
enum bna_llport_event {
	LLPORT_E_FWRESP_UP_OK = 6,
	LLPORT_E_FWRESP_UP_FAIL = 7,
	LLPORT_E_FWRESP_DOWN = 8

bna_llport_cb_port_enabled(struct bna_llport *llport)
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;

	if (llport_can_be_up(llport))
		bfa_fsm_send_event(llport, LLPORT_E_UP);

bna_llport_cb_port_disabled(struct bna_llport *llport)
	int llport_up = llport_is_up(llport);

	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	bfa_fsm_send_event(llport, LLPORT_E_DOWN);
bna_is_aen(u8 msg_id)
	case BFI_LL_I2H_LINK_DOWN_AEN:
	case BFI_LL_I2H_LINK_UP_AEN:
	case BFI_LL_I2H_PORT_ENABLE_AEN:
	case BFI_LL_I2H_PORT_DISABLE_AEN:

bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);

	switch (aen->mh.msg_id) {
	case BFI_LL_I2H_LINK_UP_AEN:
		bna_port_cb_link_up(&bna->port, aen, aen->reason);
	case BFI_LL_I2H_LINK_DOWN_AEN:
		bna_port_cb_link_down(&bna->port, aen->reason);
	case BFI_LL_I2H_PORT_ENABLE_AEN:
		bna_llport_cb_port_enabled(&bna->port.llport);
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		bna_llport_cb_port_disabled(&bna->port.llport);
bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
	struct bna *bna = (struct bna *)(llarg);
	struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
	struct bfi_mhdr *cmd_h, *rsp_h;
	struct bna_mbox_qe *mb_qe = NULL;
	char message[BNA_MESSAGE_SIZE];

	aen = bna_is_aen(mb_rsp->mh.msg_id);

	mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
	cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
	rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);

	if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
	    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
		/* Remove the request from posted_q, update state */
		list_del(&mb_qe->qe);
		bna->mbox_mod.msg_pending--;
		if (list_empty(&bna->mbox_mod.posted_q))
			bna->mbox_mod.state = BNA_MBOX_FREE;

		/* Dispatch the cbfn */
		mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);

		/* Post the next entry, if needed */
		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
		bfa_nw_ioc_mbox_queue(&bna->device.ioc,
		snprintf(message, BNA_MESSAGE_SIZE,
			 "No matching rsp for [%d:%d:%d]\n",
			 mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
			 mb_rsp->mh.mtag.i2htok);
		pr_info("%s", message);

	bna_mbox_aen_callback(bna, msg);
bna_err_handler(struct bna *bna, u32 intr_status)
	if (intr_status & __HALT_STATUS_BITS) {
		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
		init_halt &= ~__FW_INIT_HALT_P;
		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);

	bfa_nw_ioc_error_isr(&bna->device.ioc);

bna_mbox_handler(struct bna *bna, u32 intr_status)
	if (BNA_IS_ERR_INTR(intr_status)) {
		bna_err_handler(bna, intr_status);
	if (BNA_IS_MBOX_INTR(intr_status))
		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);

	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
	bna->mbox_mod.msg_ctr++;
	bna->mbox_mod.msg_pending++;
	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
		bna->mbox_mod.state = BNA_MBOX_POSTED;
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
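/*
 * Mailbox queuing model as implemented above: every command is appended to
 * mbox_mod.posted_q, but it is handed to the IOC mailbox immediately only
 * when the module is in BNA_MBOX_FREE state. bna_ll_isr() matches each
 * firmware response against the head of posted_q, runs its callback, and
 * posts the next pending entry if one is queued.
 */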
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
	struct bna_mbox_qe *mb_qe = NULL;
	struct list_head *mb_q;
	void (*cbfn)(void *arg, int status);

	mb_q = &bna->mbox_mod.posted_q;

	while (!list_empty(mb_q)) {
		bfa_q_deq(mb_q, &mb_qe);
		cbarg = mb_qe->cbarg;
		bfa_q_qe_init(mb_qe);
		bna->mbox_mod.msg_pending--;

		cbfn(cbarg, BNA_CB_NOT_EXEC);

	bna->mbox_mod.state = BNA_MBOX_FREE;

bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)

bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);

bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
	mbox_mod->state = BNA_MBOX_FREE;
	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
	INIT_LIST_HEAD(&mbox_mod->posted_q);

bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
	mbox_mod->bna = NULL;

#define call_llport_stop_cbfn(llport, status)\
	if ((llport)->stop_cbfn)\
		(llport)->stop_cbfn(&(llport)->bna->port, status);\
	(llport)->stop_cbfn = NULL;\
static void bna_fw_llport_up(struct bna_llport *llport);
static void bna_fw_cb_llport_up(void *arg, int status);
static void bna_fw_llport_down(struct bna_llport *llport);
static void bna_fw_cb_llport_down(void *arg, int status);
static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);

enum bna_llport_state {
	BNA_LLPORT_STOPPED = 1,
	BNA_LLPORT_UP_RESP_WAIT = 3,
	BNA_LLPORT_DOWN_RESP_WAIT = 4,
	BNA_LLPORT_LAST_RESP_WAIT = 6
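/*
 * LLPort state machine: the *_RESP_WAIT states cover the window where a
 * port admin up/down request has been sent to firmware and the matching
 * LLPORT_E_FWRESP_* event has not yet arrived.
 */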
bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
			enum bna_llport_event);

static struct bfa_sm_table llport_sm_table[] = {
	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}

bna_llport_sm_stopped_entry(struct bna_llport *llport)
	llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);

bna_llport_sm_stopped(struct bna_llport *llport,
			enum bna_llport_event event)
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
		/* This event is received due to Rx objects failing */
	case LLPORT_E_FWRESP_UP_OK:
	case LLPORT_E_FWRESP_DOWN:
		 * These events are received due to flushing of mbox when
bna_llport_sm_down_entry(struct bna_llport *llport)
	bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);

bna_llport_sm_down(struct bna_llport *llport,
			enum bna_llport_event event)
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		bna_fw_llport_up(llport);

bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
	BUG_ON(!llport_can_be_up(llport));
	 * NOTE: Do not call bna_fw_llport_up() here. That will overstep the
	 * mbox due to down_resp_wait -> up_resp_wait transition on event
bna_llport_sm_up_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
	case LLPORT_E_FWRESP_UP_OK:
		bfa_fsm_set_state(llport, bna_llport_sm_up);
	case LLPORT_E_FWRESP_UP_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
	case LLPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
		bna_fw_llport_up(llport);

bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
	 * NOTE: Do not call bna_fw_llport_down() here. That will overstep the
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
bna_llport_sm_down_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
		bna_fw_llport_down(llport);
	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down);

bna_llport_sm_up_entry(struct bna_llport *llport)

bna_llport_sm_up(struct bna_llport *llport,
			enum bna_llport_event event)
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		bna_fw_llport_down(llport);
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		bna_fw_llport_down(llport);

bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)

bna_llport_sm_last_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		 * This event is received due to Rx objects stopping in
	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
		bna_fw_llport_down(llport);
	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
bna_fw_llport_admin_up(struct bna_llport *llport)
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_ENABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_up, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);

bna_fw_llport_up(struct bna_llport *llport)
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_up(llport);

bna_fw_cb_llport_up(void *arg, int status)
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	if (status == BFI_LL_CMD_FAIL) {
		if (llport->type == BNA_PORT_T_REGULAR)
			llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
			llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
bna_fw_llport_admin_down(struct bna_llport *llport)
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_DISABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_down, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);

bna_fw_llport_down(struct bna_llport *llport)
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_down(llport);

bna_fw_cb_llport_down(void *arg, int status)
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);

bna_port_cb_llport_stopped(struct bna_port *port,
				enum bna_cb_status status)
	bfa_wc_down(&port->chld_stop_wc);
bna_llport_init(struct bna_llport *llport, struct bna *bna)
	llport->flags |= BNA_LLPORT_F_ADMIN_UP;
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
	llport->type = BNA_PORT_T_REGULAR;

	llport->link_status = BNA_LINK_DOWN;

	llport->rx_started_count = 0;

	llport->stop_cbfn = NULL;

	bfa_q_qe_init(&llport->mbox_qe.qe);

	bfa_fsm_set_state(llport, bna_llport_sm_stopped);

bna_llport_uninit(struct bna_llport *llport)
	llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

bna_llport_start(struct bna_llport *llport)
	bfa_fsm_send_event(llport, LLPORT_E_START);

bna_llport_stop(struct bna_llport *llport)
	llport->stop_cbfn = bna_port_cb_llport_stopped;

	bfa_fsm_send_event(llport, LLPORT_E_STOP);

bna_llport_fail(struct bna_llport *llport)
	/* Reset the physical port status to enabled */
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
bna_llport_state_get(struct bna_llport *llport)
	return bfa_sm_to_state(llport_sm_table, llport->fsm);

bna_llport_rx_started(struct bna_llport *llport)
	llport->rx_started_count++;

	if (llport->rx_started_count == 1) {
		llport->flags |= BNA_LLPORT_F_RX_STARTED;

		if (llport_can_be_up(llport))
			bfa_fsm_send_event(llport, LLPORT_E_UP);

bna_llport_rx_stopped(struct bna_llport *llport)
	int llport_up = llport_is_up(llport);

	llport->rx_started_count--;

	if (llport->rx_started_count == 0) {
		llport->flags &= ~BNA_LLPORT_F_RX_STARTED;

			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
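/*
 * The bna_port_chld_*() macros below fan start/stop/fail out to the LLPort,
 * Tx and Rx modules; chld_stop_wc is raised once per stopped child so the
 * port resumes (via bna_port_cb_chld_stopped) only after all of them have
 * reported back through the bna_port_cb_*_stopped callbacks.
 */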
#define bna_port_chld_start(port)\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_llport_start(&(port)->llport);\
	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\

#define bna_port_chld_stop(port)\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_llport_stop(&(port)->llport);\
	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\

#define bna_port_chld_fail(port)\
	bna_llport_fail(&(port)->llport);\
	bna_tx_mod_fail(&(port)->bna->tx_mod);\
	bna_rx_mod_fail(&(port)->bna->rx_mod);\

#define bna_port_rx_start(port)\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\

#define bna_port_rx_stop(port)\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\

#define call_port_stop_cbfn(port, status)\
	if ((port)->stop_cbfn)\
		(port)->stop_cbfn((port)->stop_cbarg, status);\
	(port)->stop_cbfn = NULL;\
	(port)->stop_cbarg = NULL;\

#define call_port_pause_cbfn(port, status)\
	if ((port)->pause_cbfn)\
		(port)->pause_cbfn((port)->bna->bnad, status);\
	(port)->pause_cbfn = NULL;\

#define call_port_mtu_cbfn(port, status)\
	if ((port)->mtu_cbfn)\
		(port)->mtu_cbfn((port)->bna->bnad, status);\
	(port)->mtu_cbfn = NULL;\
static void bna_fw_pause_set(struct bna_port *port);
static void bna_fw_cb_pause_set(void *arg, int status);
static void bna_fw_mtu_set(struct bna_port *port);
static void bna_fw_cb_mtu_set(void *arg, int status);

enum bna_port_event {
	PORT_E_PAUSE_CFG = 4,
	PORT_E_CHLD_STOPPED = 6,
	PORT_E_FWRESP_PAUSE = 7,
	PORT_E_FWRESP_MTU = 8

enum bna_port_state {
	BNA_PORT_STOPPED = 1,
	BNA_PORT_MTU_INIT_WAIT = 2,
	BNA_PORT_PAUSE_INIT_WAIT = 3,
	BNA_PORT_LAST_RESP_WAIT = 4,
	BNA_PORT_STARTED = 5,
	BNA_PORT_PAUSE_CFG_WAIT = 6,
	BNA_PORT_RX_STOP_WAIT = 7,
	BNA_PORT_MTU_CFG_WAIT = 8,
	BNA_PORT_CHLD_STOP_WAIT = 9
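/*
 * Port state machine: MTU and pause settings are pushed to firmware one at
 * a time (the *_INIT_WAIT / *_CFG_WAIT states), and configuration changes
 * that arrive while a request is still outstanding are remembered via the
 * BNA_PORT_F_MTU_CHANGED / BNA_PORT_F_PAUSE_CHANGED flags.
 */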
bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, started, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
			enum bna_port_event);

static struct bfa_sm_table port_sm_table[] = {
	{BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
	{BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
	{BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
	{BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
	{BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
	{BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
	{BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
	{BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
	{BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
bna_port_sm_stopped_entry(struct bna_port *port)
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
	call_port_stop_cbfn(port, BNA_CB_SUCCESS);

bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		call_port_stop_cbfn(port, BNA_CB_SUCCESS);
	case PORT_E_PAUSE_CFG:
		call_port_pause_cbfn(port, BNA_CB_SUCCESS);
		call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
	case PORT_E_CHLD_STOPPED:
		 * This event is received due to LLPort, Tx and Rx objects
	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		 * These events are received due to flushing of mbox when
bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
	bna_fw_mtu_set(port);

bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		bfa_fsm_set_state(port, bna_port_sm_stopped);
	case PORT_E_PAUSE_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
	case PORT_E_FWRESP_MTU:
		if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bna_fw_mtu_set(port);
			bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
bna_port_sm_pause_init_wait_entry(struct bna_port *port)
	bna_fw_pause_set(port);

bna_port_sm_pause_init_wait(struct bna_port *port,
				enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		bfa_fsm_set_state(port, bna_port_sm_stopped);
	case PORT_E_PAUSE_CFG:
		port->flags |= BNA_PORT_F_PAUSE_CHANGED;
		port->flags |= BNA_PORT_F_MTU_CHANGED;
	case PORT_E_FWRESP_PAUSE:
		if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
			port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
			bna_fw_pause_set(port);
		} else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
			bfa_fsm_set_state(port, bna_port_sm_started);
			bna_port_chld_start(port);
		bfa_sm_fault(event);
bna_port_sm_last_resp_wait_entry(struct bna_port *port)

bna_port_sm_last_resp_wait(struct bna_port *port,
				enum bna_port_event event)
	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bfa_sm_fault(event);

bna_port_sm_started_entry(struct bna_port *port)
	 * NOTE: Do not call bna_port_chld_start() here, since it will be
	 * inadvertently called during pause_cfg_wait->started transition
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);

bna_port_sm_started(struct bna_port *port,
			enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
	case PORT_E_PAUSE_CFG:
		bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
	case PORT_E_MTU_CFG:
		bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
		bfa_sm_fault(event);

bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
	bna_fw_pause_set(port);

bna_port_sm_pause_cfg_wait(struct bna_port *port,
				enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
	case PORT_E_FWRESP_PAUSE:
		bfa_fsm_set_state(port, bna_port_sm_started);
		bfa_sm_fault(event);
bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
	bna_port_rx_stop(port);

bna_port_sm_rx_stop_wait(struct bna_port *port,
				enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
		bfa_sm_fault(event);

bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
	bna_fw_mtu_set(port);

bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_started);
		bna_port_rx_start(port);
		bfa_sm_fault(event);

bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
	bna_port_chld_stop(port);

bna_port_sm_chld_stop_wait(struct bna_port *port,
				enum bna_port_event event)
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bfa_sm_fault(event);
bna_fw_pause_set(struct bna_port *port)
	struct bfi_ll_set_pause_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.tx_pause = port->pause_config.tx_pause;
	ll_req.rx_pause = port->pause_config.rx_pause;

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_pause_set, port);

	bna_mbox_send(port->bna, &port->mbox_qe);

bna_fw_cb_pause_set(void *arg, int status)
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
bna_fw_mtu_set(struct bna_port *port)
	struct bfi_ll_mtu_info_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
	ll_req.mtu = htons((u16)port->mtu);

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_mtu_set, port);
	bna_mbox_send(port->bna, &port->mbox_qe);

bna_fw_cb_mtu_set(void *arg, int status)
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);

bna_port_cb_chld_stopped(void *arg)
	struct bna_port *port = (struct bna_port *)arg;

	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
bna_port_init(struct bna_port *port, struct bna *bna)
	port->type = BNA_PORT_T_REGULAR;

	port->link_cbfn = bnad_cb_port_link_status;

	port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
	port->chld_stop_wc.wc_cbarg = port;
	port->chld_stop_wc.wc_count = 0;

	port->stop_cbfn = NULL;
	port->stop_cbarg = NULL;

	port->pause_cbfn = NULL;

	port->mtu_cbfn = NULL;

	bfa_q_qe_init(&port->mbox_qe.qe);

	bfa_fsm_set_state(port, bna_port_sm_stopped);

	bna_llport_init(&port->llport, bna);

bna_port_uninit(struct bna_port *port)
	bna_llport_uninit(&port->llport);

bna_port_state_get(struct bna_port *port)
	return bfa_sm_to_state(port_sm_table, port->fsm);
bna_port_start(struct bna_port *port)
	port->flags |= BNA_PORT_F_DEVICE_READY;
	if (port->flags & BNA_PORT_F_ENABLED)
		bfa_fsm_send_event(port, PORT_E_START);

bna_port_stop(struct bna_port *port)
	port->stop_cbfn = bna_device_cb_port_stopped;
	port->stop_cbarg = &port->bna->device;

	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_STOP);

bna_port_fail(struct bna_port *port)
	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_FAIL);

bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
	bfa_wc_down(&port->chld_stop_wc);

bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
	bfa_wc_down(&port->chld_stop_wc);

bna_port_mtu_get(struct bna_port *port)
bna_port_enable(struct bna_port *port)
	if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)

	port->flags |= BNA_PORT_F_ENABLED;

	if (port->flags & BNA_PORT_F_DEVICE_READY)
		bfa_fsm_send_event(port, PORT_E_START);

bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
		 void (*cbfn)(void *, enum bna_cb_status))
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);

	port->stop_cbfn = cbfn;
	port->stop_cbarg = port->bna->bnad;

	port->flags &= ~BNA_PORT_F_ENABLED;

	bfa_fsm_send_event(port, PORT_E_STOP);
bna_port_pause_config(struct bna_port *port,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *, enum bna_cb_status))
	port->pause_config = *pause_config;

	port->pause_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);

bna_port_mtu_set(struct bna_port *port, int mtu,
		 void (*cbfn)(struct bnad *, enum bna_cb_status))
	port->mtu_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_MTU_CFG);

bna_port_mac_get(struct bna_port *port, mac_t *mac)
	*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
#define enable_mbox_intr(_device)\
	bna_intr_status_get((_device)->bna, intr_status);\
	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
	bna_mbox_intr_enable((_device)->bna);\

#define disable_mbox_intr(_device)\
	bna_mbox_intr_disable((_device)->bna);\
	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\

static const struct bna_chip_regs_offset reg_offset[] =
{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
	HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
	HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
	HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
enum bna_device_event {
	DEVICE_E_ENABLE = 1,
	DEVICE_E_DISABLE = 2,
	DEVICE_E_IOC_READY = 3,
	DEVICE_E_IOC_FAILED = 4,
	DEVICE_E_IOC_DISABLED = 5,
	DEVICE_E_IOC_RESET = 6,
	DEVICE_E_PORT_STOPPED = 7,

enum bna_device_state {
	BNA_DEVICE_STOPPED = 1,
	BNA_DEVICE_IOC_READY_WAIT = 2,
	BNA_DEVICE_READY = 3,
	BNA_DEVICE_PORT_STOP_WAIT = 4,
	BNA_DEVICE_IOC_DISABLE_WAIT = 5,
	BNA_DEVICE_FAILED = 6
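/*
 * Device state machine: tracks IOC enable/disable and failure notifications
 * delivered through the bfa_iocll_cbfn callbacks, and starts or stops the
 * port and mailbox module accordingly.
 */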
bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ready, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, failed, struct bna_device,
			enum bna_device_event);

static struct bfa_sm_table device_sm_table[] = {
	{BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
	{BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
	{BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
	{BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
	{BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
	{BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
bna_device_sm_stopped_entry(struct bna_device *device)
	if (device->stop_cbfn)
		device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);

	device->stop_cbfn = NULL;
	device->stop_cbarg = NULL;

bna_device_sm_stopped(struct bna_device *device,
			enum bna_device_event event)
	case DEVICE_E_ENABLE:
		if (device->intr_type == BNA_INTR_T_MSIX)
			bna_mbox_msix_idx_set(device);
		bfa_nw_ioc_enable(&device->ioc);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_stopped);
	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		bfa_sm_fault(event);
bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
	 * Do not call bfa_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.

bna_device_sm_ioc_ready_wait(struct bna_device *device,
				enum bna_device_event event)
	case DEVICE_E_DISABLE:
		if (device->ready_cbfn)
			device->ready_cbfn(device->ready_cbarg,
		device->ready_cbfn = NULL;
		device->ready_cbarg = NULL;
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
	case DEVICE_E_IOC_READY:
		bfa_fsm_set_state(device, bna_device_sm_ready);
	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		bfa_sm_fault(event);

bna_device_sm_ready_entry(struct bna_device *device)
	bna_mbox_mod_start(&device->bna->mbox_mod);
	bna_port_start(&device->bna->port);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		bfa_sm_fault(event);

bna_device_sm_port_stop_wait_entry(struct bna_device *device)
	bna_port_stop(&device->bna->port);

bna_device_sm_port_stop_wait(struct bna_device *device,
				enum bna_device_event event)
	case DEVICE_E_PORT_STOPPED:
		bna_mbox_mod_stop(&device->bna->mbox_mod);
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
	case DEVICE_E_IOC_FAILED:
		disable_mbox_intr(device);
		bna_port_fail(&device->bna->port);
		bfa_sm_fault(event);

bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
	bfa_nw_ioc_disable(&device->ioc);

bna_device_sm_ioc_disable_wait(struct bna_device *device,
				enum bna_device_event event)
	case DEVICE_E_IOC_DISABLED:
		disable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		bfa_sm_fault(event);
bna_device_sm_failed_entry(struct bna_device *device)
	disable_mbox_intr(device);
	bna_port_fail(&device->bna->port);
	bna_mbox_mod_stop(&device->bna->mbox_mod);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;

bna_device_sm_failed(struct bna_device *device,
			enum bna_device_event event)
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		bfa_sm_fault(event);
/* IOC callback functions */

bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
	struct bna_device *device = (struct bna_device *)dev;

		bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
		bfa_fsm_send_event(device, DEVICE_E_IOC_READY);

bna_device_cb_iocll_disabled(void *dev)
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);

bna_device_cb_iocll_failed(void *dev)
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);

bna_device_cb_iocll_reset(void *dev)
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);

static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
	bna_device_cb_iocll_ready,
	bna_device_cb_iocll_disabled,
	bna_device_cb_iocll_failed,
	bna_device_cb_iocll_reset
bna_adv_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;

	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();
bna_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);

		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	bfa_nw_ioc_mem_claim(&device->ioc,
		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,

	bna_adv_device_init(device, bna, res_info);

	 * Initialize mbox_mod only after IOC, so that mbox handler
	 * registration goes through
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
	bna_mbox_mod_init(&bna->mbox_mod, bna);

	device->ready_cbfn = device->stop_cbfn = NULL;
	device->ready_cbarg = device->stop_cbarg = NULL;

	bfa_fsm_set_state(device, bna_device_sm_stopped);

bna_device_uninit(struct bna_device *device)
	bna_mbox_mod_uninit(&device->bna->mbox_mod);

	bfa_nw_ioc_detach(&device->ioc);
bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
	struct bna_device *device = (struct bna_device *)arg;

	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);

bna_device_status_get(struct bna_device *device)
	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;

bna_device_enable(struct bna_device *device)
	if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
		bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);

	device->ready_cbfn = bnad_cb_device_enabled;
	device->ready_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_ENABLE);

bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);

	device->stop_cbfn = bnad_cb_device_disabled;
	device->stop_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_DISABLE);

bna_device_state_get(struct bna_device *device)
	return bfa_sm_to_state(device_sm_table, device->fsm);

const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
bna_adv_res_req(struct bna_res_info *res_info)
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				bfa_nw_cee_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);

	/* Virtual memory for soft stats */
	res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
		sizeof(struct bna_sw_stats);
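/*
 * Statistics flow: bna_stats_get() asks firmware for hardware stats via
 * BFI_LL_H2I_STATS_GET_REQ, passing the DMA address of the host stats
 * buffer; bna_fw_cb_stats_get() then walks the DMAed block 64 bits at a
 * time to fix byte order, redistributes the per-TxF/RxF blocks, collects
 * the software stats below and hands the result to bnad_cb_stats_get().
 */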
bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
	struct bna_txq *txq;
	struct bna_rxp *rxp;
	struct list_head *qe;
	struct list_head *txq_qe;
	struct list_head *rxp_qe;
	struct list_head *mac_qe;

	sw_stats->device_state = bna_device_state_get(&bna->device);
	sw_stats->port_state = bna_port_state_get(&bna->port);
	sw_stats->port_flags = bna->port.flags;
	sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
	sw_stats->priority = bna->port.priority;

	list_for_each(qe, &bna->tx_mod.tx_active_q) {
		tx = (struct bna_tx *)qe;
		sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
		sw_stats->tx_stats[i].tx_flags = tx->flags;

		sw_stats->tx_stats[i].num_txqs = 0;
		sw_stats->tx_stats[i].txq_bmap[0] = 0;
		sw_stats->tx_stats[i].txq_bmap[1] = 0;
		list_for_each(txq_qe, &tx->txq_q) {
			txq = (struct bna_txq *)txq_qe;
			if (txq->txq_id < 32)
				sw_stats->tx_stats[i].txq_bmap[0] |=
					((u32)1 << txq->txq_id);
				sw_stats->tx_stats[i].txq_bmap[1] |=
					1 << (txq->txq_id - 32));
			sw_stats->tx_stats[i].num_txqs++;

		sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;

	sw_stats->num_active_tx = i;

	list_for_each(qe, &bna->rx_mod.rx_active_q) {
		rx = (struct bna_rx *)qe;
		sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
		sw_stats->rx_stats[i].rx_flags = rx->rx_flags;

		sw_stats->rx_stats[i].num_rxps = 0;
		sw_stats->rx_stats[i].num_rxqs = 0;
		sw_stats->rx_stats[i].rxq_bmap[0] = 0;
		sw_stats->rx_stats[i].rxq_bmap[1] = 0;
		sw_stats->rx_stats[i].cq_bmap[0] = 0;
		sw_stats->rx_stats[i].cq_bmap[1] = 0;
		list_for_each(rxp_qe, &rx->rxp_q) {
			rxp = (struct bna_rxp *)rxp_qe;

			sw_stats->rx_stats[i].num_rxqs += 1;

			if (rxp->type == BNA_RXP_SINGLE) {
				if (rxp->rxq.single.only->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						rxp->rxq.single.only->rxq_id);
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						(rxp->rxq.single.only->rxq_id - 32));
				if (rxp->rxq.slr.large->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						rxp->rxq.slr.large->rxq_id);
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						(rxp->rxq.slr.large->rxq_id - 32));
				if (rxp->rxq.slr.small->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						rxp->rxq.slr.small->rxq_id);
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						(rxp->rxq.slr.small->rxq_id - 32));
				sw_stats->rx_stats[i].num_rxqs += 1;

			if (rxp->cq.cq_id < 32)
				sw_stats->rx_stats[i].cq_bmap[0] |=
					(1 << rxp->cq.cq_id);
				sw_stats->rx_stats[i].cq_bmap[1] |=
					(1 << (rxp->cq.cq_id - 32));

			sw_stats->rx_stats[i].num_rxps++;

		sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
		sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
		sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;

		sw_stats->rx_stats[i].num_active_ucast = 0;
		if (rx->rxf.ucast_active_mac)
			sw_stats->rx_stats[i].num_active_ucast++;
		list_for_each(mac_qe, &rx->rxf.ucast_active_q)
			sw_stats->rx_stats[i].num_active_ucast++;

		sw_stats->rx_stats[i].num_active_mcast = 0;
		list_for_each(mac_qe, &rx->rxf.mcast_active_q)
			sw_stats->rx_stats[i].num_active_mcast++;

		sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
		sw_stats->rx_stats[i].vlan_filter_status =
			rx->rxf.vlan_filter_status;
		memcpy(sw_stats->rx_stats[i].vlan_filter_table,
			rx->rxf.vlan_filter_table,
			sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));

		sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
		sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;

	sw_stats->num_active_rx = i;
bna_fw_cb_stats_get(void *arg, int status)
	struct bna *bna = (struct bna *)arg;
	int rxf_count, txf_count;
	u64 rxf_bmap, txf_bmap;

	bfa_q_qe_init(&bna->mbox_qe.qe);

		p_stats = (u64 *)bna->stats.hw_stats;
		count = sizeof(struct bfi_ll_stats) / sizeof(u64);
		for (i = 0; i < count; i++)
			p_stats[i] = cpu_to_be64(p_stats[i]);

		rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
			((u64)bna->stats.rxf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
			if (rxf_bmap & ((u64)1 << i))

		txf_bmap = (u64)bna->stats.txf_bmap[0] |
			((u64)bna->stats.txf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
			if (txf_bmap & ((u64)1 << i))

		p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
			((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
			txf_count * sizeof(struct bfi_ll_stats_txf))/

		/* Populate the TXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
			if (txf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_txf)/
				memcpy(&bna->stats.hw_stats->txf_stats[i],
					sizeof(struct bfi_ll_stats_txf));

		/* Populate the RXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
			if (rxf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_rxf)/
				memcpy(&bna->stats.hw_stats->rxf_stats[i],
					sizeof(struct bfi_ll_stats_rxf));

		bna_sw_stats_get(bna, bna->stats.sw_stats);
		bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
bna_fw_stats_get(struct bna *bna)
	struct bfi_ll_stats_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);

	ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
	ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
	ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
	ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);

	ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
	ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;

	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_stats_get, bna);
	bna_mbox_send(bna, &bna->mbox_qe);

	bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
	bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
	bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];

bna_stats_get(struct bna *bna)
	if (bna_device_status_get(&bna->device))
		bna_fw_stats_get(bna);
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
	ib->ib_config.coalescing_timeo = coalescing_timeo;

	if (ib->start_count)
		ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);

bna_rxf_adv_init(struct bna_rxf *rxf,
		struct bna_rx_config *q_config)
	switch (q_config->rxp_type) {
	case BNA_RXP_SINGLE:
		rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
		rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
		rxf->hds_cfg.header_size =
				q_config->hds_config.header_size;
		rxf->forced_offset = 0;

	if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
		rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
		rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
		memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
			&q_config->rss_config.toeplitz_hash_key[0],
			sizeof(rxf->rss_cfg.toeplitz_hash_key));
rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
	struct bfi_ll_rxf_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	req.enable = status;

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
			rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
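/*
 * Rx-mode handling below keeps promisc/allmulti intent as a pending/active
 * pair: the *_process_* helpers apply pending changes to hardware, the
 * *_clear_* helpers undo the hardware state while re-arming the pending
 * bits, and the *_reset_* helpers fix up bookkeeping only, without issuing
 * mailbox commands.
 */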
rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_active_q);

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;

		/* Disable VLAN filter to allow all VLANs */
		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_ENABLED);
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);

rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_ENABLED);
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 1. delete pending ucast entries */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);

	/* 2. clear active ucast entries; move them to pending_add_q */
	if (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);

rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;

	/* 6. Execute pending promisc mode disable command */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);

	/* 7. Clear active promisc mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* move promisc configuration from active -> pending */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
	/* 10. Execute pending allmulti mode disable command */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);

	/* 11. Clear active allmulti mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* move allmulti configuration from active -> pending */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);

rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
	struct list_head *qe;
	struct bna_mac *mac;

	/* 1. Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);

	/* 2. Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		mac = (struct bna_mac *)qe;
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;

	/* 6. Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

	/* 7. Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;

rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
	/* 10. Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;

	/* 11. Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *	1 = need h/w change
 */
rxf_promisc_enable(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;

	/* There can not be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
	/* Schedule enable */
		/* Promisc mode should not be active in the system */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = rxf->rxf_id;

/*
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *	1 = need h/w change
 */
rxf_promisc_disable(struct bna_rxf *rxf)
	struct bna *bna = rxf->rx->bna;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Promisc mode should not be active */
		/* system promisc state should be pending */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the promisc state from the system */
		bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Promisc mode should be active in the system */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);

	/* Do nothing if already disabled */
2457 * Should only be called by bna_rxf_mode_set.
2458 * Helps deciding if h/w configuration is needed or not.
2461 * 1 = need h/w change
2464 rxf_allmulti_enable(struct bna_rxf
*rxf
)
2468 /* There can not be any pending disable command */
2470 /* Do nothing if pending enable or already enabled */
2471 if (is_allmulti_enable(rxf
->rxmode_pending
,
2472 rxf
->rxmode_pending_bitmask
) ||
2473 (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
)) {
2474 /* Schedule enable */
2476 allmulti_enable(rxf
->rxmode_pending
,
2477 rxf
->rxmode_pending_bitmask
);
2485 * Should only be called by bna_rxf_mode_set.
2486 * Helps deciding if h/w configuration is needed or not.
2489 * 1 = need h/w change
2492 rxf_allmulti_disable(struct bna_rxf
*rxf
)
2496 /* There can not be any pending disable */
2498 /* Turn off pending enable command , if any */
2499 if (is_allmulti_enable(rxf
->rxmode_pending
,
2500 rxf
->rxmode_pending_bitmask
)) {
2501 /* Allmulti mode should not be active */
2502 allmulti_inactive(rxf
->rxmode_pending
,
2503 rxf
->rxmode_pending_bitmask
);
2505 /* Schedule disable */
2506 } else if (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
) {
2507 allmulti_disable(rxf
->rxmode_pending
,
2508 rxf
->rxmode_pending_bitmask
);
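/*
 * Illustrative sketch (standalone, excluded from the build): the four mode
 * helpers above keep two pieces of state per RX mode bit - rxmode_active
 * (what the hardware currently runs) and the rxmode_pending plus
 * rxmode_pending_bitmask pair (an armed change that still has to be pushed
 * through the CAM-filter FSM).  The code below models the same two-word
 * bookkeeping with invented names (mode_request, mode_commit); it is a
 * simplified sketch, not the driver's own is_xxx_enable or xxx_inactive
 * macros.
 */
#if 0
#include <stdio.h>

enum { RXMODE_PROMISC = 1 << 0, RXMODE_ALLMULTI = 1 << 1 };

struct rxmode_state {
	unsigned int active;		/* what the h/w is running now       */
	unsigned int pending;		/* desired values for the armed bits */
	unsigned int pending_valid;	/* which bits of 'pending' are armed */
};

/* Arm a pending change; returns 1 if the h/w will need reprogramming. */
static int mode_request(struct rxmode_state *st, unsigned int bit, int on)
{
	unsigned int want = on ? bit : 0;

	if ((st->active & bit) == want) {
		st->pending_valid &= ~bit;	/* already there: cancel */
		return 0;
	}
	st->pending = (st->pending & ~bit) | want;
	st->pending_valid |= bit;
	return 1;
}

/* Fold armed changes into the active state, as the FSM would do once the
 * firmware acknowledges the filter command. */
static void mode_commit(struct rxmode_state *st)
{
	st->active = (st->active & ~st->pending_valid) |
		     (st->pending & st->pending_valid);
	st->pending_valid = 0;
}

int main(void)
{
	struct rxmode_state st = { 0, 0, 0 };

	if (mode_request(&st, RXMODE_PROMISC, 1))
		mode_commit(&st);
	printf("promisc active: %d\n", !!(st.active & RXMODE_PROMISC));
	return 0;
}
#endif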
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_promisc_id != rxf->rxf_id))
			goto err_return;
		if (rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
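/*
 * Usage sketch (hypothetical caller, not taken from this file): a bnad-side
 * routine that wants promiscuous reception on an Rx object could call
 *
 *	err = bna_rx_mode_set(rx, BNA_RXMODE_PROMISC, BNA_RXMODE_PROMISC,
 *			bnad_cb_rx_mode_set);
 *
 * passing the same flag in new_mode and bitmask so that only the promisc
 * bit is treated as a requested change.  The callback name is assumed here;
 * as the code above shows, it runs immediately when no hardware
 * reprogramming is needed, or later from the CAM-filter FSM once the
 * mailbox command completes.
 */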
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
	}
}

void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
}
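/*
 * Illustrative sketch (standalone, excluded from the build): the function
 * above quantizes the per-interval packet counts into a load bucket and a
 * small-versus-large bias, then looks the new timeout up in the preloaded
 * dim_vector[load][bias] table.  The code below shows the same bucket plus
 * table-lookup idea with invented thresholds and table contents; the real
 * thresholds are the BNA_PKT_RATE_xxx constants and the real table comes
 * from bna_rx_dim_reconfig() above.
 */
#if 0
#include <stdio.h>

#define LOAD_LEVELS	4
#define BIAS_LEVELS	2

/* Invented [load][bias] timeout table; units are arbitrary ticks. */
static const unsigned char dim_table[LOAD_LEVELS][BIAS_LEVELS] = {
	{  1,  2 },	/* low load  */
	{  3,  6 },
	{  6, 12 },
	{ 10, 20 },	/* high load */
};

static unsigned char pick_timeout(unsigned int small_pkts,
				  unsigned int large_pkts)
{
	unsigned int total = small_pkts + large_pkts;
	int load, bias;

	if (total < 10000)
		load = 0;
	else if (total < 30000)
		load = 1;
	else if (total < 60000)
		load = 2;
	else
		load = 3;

	/* Column 0 when small packets clearly dominate, column 1 otherwise. */
	bias = (small_pkts > (large_pkts << 1)) ? 0 : 1;

	return dim_table[load][bias];
}

int main(void)
{
	printf("timeout = %u\n", pick_timeout(45000, 5000));
	return 0;
}
#endif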
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
	}
}
/* RIT segment pool configuration; the table is instantiated by the
 * init_ritseg_pool() macro. */
struct bna_ritseg_pool_cfg {
	u32	pool_size;
	u32	pool_entry_size;
};
init_ritseg_pool(ritseg_pool_cfg);
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < BFI_MAX_UCMAC; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}

static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < BFI_MAX_MCMAC; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->bna = bna;
}

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &mcam_mod->free_q)
		i++;

	mcam_mod->bna = NULL;
}
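/*
 * Illustrative sketch (standalone, excluded from the build): both CAM
 * modules above pre-carve a flat array of bna_mac entries into a linked
 * free queue at init time, and bna_ucam_mod_mac_get() and _mac_put()
 * further down simply pop and push that queue instead of allocating at
 * runtime.  The code below shows the same preallocated free-list pattern
 * with a plain singly linked list instead of the kernel list_head and
 * bfa_q helpers.
 */
#if 0
#include <stdio.h>

#define POOL_SIZE 4

struct mac_entry {
	struct mac_entry *next;		/* free-list linkage */
	unsigned char addr[6];
};

struct mac_pool {
	struct mac_entry entries[POOL_SIZE];	/* preallocated storage */
	struct mac_entry *free_list;
};

/* Thread every preallocated entry onto the free list, as the CAM module
 * init routines above do with their free_q. */
static void mac_pool_init(struct mac_pool *pool)
{
	int i;

	pool->free_list = NULL;
	for (i = 0; i < POOL_SIZE; i++) {
		pool->entries[i].next = pool->free_list;
		pool->free_list = &pool->entries[i];
	}
}

static struct mac_entry *mac_get(struct mac_pool *pool)
{
	struct mac_entry *e = pool->free_list;

	if (e)
		pool->free_list = e->next;
	return e;
}

static void mac_put(struct mac_pool *pool, struct mac_entry *e)
{
	e->next = pool->free_list;
	pool->free_list = e;
}

int main(void)
{
	static struct mac_pool pool;
	struct mac_entry *e;

	mac_pool_init(&pool);
	e = mac_get(&pool);
	printf("got entry %p\n", (void *)e);
	mac_put(&pool, e);
	return 0;
}
#endif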
static void
bna_rit_mod_init(struct bna_rit_mod *rit_mod,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	int offset;

	rit_mod->rit = (struct bna_rit_entry *)
		res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
	rit_mod->rit_segment = (struct bna_rit_segment *)
		res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
		for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
			bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
			rit_mod->rit_segment[count].max_rit_size =
					ritseg_pool_cfg[i].pool_entry_size;
			rit_mod->rit_segment[count].rit_offset = offset;
			rit_mod->rit_segment[count].rit =
					&rit_mod->rit[offset];
			list_add_tail(&rit_mod->rit_segment[count].qe,
					&rit_mod->rit_seg_pool[i]);
			count++;
			offset += ritseg_pool_cfg[i].pool_entry_size;
		}
	}
}
/* Called during probe(), before calling bna_init() */
void
bna_res_req(struct bna_res_info *res_info)
{
	bna_adv_res_req(res_info);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* DMA memory for index segment of an IB */
	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
				BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;

	/* Virtual memory for IB objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
		BFI_MAX_IB * sizeof(struct bna_ib);

	/* Virtual memory for intr objects - stored by IB module */
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
		BFI_MAX_IB * sizeof(struct bna_intr);

	/* Virtual memory for idx_seg objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
		BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		BFI_MAX_TXQ * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		BFI_MAX_TXQ * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		BFI_MAX_RXQ * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		BFI_MAX_RXQ * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		BFI_MAX_RXQ * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		BFI_MAX_UCMAC * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		BFI_MAX_MCMAC * sizeof(struct bna_mac);

	/* Virtual memory for RIT entries */
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
		BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);

	/* Virtual memory for RIT segment table */
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
		BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);

	/* Interrupt resource for mailbox interrupt */
	res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
}
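/*
 * Probe-time flow (sketch; the allocation helper name is assumed, not taken
 * from this file): the res_info[] table filled in above is a request list,
 * not an allocation.  A typical sequence on the bnad side would be
 *
 *	bna_res_req(res_info);			- describe what is needed
 *	bnad_alloc_resources(bnad, res_info);	- assumed helper that satisfies
 *						  every BNA_RES_T_MEM / _INTR entry
 *	bna_init(bna, bnad, &pcidev, res_info);	- hand the filled table to BNA
 *
 * Each entry's res_u.mem_info.mdl[] must point at memory of the requested
 * mem_type and len before bna_init() below dereferences it.
 */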
/* Called during probe() */
void
bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats = (struct bfi_ll_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
	bna->stats.sw_stats = (struct bna_sw_stats *)
		res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;

	bna->regs.page_addr = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].page_addr;
	bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_status;
	bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_mask;

	if (bna->pcidev.pci_func < 3)
		bna->port_num = 0;
	else
		bna->port_num = 1;

	/* Also initializes diag, cee, sfp, phy_port and mbox_mod */
	bna_device_init(&bna->device, bna, res_info);

	bna_port_init(&bna->port, bna);

	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ib_mod_init(&bna->ib_mod, bna, res_info);

	bna_rit_mod_init(&bna->rit_mod, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Mbox q element for posting stat request to f/w */
	bfa_q_qe_init(&bna->mbox_qe.qe);
}
void
bna_uninit(struct bna *bna)
{
	bna_mcam_mod_uninit(&bna->mcam_mod);

	bna_ucam_mod_uninit(&bna->ucam_mod);

	bna_ib_mod_uninit(&bna->ib_mod);

	bna_rx_mod_uninit(&bna->rx_mod);

	bna_tx_mod_uninit(&bna->tx_mod);

	bna_port_uninit(&bna->port);

	bna_device_uninit(&bna->device);

	bna->bnad = NULL;
}

struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}

struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
/**
 * Note: This should be called in the same locking context as the call to
 * bna_rit_mod_seg_get()
 */
int
bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
{
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return 0;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return 0;

	return 1;
}

struct bna_rit_segment *
bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
{
	struct bna_rit_segment *seg;
	struct list_head *qe;
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return NULL;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return NULL;

	bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
	seg = (struct bna_rit_segment *)qe;
	bfa_q_qe_init(&seg->qe);
	seg->rit_size = seg_size;

	return seg;
}

void
bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
			struct bna_rit_segment *seg)
{
	int i;

	/* Select the pool for seg->max_rit_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	seg->rit_size = 0;
	list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
}
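/*
 * Illustrative sketch (standalone, excluded from the build): the three RIT
 * segment routines above implement a simple size-class allocator - scan
 * ritseg_pool_cfg[] for the first class whose pool_entry_size fits the
 * request, then take or return a segment on that class's free list.  The
 * code below shows just the class-selection step, with invented class sizes
 * and free counts.
 */
#if 0
#include <stdio.h>

#define NUM_POOLS 3

/* Invented size classes: segments of 4, 8 and 64 table entries. */
static const int pool_entry_size[NUM_POOLS] = { 4, 8, 64 };
static int pool_free_cnt[NUM_POOLS] = { 8, 4, 1 };

/* Return the index of the pool that can hold seg_size entries and still
 * has a free segment, or -1 if the request cannot be satisfied. */
static int pick_pool(int seg_size)
{
	int i;

	for (i = 0; i < NUM_POOLS; i++)
		if (seg_size <= pool_entry_size[i])
			break;

	if (i == NUM_POOLS || pool_free_cnt[i] == 0)
		return -1;
	return i;
}

int main(void)
{
	int pool = pick_pool(6);

	if (pool >= 0) {
		pool_free_cnt[pool]--;	/* "get" a segment from that class */
		printf("using %d-entry segments from pool %d\n",
		       pool_entry_size[pool], pool);
	}
	return 0;
}
#endif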