/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
#include "bna.h"
#include "bfa_sm.h"
#include "bfi.h"

/**
 * IB
 */
#define bna_ib_find_free_ibidx(_mask, _pos)\
do {\
	(_pos) = 0;\
	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
		((1 << (_pos)) & (_mask)))\
		(_pos)++;\
} while (0)

#define bna_ib_count_ibidx(_mask, _count)\
do {\
	int pos = 0;\
	(_count) = 0;\
	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
		if ((1 << pos) & (_mask))\
			(_count) = pos + 1;\
		pos++;\
	} \
} while (0)

#define bna_ib_select_segpool(_count, _q_idx)\
do {\
	int i;\
	(_q_idx) = -1;\
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
		if ((_count <= ibidx_pool[i].pool_entry_size)) {\
			(_q_idx) = i;\
			break;\
		} \
	} \
} while (0)

struct bna_ibidx_pool {
	int	pool_size;
	int	pool_entry_size;
};
init_ibidx_pool(ibidx_pool);
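
/*
 * Note: the pools above carve the per-IB index table into fixed-size
 * segments.  bna_ib_select_segpool() picks the smallest pool whose
 * entry size can hold a requested index count, so segment allocation
 * below amounts to a best-fit search over the pool array.
 */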
static struct bna_intr *
bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
		int vector)
{
	struct bna_intr *intr;
	struct list_head *qe;

	list_for_each(qe, &ib_mod->intr_active_q) {
		intr = (struct bna_intr *)qe;

		if ((intr->intr_type == intr_type) &&
			(intr->vector == vector)) {
			intr->ref_count++;
			return intr;
		}
	}

	if (list_empty(&ib_mod->intr_free_q))
		return NULL;

	bfa_q_deq(&ib_mod->intr_free_q, &intr);
	bfa_q_qe_init(&intr->qe);

	intr->ref_count = 1;
	intr->intr_type = intr_type;
	intr->vector = vector;

	list_add_tail(&intr->qe, &ib_mod->intr_active_q);

	return intr;
}

static void
bna_intr_put(struct bna_ib_mod *ib_mod,
		struct bna_intr *intr)
{
	intr->ref_count--;

	if (intr->ref_count == 0) {
		intr->ib = NULL;
		list_del(&intr->qe);
		bfa_q_qe_init(&intr->qe);
		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
	}
}
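
/*
 * bna_ib_mod_init() below sets up the IB module from pre-allocated
 * resource memory.  Each IB's doorbell address is computed with an
 * offsetof-style idiom: a NULL bna_doorbell_qset pointer is indexed to
 * obtain the byte offset of the IB's doorbell within the queue-set
 * array, which is then added to the doorbell base of the PCI BAR.
 */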
void
bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	u8 offset;
	struct bna_doorbell_qset *qset;
	unsigned long off;

	ib_mod->bna = bna;

	ib_mod->ib = (struct bna_ib *)
		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->intr = (struct bna_intr *)
		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->idx_seg = (struct bna_ibidx_seg *)
		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ib_mod->ib_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_active_q);

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);

	for (i = 0; i < BFI_MAX_IB; i++) {
		ib_mod->ib[i].ib_id = i;

		ib_mod->ib[i].ib_seg_host_addr_kva =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		ib_mod->ib[i].ib_seg_host_addr.lsb =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		ib_mod->ib[i].ib_seg_host_addr.msb =
		res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;

		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
					* (0x20 >> 2)]);
		ib_mod->ib[i].door_bell.doorbell_addr = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);

		bfa_q_qe_init(&ib_mod->ib[i].qe);
		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);

		bfa_q_qe_init(&ib_mod->intr[i].qe);
		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
	}

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
			bfa_q_qe_init(&ib_mod->idx_seg[count]);
			ib_mod->idx_seg[count].ib_seg_size =
					ibidx_pool[i].pool_entry_size;
			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
			list_add_tail(&ib_mod->idx_seg[count].qe,
				&ib_mod->ibidx_seg_pool[i]);
			count++;
			offset += ibidx_pool[i].pool_entry_size;
		}
	}
}

void
bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
{
	int i;
	int j;
	struct list_head *qe;

	i = 0;
	list_for_each(qe, &ib_mod->ib_free_q)
		i++;

	i = 0;
	list_for_each(qe, &ib_mod->intr_free_q)
		i++;

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		j = 0;
		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
			j++;
	}

	ib_mod->bna = NULL;
}
struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
		enum bna_intr_type intr_type,
		int vector)
{
	struct bna_ib *ib;
	struct bna_intr *intr;

	if (intr_type == BNA_INTR_T_INTX)
		vector = (1 << vector);

	intr = bna_intr_get(ib_mod, intr_type, vector);
	if (intr == NULL)
		return NULL;

	if (intr->ib) {
		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
			bna_intr_put(ib_mod, intr);
			return NULL;
		}
		intr->ib->ref_count++;
		return intr->ib;
	}

	if (list_empty(&ib_mod->ib_free_q)) {
		bna_intr_put(ib_mod, intr);
		return NULL;
	}

	bfa_q_deq(&ib_mod->ib_free_q, &ib);
	bfa_q_qe_init(&ib->qe);

	ib->ref_count = 1;
	ib->start_count = 0;
	ib->idx_mask = 0;

	ib->intr = intr;
	ib->idx_seg = NULL;
	intr->ib = ib;

	ib->bna = ib_mod->bna;

	return ib;
}

void
bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
{
	bna_intr_put(ib_mod, ib->intr);

	ib->ref_count--;

	if (ib->ref_count == 0) {
		ib->intr->ib = NULL;
		ib->bna = NULL;
		bfa_q_qe_init(&ib->qe);
		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
	}
}
/* Returns index offset - starting from 0 */
int
bna_ib_reserve_idx(struct bna_ib *ib)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int idx;
	int num_idx;
	int q_idx;

	/* Find the first free index position */
	bna_ib_find_free_ibidx(ib->idx_mask, idx);
	if (idx == BFI_IBIDX_MAX_SEGSIZE)
		return -1;

	/*
	 * Calculate the total number of indexes held by this IB,
	 * including the index newly reserved above.
	 */
	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);

	/* See if there is a free space in the index segment held by this IB */
	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
		ib->idx_mask |= (1 << idx);
		return idx;
	}

	if (ib->start_count)
		return -1;

	/* Allocate a new segment */
	bna_ib_select_segpool(num_idx, q_idx);
	while (1) {
		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
			return -1;
		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
			break;
		q_idx++;
	}
	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
	bfa_q_qe_init(&idx_seg->qe);

	/* Free the old segment */
	if (ib->idx_seg) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
	}

	ib->idx_seg = idx_seg;

	ib->idx_mask |= (1 << idx);

	return idx;
}

void
bna_ib_release_idx(struct bna_ib *ib, int idx)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int num_idx;
	int cur_q_idx;
	int new_q_idx;

	ib->idx_mask &= ~(1 << idx);

	if (ib->start_count)
		return;

	bna_ib_count_ibidx(ib->idx_mask, num_idx);

	/*
	 * Free the segment, if there are no more indexes in the segment
	 * held by this IB
	 */
	if (!num_idx) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = NULL;
		return;
	}

	/* See if we can move to a smaller segment */
	bna_ib_select_segpool(num_idx, new_q_idx);
	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
	while (new_q_idx < cur_q_idx) {
		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
			break;
		new_q_idx++;
	}
	if (new_q_idx < cur_q_idx) {
		/* Select the new smaller segment */
		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
		bfa_q_qe_init(&idx_seg->qe);
		/* Free the old segment */
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = idx_seg;
	}
}
int
bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
{
	if (ib->start_count)
		return -1;

	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
	ib->ib_config.interpkt_count = ib_config->interpkt_count;
	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;

	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;

	return 0;
}
void
bna_ib_start(struct bna_ib *ib)
{
	struct bna_ib_blk_mem ib_cfg;
	struct bna_ib_blk_mem *ib_mem;
	u32 pg_num;
	u32 intx_mask;
	int i;
	unsigned long off;
	void __iomem *base_addr;

	ib->start_count++;

	if (ib->start_count > 1)
		return;

	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);

	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
			ib->ib_config.coalescing_timeo << 16) |
		((u32)ib->ib_config.ctrl_flags << 8) |
		(ib->intr->vector));
	ib_cfg.ipkt_n_ent_n_idxof =
		((u32)
		 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
		((u32)ib->idx_seg->ib_seg_size << 8) |
		(ib->idx_seg->ib_idx_tbl_offset);
	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
			ib->ib_config.interpkt_count << 24);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
			HQM_IB_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
			HQM_IB_RAM_BASE_OFFSET);

	ib_mem = (struct bna_ib_blk_mem *)0;
	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);

	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
			(u32)ib->ib_config.coalescing_timeo, 0);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
			HQM_INDX_TBL_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
			HQM_INDX_TBL_RAM_BASE_OFFSET);
	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
		off = (unsigned long)
		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
		writel(0, base_addr + off);
	}

	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
		bna_intx_disable(ib->bna, intx_mask);
		intx_mask &= ~(ib->intr->vector);
		bna_intx_enable(ib->bna, intx_mask);
	}
}

void
bna_ib_stop(struct bna_ib *ib)
{
	u32 intx_mask;

	ib->start_count--;

	if (ib->start_count == 0) {
		writel(BNA_DOORBELL_IB_INT_DISABLE,
			ib->door_bell.doorbell_addr);
		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
			bna_intx_disable(ib->bna, intx_mask);
			intx_mask |= (ib->intr->vector);
			bna_intx_enable(ib->bna, intx_mask);
		}
	}
}

void
bna_ib_fail(struct bna_ib *ib)
{
	ib->start_count = 0;
}
/**
 * RXF
 */
static void rxf_enable(struct bna_rxf *rxf);
static void rxf_disable(struct bna_rxf *rxf);
static void __rxf_config_set(struct bna_rxf *rxf);
static void __rxf_rit_set(struct bna_rxf *rxf);
static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
static int rxf_process_packet_filter(struct bna_rxf *rxf);
static int rxf_clear_packet_filter(struct bna_rxf *rxf);
static void rxf_reset_packet_filter(struct bna_rxf *rxf);
static void rxf_cb_enabled(void *arg, int status);
static void rxf_cb_disabled(void *arg, int status);
static void bna_rxf_cb_stats_cleared(void *arg, int status);
static void __rxf_enable(struct bna_rxf *rxf);
static void __rxf_disable(struct bna_rxf *rxf);
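
/*
 * RxF state machine: stopped -> start_wait -> cam_fltr_mod_wait ->
 * started, with cam_fltr_clr_wait/stop_wait/stat_clr_wait on the stop
 * path and pause_wait/resume_wait for operational state changes.  The
 * declarations below generate the per-state entry functions and event
 * handlers; rxf_sm_table maps them to the BNA_RXF_* state codes.
 */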
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
		enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
		enum bna_rxf_event);

static struct bfa_sm_table rxf_sm_table[] = {
	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
};
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
		break;

	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_MOD:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_STARTED:
	case RXF_E_STOPPED:
	case RXF_E_CAM_FLTR_RESP:
		/**
		 * These events are received due to flushing of mbox
		 * when device fails
		 */
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
{
	__rxf_config_set(rxf);
	__rxf_rit_set(rxf);
	rxf_enable(rxf);
}

static void
bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it can not be waiting for filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
		break;

	case RXF_E_FAIL:
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_STARTED:
		/**
		 * Force rxf_process_filter() to go through initial
		 * config
		 */
		if ((rxf->ucast_active_mac != NULL) &&
			(rxf->ucast_pending_set == 0))
			rxf->ucast_pending_set = 1;

		if (rxf->rss_status == BNA_STATUS_T_ENABLED)
			rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;

		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;

		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}
static void
bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
{
	if (!rxf_process_packet_filter(rxf)) {
		/* No more pending CAM entries to update */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/**
		 * STOP is originated from bnad. When this happens,
		 * it can not be waiting for filter update
		 */
		call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
		call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		/* No-op */
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_process_packet_filter(rxf)) {
			/* No more pending CAM entries to update */
			call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	case RXF_E_PAUSE:
	case RXF_E_RESUME:
		rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);

	if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
		if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
			bfa_fsm_send_event(rxf, RXF_E_PAUSE);
		else
			bfa_fsm_send_event(rxf, RXF_E_RESUME);
	}
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
		/* Hack to get FSM start clearing CAM entries */
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
		break;

	case RXF_E_FAIL:
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_MOD:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
		break;

	case RXF_E_PAUSE:
		bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
		break;

	case RXF_E_RESUME:
		bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}
static void
bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * Note: Do not add rxf_clear_packet_filter here.
	 * It will overstep mbox when this transition happens:
	 *	cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		rxf_reset_packet_filter(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CAM_FLTR_RESP:
		if (!rxf_clear_packet_filter(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
			rxf_disable(rxf);
		}
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
{
	/**
	 * NOTE: Do not add rxf_disable here.
	 * It will overstep mbox when this transition happens:
	 *	start_wait -> stop_wait on RXF_E_STOP event
	 */
}

static void
bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		/**
		 * This event is received due to abrupt transition from
		 * bna_rxf_sm_start_wait state on receiving
		 * RXF_E_STOP event
		 */
		rxf_disable(rxf);
		break;

	case RXF_E_STOPPED:
		/**
		 * FSM was in the process of stopping, initiated by
		 * bnad. When this happens, no one can be waiting for
		 * start or filter update
		 */
		bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
		break;

	case RXF_E_PAUSE:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		break;

	case RXF_E_RESUME:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &=
		~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
	__rxf_disable(rxf);
}

static void
bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STOPPED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
		call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
{
	rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
	rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
	__rxf_enable(rxf);
}

static void
bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		/**
		 * FSM was in the process of disabling rxf, initiated by
		 * bnad.
		 */
		call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_STARTED:
		rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
		call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		break;

	/*
	 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
	 * any other event during these states
	 */
	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}

static void
bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
{
	__bna_rxf_stat_clr(rxf);
}

static void
bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_STAT_CLEARED:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(rxf->rx->bna, event);
	}
}
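
/*
 * The hardware tracks up to 64 Rx functions; mailbox requests carry
 * the rxf_id as a two-word bitmask, so ids 0-31 set a bit in word 0
 * and ids 32-63 in word 1 (both sent in network byte order).
 */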
static void
__rxf_enable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 1;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_enabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}

static void
__rxf_disable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 0;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_disabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
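
/*
 * The __rxf_* helpers below write configuration straight to adapter
 * RAM: a page number is first written to the page-address register to
 * select the memory block, then individual words are written through
 * the PCI BAR window at offsets computed from NULL-based structure
 * pointers (the same offsetof-style idiom used for IB doorbells).
 */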
static void
__rxf_config_set(struct bna_rxf *rxf)
{
	u32 i;
	struct bna_rss_mem *rss_mem;
	struct bna_rx_fndb_ram *rx_fndb_ram;
	struct bna *bna = rxf->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			RSS_TABLE_BASE_OFFSET);

	rss_mem = (struct bna_rss_mem *)0;

	/* Configure RSS if required */
	if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
		/* configure RSS Table */
		writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
			bna->port_num, RSS_TABLE_BASE_OFFSET),
			bna->regs.page_addr);

		/* temporarily disable RSS, while hash value is written */
		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(0, base_addr + off);

		for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
			off = (unsigned long)
			&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
			writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
				base_addr + off);
		}

		off = (unsigned long)&rss_mem[0].type_n_hash;
		writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
			base_addr + off);
	}

	/* Configure RxF */
	writel(BNA_GET_PAGE_NUM(
		LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
		RX_FNDB_RAM_BASE_OFFSET),
		bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
		RX_FNDB_RAM_BASE_OFFSET);

	rx_fndb_ram = (struct bna_rx_fndb_ram *)0;

	/* We always use RSS table 0 */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
	writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
		base_addr + off);

	/* small large buffer enable/disable */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
	writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
		base_addr + off);

	/* RIT offset, HDS forced offset, multicast RxQ Id */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
	writel((rxf->rit_segment->rit_offset << 16) |
		(rxf->forced_offset << 8) |
		(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
		base_addr + off);

	/*
	 * default vlan tag, default function enable, strip vlan bytes,
	 * HDS type, header size
	 */
	off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
	writel(((u32)rxf->default_vlan_tag << 16) |
		(rxf->ctrl_flags &
			(BNA_RXF_CF_DEFAULT_VLAN |
				BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
				BNA_RXF_CF_VLAN_STRIP)) |
		(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
		rxf->hds_cfg.header_size,
		base_addr + off);
}

void
__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
{
	struct bna *bna = rxf->rx->bna;
	int i;

	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
			bna->regs.page_addr);

	if (status == BNA_STATUS_T_ENABLED) {
		/* enable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(rxf->vlan_filter_table[i],
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	} else {
		/* disable VLAN filtering on this function */
		for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
			writel(0xffffffff,
					BNA_GET_VLAN_MEM_ENTRY_ADDR
					(bna->pcidev.pci_bar_kva, rxf->rxf_id,
						i * 32));
		}
	}
}
static void
__rxf_rit_set(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	struct bna_rit_mem *rit_mem;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			FUNCTION_TO_RXQ_TRANSLATE);

	rit_mem = (struct bna_rit_mem *)0;

	writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
			FUNCTION_TO_RXQ_TRANSLATE),
			bna->regs.page_addr);

	for (i = 0; i < rxf->rit_segment->rit_size; i++) {
		off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
		writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
			rxf->rit_segment->rit[i].small_rxq_id,
			base_addr + off);
	}
}

static void
__bna_rxf_stat_clr(struct bna_rxf *rxf)
{
	struct bfi_ll_stats_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = 0;
	ll_req.txf_id_mask[0] = 0;
	ll_req.txf_id_mask[1] = 0;

	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			bna_rxf_cb_stats_cleared, rxf);
	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
static void
rxf_enable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STARTED);
	else {
		rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
		__rxf_enable(rxf);
	}
}

static void
rxf_cb_enabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STARTED);
}

static void
rxf_disable(struct bna_rxf *rxf)
{
	if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
		bfa_fsm_send_event(rxf, RXF_E_STOPPED);
	else {
		rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
		__rxf_disable(rxf);
	}
}

static void
rxf_cb_disabled(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STOPPED);
}

void
rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
}

static void
bna_rxf_cb_stats_cleared(void *arg, int status)
{
	struct bna_rxf *rxf = (struct bna_rxf *)arg;

	bfa_q_qe_init(&rxf->mbox_qe.qe);
	bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
}
void
rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
		const struct bna_mac *mac_addr)
{
	struct bfi_ll_mac_addr_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
				rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
static int
rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		return 1;
	}

	/* Delete multicast entries previously added */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	return 0;
}

static int
rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
{
	/* Apply the VLAN filter */
	if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
		if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
			!(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
			__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
	}

	/* Apply RSS configuration */
	if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
		rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
		if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
			/* RSS is being disabled */
			rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		} else {
			/* RSS is being enabled or reconfigured */
			rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
			__rxf_rit_set(rxf);
			__rxf_config_set(rxf);
		}
	}

	return 0;
}

/**
 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
 * command. Also processes pending filter configuration - promiscuous mode,
 * default mode, allmulti mode and issues mailbox command or directly applies
 * to h/w
 */
static int
rxf_process_packet_filter(struct bna_rxf *rxf)
{
	/* Set the default MAC first */
	if (rxf->ucast_pending_set > 0) {
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
				rxf->ucast_active_mac);
		rxf->ucast_pending_set--;
		return 1;
	}

	if (rxf_process_packet_filter_ucast(rxf))
		return 1;

	if (rxf_process_packet_filter_mcast(rxf))
		return 1;

	if (rxf_process_packet_filter_promisc(rxf))
		return 1;

	if (rxf_process_packet_filter_default(rxf))
		return 1;

	if (rxf_process_packet_filter_allmulti(rxf))
		return 1;

	if (rxf_process_packet_filter_vlan(rxf))
		return 1;

	return 0;
}
static int
rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 3. delete pending mcast entries */
	if (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		return 1;
	}

	/* 4. clear active mcast entries; move them to pending_add_q */
	if (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
		return 1;
	}

	return 0;
}

/**
 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
 * so that they are added to CAM again in the rxf start path. Moves the current
 * filter settings - promiscuous, default, allmulti - to pending filter
 * operations
 */
static int
rxf_clear_packet_filter(struct bna_rxf *rxf)
{
	if (rxf_clear_packet_filter_ucast(rxf))
		return 1;

	if (rxf_clear_packet_filter_mcast(rxf))
		return 1;

	/* 5. clear active default MAC in the CAM */
	if (rxf->ucast_pending_set > 0)
		rxf->ucast_pending_set = 0;

	if (rxf_clear_packet_filter_promisc(rxf))
		return 1;

	if (rxf_clear_packet_filter_default(rxf))
		return 1;

	if (rxf_clear_packet_filter_allmulti(rxf))
		return 1;

	return 0;
}

static void
rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 3. Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
	}

	/* 4. Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}
}

/**
 * In the rxf fail path, throws away the ucast/mcast entries pending for
 * deletion, moves all active ucast/mcast entries to pending queue so that
 * they are added back to CAM in the rxf start path. Also moves the current
 * filter configuration to pending filter configuration.
 */
static void
rxf_reset_packet_filter(struct bna_rxf *rxf)
{
	rxf_reset_packet_filter_ucast(rxf);

	rxf_reset_packet_filter_mcast(rxf);

	/* 5. Turn off ucast set flag */
	rxf->ucast_pending_set = 0;

	rxf_reset_packet_filter_promisc(rxf);

	rxf_reset_packet_filter_default(rxf);

	rxf_reset_packet_filter_allmulti(rxf);
}
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	struct list_head *qe;
	struct bna_rxp *rxp;

	/* rxf_id is initialized during rx_mod init */
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_active_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);

	bfa_q_qe_init(&rxf->mbox_qe.qe);

	if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
		rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;

	rxf->rxf_oper_state = (q_config->paused) ?
		BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;

	bna_rxf_adv_init(rxf, rx, q_config);

	rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
					q_config->num_paths);

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		if (q_config->rxp_type == BNA_RXP_SINGLE)
			rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
		else
			rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
		break;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}

static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
	rxf->rit_segment = NULL;

	rxf->ucast_pending_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}

	if (rxf->ucast_active_mac) {
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
			rxf->ucast_active_mac);
		rxf->ucast_active_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	rxf->rx = NULL;
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	rxf->rxf_flags |= BNA_RXF_FL_FAILED;
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}

int
bna_rxf_state_get(struct bna_rxf *rxf)
{
	return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_active_mac == NULL) {
		rxf->ucast_active_mac =
			bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
		if (rxf->ucast_active_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_active_mac->qe);
	}

	memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set++;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	/* Check if already added */
	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	/* Check if pending addition */
	list_for_each(qe, &rxf->mcast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

	return BNA_CB_SUCCESS;
}

enum bna_cb_status
bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac;

	list_for_each(qe, &rxf->mcast_pending_add_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
			if (cbfn)
				(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
			return BNA_CB_SUCCESS;
		}
	}

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
			list_del(qe);
			bfa_q_qe_init(qe);
			list_add_tail(qe, &rxf->mcast_pending_del_q);
			rxf->cam_fltr_cbfn = cbfn;
			rxf->cam_fltr_cbarg = rx->bna->bnad;
			bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
			return BNA_CB_SUCCESS;
		}
	}

	return BNA_CB_INVALID_MAC;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *,
				  enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac;
	struct bna_mac *mac1;
	int skip;
	int delete;
	int need_hw_config = 0;
	int i;

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);

		mcaddr += ETH_ALEN;
	}

	/* Schedule for addition */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		skip = 0;

		/* Skip if already added */
		list_for_each(qe, &rxf->mcast_active_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
							mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		/* Skip if pending addition */
		list_for_each(qe, &rxf->mcast_pending_add_q) {
			mac1 = (struct bna_mac *)qe;
			if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
				bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
							mac);
				skip = 1;
				break;
			}
		}

		if (skip)
			continue;

		need_hw_config = 1;
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Delete the entries that are in the pending_add_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete)
			bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		else
			list_add_tail(&mac->qe, &list_head);
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	/**
	 * Schedule entries for deletion that are in the active_q but not
	 * in the new list
	 */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
			if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
				delete = 0;
				break;
			}
			mcaddr += ETH_ALEN;
		}
		if (delete) {
			list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
			need_hw_config = 1;
		} else {
			list_add_tail(&mac->qe, &list_head);
		}
	}
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}
/**
 * RX
 */
#define	RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do {	\
	struct bna_doorbell_qset *_qset;				\
	unsigned long off;						\
	(q)->rcb->producer_index = (q)->rcb->consumer_index = 0;	\
	(q)->rcb->q_depth = (qdepth);					\
	(q)->rcb->unmap_q = unmapq_mem;					\
	(q)->rcb->rxq = (q);						\
	(q)->rcb->cq = &(rxp)->cq;					\
	(q)->rcb->bnad = (bna)->bnad;					\
	_qset = (struct bna_doorbell_qset *)0;				\
	off = (unsigned long)&_qset[(q)->rxq_id].rxq[0];		\
	(q)->rcb->q_dbell = off +					\
		BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva);	\
	(q)->rcb->id = _id;						\
} while (0)
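
/*
 * RXQ_RCB_INIT primes an Rx control block and computes the queue's
 * doorbell address from the doorbell queue-set layout, mirroring the
 * NULL-pointer offset computation used for IB doorbells in
 * bna_ib_mod_init().
 */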
#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define	call_rx_stop_callback(rx, status)	\
	if ((rx)->stop_cbfn) {			\
		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status));	\
		(rx)->stop_cbfn = NULL;		\
		(rx)->stop_cbarg = NULL;	\
	}

/*
 * Since rx_enable is synchronous callback, there is no start_cbfn required.
 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers.
 */
#define	call_rx_disable_cbfn(rx, status)	\
	if ((rx)->disable_cbfn) {		\
		(*(rx)->disable_cbfn)((rx)->disable_cbarg,	\
			(status));		\
		(rx)->disable_cbfn = NULL;	\
		(rx)->disable_cbarg = NULL;	\
	}

#define	rxqs_reqd(type, num_rxqs)	\
	(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))

#define	rx_ib_fail(rx)						\
do {								\
	struct bna_rxp *rxp;					\
	struct list_head *qe;					\
	list_for_each(qe, &(rx)->rxp_q) {			\
		rxp = (struct bna_rxp *)qe;			\
		bna_ib_fail(rxp->cq.ib);			\
	}							\
} while (0)
static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
static void __bna_rxq_start(struct bna_rxq *rxq);
static void __bna_cq_start(struct bna_cq *cq);
static void bna_rit_create(struct bna_rx *rx);
static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
static void bna_rx_cb_rxq_stopped_all(void *arg);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
	struct bna_rx, enum bna_rx_event);

static struct bfa_sm_table rx_sm_table[] = {
	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
	{BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
	{BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
};
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
	}

	call_rx_stop_callback(rx, BNA_CB_SUCCESS);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;
	case RX_E_STOP:
		call_rx_stop_callback(rx, BNA_CB_SUCCESS);
		break;
	case RX_E_FAIL:
		/* no-op */
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	struct bna_rxq *q0 = NULL, *q1 = NULL;

	/* Setup the RIT */
	bna_rit_create(rx);

	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rxp->cq.ib);
		GET_RXQS(rxp, q0, q1);
		q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
		__bna_rxq_start(q0);
		rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
		if (q1) {
			__bna_rxq_start(q1);
			rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
		}
		__bna_cq_start(&rxp->cq);
	}

	bna_rxf_start(&rx->rxf);
}

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}
void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_ack(&rxp->cq.ib->door_bell, 0);
	}

	bna_llport_admin_up(&rx->bna->port.llport);
}

void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bna_llport_admin_down(&rx->bna->port.llport);
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	case RX_E_STOP:
		bna_llport_admin_down(&rx->bna->port.llport);
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}

void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
	bna_rxf_stop(&rx->rxf);
}

void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
		break;
	case RX_E_RXF_STARTED:
		/**
		 * RxF was in the process of starting up when
		 * RXF_E_STOP was issued. Ignore this event
		 */
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		rx_ib_fail(rx);
		bna_rxf_fail(&rx->rxf);
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}
void
bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct list_head	*qe;
	u32 rxq_mask[2] = {0, 0};

	/* Only one call to multi-rxq-stop for all RXPs in this RX */
	bfa_wc_up(&rx->rxq_stop_wc);
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		GET_RXQS(rxp, q0, q1);
		if (q0->rxq_id < 32)
			rxq_mask[0] |= ((u32)1 << q0->rxq_id);
		else
			rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
		if (q1) {
			if (q1->rxq_id < 32)
				rxq_mask[0] |= ((u32)1 << q1->rxq_id);
			else
				rxq_mask[1] |= ((u32)
						1 << (q1->rxq_id - 32));
		}
	}

	__bna_multi_rxq_stop(rxp, rxq_mask);
}

void
bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	struct bna_rxp *rxp = NULL;
	struct list_head	*qe;

	switch (event) {
	case RX_E_RXQ_STOPPED:
		list_for_each(qe, &rx->rxp_q) {
			rxp = (struct bna_rxp *)qe;
			bna_ib_stop(rxp->cq.ib);
		}
		/* Fall through */
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	default:
		bfa_sm_fault(rx->bna, event);
		break;
	}
}
static void
__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 * rxq_id_mask)
{
	struct bfi_ll_q_stop_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
	ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
	ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
	bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
		bna_rx_cb_multi_rxq_stopped, rxp);
	bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
}
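
/*
 * __bna_rxq_start() and __bna_cq_start() program a queue's page table
 * (QPT) and initial state into HQM RAM through the BAR window.
 * DMA addresses are byte-swapped with htonl(), apparently because the
 * adapter expects them big-endian; the remaining control words are
 * written as-is.
 */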
static void
__bna_rxq_start(struct bna_rxq *rxq)
{
	struct bna_rxtx_q_mem *q_mem;
	struct bna_rxq_mem rxq_cfg, *rxq_mem;
	struct bna_dma_addr cur_q_addr;
	/* struct bna_doorbell_qset *qset; */
	struct bna_qpt *qpt;
	u32 pg_num;
	struct bna *bna = rxq->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	qpt = &rxq->qpt;
	cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));

	rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
	rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
	rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
	rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;

	rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
	rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
		(qpt->page_size >> 2);
	rxq_cfg.sg_n_cq_n_cns_ptr =
		((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
	rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
		BNA_Q_IDLE_STATE;
	rxq_cfg.next_qid = 0x0 | (0x3 << 8);

	/* Write the page number register */
	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
			HQM_RXTX_Q_RAM_BASE_OFFSET);
	writel(pg_num, bna->regs.page_addr);

	/* Write to h/w */
	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
					HQM_RXTX_Q_RAM_BASE_OFFSET);

	q_mem = (struct bna_rxtx_q_mem *)0;
	rxq_mem = &q_mem[rxq->rxq_id].rxq;

	off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
	writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);

	off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
	writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);

	off = (unsigned long)&rxq_mem->cur_q_entry_lo;
	writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);

	off = (unsigned long)&rxq_mem->cur_q_entry_hi;
	writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);

	off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
	writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

	off = (unsigned long)&rxq_mem->entry_n_pg_size;
	writel(rxq_cfg.entry_n_pg_size, base_addr + off);

	off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
	writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);

	off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
	writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);

	off = (unsigned long)&rxq_mem->next_qid;
	writel(rxq_cfg.next_qid, base_addr + off);

	rxq->rcb->producer_index = 0;
	rxq->rcb->consumer_index = 0;
}

static void
__bna_cq_start(struct bna_cq *cq)
{
	struct bna_cq_mem cq_cfg, *cq_mem;
	const struct bna_qpt *qpt;
	struct bna_dma_addr cur_q_addr;
	u32 pg_num;
	struct bna *bna = cq->rx->bna;
	void __iomem *base_addr;
	unsigned long off;

	qpt = &cq->qpt;
	cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));

	/*
	 * Fill out structure, to be subsequently written
	 * to hardware
	 */
	cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
	cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
	cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
	cq_cfg.cur_q_entry_hi = cur_q_addr.msb;

	cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
	cq_cfg.entry_n_pg_size =
		((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
	cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
		((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
	cq_cfg.q_state = BNA_Q_IDLE_STATE;

	/* Write the page number register */
	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
			HQM_CQ_RAM_BASE_OFFSET);

	writel(pg_num, bna->regs.page_addr);

	/* H/W write */
	base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
			HQM_CQ_RAM_BASE_OFFSET);

	cq_mem = (struct bna_cq_mem *)0;

	off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
	writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
	writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
	writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
	writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
	writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
	writel(cq_cfg.entry_n_pg_size, base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
	writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);

	off = (unsigned long)&cq_mem[cq->cq_id].q_state;
	writel(cq_cfg.q_state, base_addr + off);

	cq->ccb->producer_index = 0;
	*(cq->ccb->hw_producer_index) = 0;
}
void
bna_rit_create(struct bna_rx *rx)
{
	struct list_head *qe_rxp;
	struct bna *bna;
	struct bna_rxp *rxp;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	int offset;

	bna = rx->bna;

	offset = 0;
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		GET_RXQS(rxp, q0, q1);
		rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
		rx->rxf.rit_segment->rit[offset].small_rxq_id =
						(q1 ? q1->rxq_id : 0);
		offset++;
	}
}

static int
_rx_can_satisfy(struct bna_rx_mod *rx_mod,
		struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
				return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
				return 0;
	}

	if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
		return 0;

	return 1;
}
static struct bna_rxq *
_get_free_rxq(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head	*qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	if (qe) {
		rx_mod->rxq_free_count--;
		rxq = (struct bna_rxq *)qe;
	}

	return rxq;
}

static void
_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
_get_free_rxp(struct bna_rx_mod *rx_mod)
{
	struct list_head	*qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	if (qe) {
		rx_mod->rxp_free_count--;
		rxp = (struct bna_rxp *)qe;
	}

	return rxp;
}

static void
_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}

static struct bna_rx *
_get_free_rx(struct bna_rx_mod *rx_mod)
{
	struct list_head	*qe = NULL;
	struct bna_rx *rx = NULL;

	bfa_q_deq(&rx_mod->rx_free_q, &qe);
	if (qe) {
		rx_mod->rx_free_count--;
		rx = (struct bna_rx *)qe;
		bfa_q_qe_init(qe);
		list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	}

	return rx;
}

static void
_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	rx_mod->rx_free_count++;
}
static void
_rx_init(struct bna_rx *rx, struct bna *bna)
{
	rx->bna = bna;
	rx->rx_flags = 0;

	INIT_LIST_HEAD(&rx->rxp_q);

	rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
	rx->rxq_stop_wc.wc_cbarg = rx;
	rx->rxq_stop_wc.wc_count = 0;

	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
}

static void
_rxp_add_rxqs(struct bna_rxp *rxp,
		struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	default:
		break;
	}
}
static void
_rxq_qpt_init(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int	i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}

static void
_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int	i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;

		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}
2445 _rx_add_rxp(struct bna_rx
*rx
, struct bna_rxp
*rxp
)
2447 list_add_tail(&rxp
->qe
, &rx
->rxp_q
);
static void
_init_rxmod_queues(struct bna_rx_mod *rx_mod)
{
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	rx_mod->rx_free_count = 0;
	rx_mod->rxq_free_count = 0;
	rx_mod->rxp_free_count = 0;
}
static void
_rx_ctor(struct bna_rx *rx, int id)
{
	bfa_q_qe_init(&rx->qe);
	INIT_LIST_HEAD(&rx->rxp_q);

	rx->bna = NULL;
	rx->rxf.rxf_id = id;

	/* FIXME: mbox_qe ctor()?? */
	bfa_q_qe_init(&rx->mbox_qe.qe);

	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
}
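/*
 * The constructor runs once per object at module init; rxf_id == id
 * permanently binds each bna_rx to the RX function (RxF) of the same
 * index, which is why no rxf_id management appears on the
 * create/destroy path below.
 */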
static void
bna_rx_cb_multi_rxq_stopped(void *arg, int status)
{
	struct bna_rxp *rxp = (struct bna_rxp *)arg;

	bfa_wc_down(&rxp->rx->rxq_stop_wc);
}

static void
bna_rx_cb_rxq_stopped_all(void *arg)
{
	struct bna_rx *rx = (struct bna_rx *)arg;

	bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
}

static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
			 enum bna_cb_status status)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
	rx_mod->stop_cbfn = NULL;
}
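/*
 * Stop sequencing uses bfa_wc wait counters at two levels: each RXQ stop
 * decrements rx->rxq_stop_wc (its resume fires RX_E_RXQ_STOPPED into the
 * RX FSM), and each RX stop decrements rx_mod->rx_stop_wc (its resume
 * reports BNA_CB_SUCCESS to the port). The counters are pre-charged with
 * bfa_wc_up() before any stop is issued -- see bna_rx_mod_stop().
 */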
static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENABLE)
		bfa_fsm_send_event(rx, RX_E_START);
}

static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate port is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	rx->rx_flags |= BNA_RX_F_PORT_FAILED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
			1 << (rx->rxf.rxf_id - 32));
}

void
bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
	if (rx->rxf.rxf_id < 32)
		rx->bna->rx_mod.rxf_bmap[0] &= ~((u32)1 << rx->rxf.rxf_id);
	else
		rx->bna->rx_mod.rxf_bmap[1] &= ~((u32)
			1 << (rx->rxf.rxf_id - 32));
}
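/*
 * rxf_bmap[] is a 64-bit bitmap of running RX functions kept as two u32
 * words: word 0 tracks rxf_id 0..31, word 1 tracks 32..63. A hypothetical
 * helper showing the intended set/clear arithmetic in one place:
 *
 *	static inline void rxf_bmap_clear(u32 *bmap, int id)
 *	{
 *		bmap[id >> 5] &= ~((u32)1 << (id & 0x1f));
 *	}
 *
 * Note the shift must be fully parenthesized before the complement, as
 * done above, or the complement binds to the constant instead.
 */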
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	rx_mod->stop_cbfn = bna_port_cb_rx_stopped;

	/**
	 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
	 * as we are going to call bna_rx_stop
	 */
	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bfa_wc_up(&rx_mod->rx_stop_wc);
	}

	if (rx_mod->rx_stop_wc.wc_count == 0) {
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
		rx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_stop(rx);
	}
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int	index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	_init_rxmod_queues(rx_mod);

	/* Build RX queues */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rx_ptr = &rx_mod->rx[index];
		_rx_ctor(rx_ptr, index);
		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		rxp_ptr->cq.cq_id = index;
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxq_ptr = &rx_mod->rxq[index];
		rxq_ptr->rxq_id = index;

		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}

	rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
	rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
	rx_mod->rx_stop_wc.wc_count = 0;
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head	*qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;

	rx_mod->bna = NULL;
}
int
bna_rx_state_get(struct bna_rx *rx)
{
	return bfa_sm_to_state(rx_sm_table, rx->fsm);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;

	/* CCB structures */
	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	/* RCB structures */
	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	/* Completion QPT */
	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Completion s/w QPT */
	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Completion QPT pages */
	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = cpage_count * q_cfg->num_paths;

	/* Data QPTs */
	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Data s/w QPTs */
	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Data QPT pages */
	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = dpage_count * q_cfg->num_paths;

	/* Hdr QPTs */
	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr s/w QPTs */
	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr QPT pages */
	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = (hpage_count ? PAGE_SIZE : 0);
	mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);

	/* RX Interrupts */
	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
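/*
 * Sizing rule used above: round each queue depth up to a power of two,
 * multiply by the per-entry work-item size, round up to whole pages, and
 * request that many PAGE_SIZE DMA chunks per path. A worked example with
 * illustrative numbers (not the real BFI_RXQ_WI_SIZE): q_depth = 1000
 * rounds to 1024; with an 8-byte work item that is 8 KiB, i.e. two 4 KiB
 * pages per data queue per path.
 */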
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;		/* Header/Small Q qpt */
	struct bna_mem_descr *dqpt_mem;		/* Data/Large Q qpt */
	struct bna_mem_descr *hsqpt_mem;	/* s/w qpt for hdr */
	struct bna_mem_descr *dsqpt_mem;	/* s/w qpt for data */
	struct bna_mem_descr *hpage_mem;	/* hdr page mem */
	struct bna_mem_descr *dpage_mem;	/* data page mem */
	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
	int dpage_count, hpage_count, rcb_idx;
	struct bna_ib_config ibcfg;

	/* Fail if we don't have enough RXPs, RXQs */
	if (!_rx_can_satisfy(rx_mod, rx_cfg))
		return NULL;

	/* Initialize resource pointers */
	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	/* Compute q depth & page count */
	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;
	/* Get RX pointer */
	rx = _get_free_rx(rx_mod);
	_rx_init(rx, bna);
	rx->priv = priv;
	rx->type = rx_cfg->rx_type;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_PORT_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		}
	}

	for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
		rxp = _get_free_rxp(rx_mod);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		/* Get required RXQs, and queue them to rx-path */
		q0 = _get_free_rxq(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = _get_free_rxq(rx_mod);

		/* Initialize IBs */
		if (1 == intr_info->num) {
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[0].vector);
			rxp->vector = intr_info->idl[0].vector;
		} else {
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[i].vector);

			/* Map the MSI-x vector used for this RXP */
			rxp->vector = intr_info->idl[i].vector;
		}

		rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);

		ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
		ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
		ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
		ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;

		ret = bna_ib_config(rxp->cq.ib, &ibcfg);

		/* Link rxqs to rxp */
		_rxp_add_rxqs(rxp, q0, q1);

		/* Link rxp to rx */
		_rx_add_rxp(rx, rxp);

		q0->rx = rx;
		q0->rxp = rxp;

		/* Initialize RCB for the large / data q */
		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
			(void *)unmapq_mem[rcb_idx].kva);
		rcb_idx++;
		(q0)->rx_packets = (q0)->rx_bytes = 0;
		(q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;

		/* Initialize RXQs */
		_rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
		q0->rcb->page_idx = dpage_idx;
		q0->rcb->page_count = dpage_count;
		dpage_idx += dpage_count;

		/* Call bnad to complete rcb setup */
		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
				(void *)unmapq_mem[rcb_idx].kva);
			rcb_idx++;
			(q1)->buffer_size = (rx_cfg)->small_buff_size;
			(q1)->rx_packets = (q1)->rx_bytes = 0;
			(q1)->rx_packets_with_error =
				(q1)->rxbuf_alloc_failed = 0;

			_rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[hpage_idx]);
			q1->rcb->page_idx = hpage_idx;
			q1->rcb->page_count = hpage_count;
			hpage_idx += hpage_count;

			/* Call bnad to complete rcb setup */
			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}
		/* Setup RXP::CQ */
		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
		rxp->cq.ccb->page_idx = cpage_idx;
		rxp->cq.ccb->page_count = page_count;
		cpage_idx += page_count;

		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;

		rxp->cq.ccb->producer_index = 0;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		if (q1)
			rxp->cq.ccb->rcb[1] = q1->rcb;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->hw_producer_index =
			((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
			(rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
		*(rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->intr_type = intr_info->intr_type;
		rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
				intr_info->idl[0].vector :
				intr_info->idl[i].vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib->ib_config.coalescing_timeo;
		rxp->cq.ccb->id = i;

		/* Call bnad to complete CCB setup */
		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);

	} /* for each rx-path */

	bna_rxf_init(&rx->rxf, rx, rx_cfg);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	return rx;
}
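/*
 * Typical call order from the bnad layer (a sketch, not a contract):
 *
 *	bna_rx_res_req(&rx_cfg, res_info);	// ask how much memory / IRQs
 *	// ...caller allocates everything res_info describes...
 *	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res_info, priv);
 *	bna_rx_enable(rx);
 *
 * bna_rx_create() only fails up front (_rx_can_satisfy()); once past that
 * check it assumes the pools hold enough RXPs, RXQs and IBs.
 */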
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		/* Callback to bnad for destroying RCB */
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		_put_free_rxq(rx_mod, q0);
		if (q1) {
			/* Callback to bnad for destroying RCB */
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			_put_free_rxq(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rxp->cq.ib_seg_offset != 0xff)
			bna_ib_release_idx(rxp->cq.ib,
					rxp->cq.ib_seg_offset);
		bna_ib_put(ib_mod, rxp->cq.ib);
		rxp->cq.ib = NULL;

		/* Callback to bnad for destroying CCB */
		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		_put_free_rxp(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx->bna = NULL;
	rx->priv = NULL;
	_put_free_rx(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLE;
	if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *,
				enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat we're stopped */
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLE;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}
/**
 * TX
 */
#define call_tx_stop_cbfn(tx, status)\
do {\
	if ((tx)->stop_cbfn)\
		(tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
	(tx)->stop_cbfn = NULL;\
	(tx)->stop_cbarg = NULL;\
} while (0)

#define call_tx_prio_change_cbfn(tx, status)\
do {\
	if ((tx)->prio_change_cbfn)\
		(tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
	(tx)->prio_change_cbfn = NULL;\
} while (0)
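/*
 * Both macros fire the saved callback at most once: the function pointer
 * is cleared immediately after the call, so a second stop or
 * priority-change completion on the same object becomes a no-op. The
 * do { } while (0) wrapper keeps the multi-statement body safe inside
 * un-braced if/else arms.
 */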
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
				enum bna_cb_status status);
static void bna_tx_cb_txq_stopped(void *arg, int status);
static void bna_tx_cb_stats_cleared(void *arg, int status);
static void __bna_tx_stop(struct bna_tx *tx);
static void __bna_tx_start(struct bna_tx *tx);
static void __bna_txf_stat_clr(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START			= 1,
	TX_E_STOP			= 2,
	TX_E_FAIL			= 3,
	TX_E_TXQ_STOPPED		= 4,
	TX_E_PRIO_CHANGE		= 5,
	TX_E_STAT_CLEARED		= 6,
};

enum bna_tx_state {
	BNA_TX_STOPPED			= 1,
	BNA_TX_STARTED			= 2,
	BNA_TX_TXQ_STOP_WAIT		= 3,
	BNA_TX_PRIO_STOP_WAIT		= 4,
	BNA_TX_STAT_CLR_WAIT		= 5,
};
bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
			enum bna_tx_event);

static struct bfa_sm_table tx_sm_table[] = {
	{BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
	{BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
	{BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
	{BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
	{BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
};
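/*
 * tx_sm_table maps FSM state handlers back to the bna_tx_state enum so
 * that bna_tx_state_get() can report a numeric state without the FSM
 * storing one; bfa_sm_to_state() resolves it by scanning this table.
 */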
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
	}

	call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		break;

	case TX_E_TXQ_STOPPED:
		/**
		 * This event is received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	__bna_tx_start(tx);

	/* Start IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_ack(&txq->ib->door_bell, 0);
	}
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		__bna_tx_stop(tx);
		break;

	case TX_E_FAIL:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_fail(txq->ib);
			(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_PRIO_CHANGE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	__bna_tx_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		break;

	case TX_E_FAIL:
		call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
			(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
		}
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
{
	__bna_txf_stat_clr(tx);
}

static void
bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STAT_CLEARED:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
{
	struct bna_rxtx_q_mem *q_mem;
	struct bna_txq_mem txq_cfg;
	struct bna_txq_mem *txq_mem;
	struct bna_dma_addr cur_q_addr;
	u32 pg_num;
	void __iomem *base_addr;
	unsigned long off;

	/* Fill out structure, to be subsequently written to hardware */
	txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
	txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
	cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
	txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
	txq_cfg.cur_q_entry_hi = cur_q_addr.msb;

	txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;

	txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
			(txq->qpt.page_size >> 2);
	txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
			((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);

	txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
	txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
			(txq->priority & 0x3));
	txq_cfg.wvc_n_cquota_n_rquota =
			((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
			(BFI_TX_MAX_WRR_QUOTA & 0xfff));

	/* Setup the page and write to H/W */

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
			HQM_RXTX_Q_RAM_BASE_OFFSET);
	writel(pg_num, tx->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
					HQM_RXTX_Q_RAM_BASE_OFFSET);
	q_mem = (struct bna_rxtx_q_mem *)0;
	txq_mem = &q_mem[txq->txq_id].txq;

	/*
	 * The following 4 lines, is a hack b'cos the H/W needs to read
	 * these DMA addresses as little endian
	 */

	off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
	writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);

	off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
	writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);

	off = (unsigned long)&txq_mem->cur_q_entry_lo;
	writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);

	off = (unsigned long)&txq_mem->cur_q_entry_hi;
	writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);

	off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
	writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

	off = (unsigned long)&txq_mem->entry_n_pg_size;
	writel(txq_cfg.entry_n_pg_size, base_addr + off);

	off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
	writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);

	off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
	writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);

	off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
	writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);

	off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
	writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);

	txq->tcb->producer_index = 0;
	txq->tcb->consumer_index = 0;
	*(txq->tcb->hw_consumer_index) = 0;
}
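/*
 * Two idioms above are worth calling out. First, offsets into the
 * memory-mapped queue RAM are computed by indexing a NULL-based struct
 * pointer -- effectively offsetof() spelled by hand:
 *
 *	off = (unsigned long)&((struct bna_rxtx_q_mem *)0)[txq->txq_id].txq;
 *
 * Second, the first four register writes byte-swap with htonl() before
 * writel(); per the in-code comment this is a deliberate hack because
 * the hardware reads those DMA address words in the opposite byte order
 * from the remaining registers.
 */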
static void
__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
{
	struct bfi_ll_q_stop_req ll_req;
	u32 bit_mask[2] = {0, 0};
	if (txq->txq_id < 32)
		bit_mask[0] = (u32)1 << txq->txq_id;
	else
		bit_mask[1] = (u32)1 << (txq->txq_id - 32);

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;
	ll_req.q_id_mask[0] = htonl(bit_mask[0]);
	ll_req.q_id_mask[1] = htonl(bit_mask[1]);

	bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
			bna_tx_cb_txq_stopped, tx);

	bna_mbox_send(tx->bna, &tx->mbox_qe);
}
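/*
 * Queue stop is asynchronous: the TXQ id is encoded into a two-word
 * bitmask (ids 0..31 in word 0, 32..63 in word 1), sent to firmware as a
 * BFI_LL_H2I_TXQ_STOP_REQ mailbox message, and completion arrives via
 * bna_tx_cb_txq_stopped(), which drops the txq_stop_wc wait counter.
 */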
static void
__bna_txf_start(struct bna_tx *tx)
{
	struct bna_tx_fndb_ram *tx_fndb;
	struct bna_txf *txf = &tx->txf;
	void __iomem *base_addr;
	unsigned long off;

	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
			tx->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
					TX_FNDB_RAM_BASE_OFFSET);

	tx_fndb = (struct bna_tx_fndb_ram *)0;
	off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;

	writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
			base_addr + off);

	if (tx->txf.txf_id < 32)
		tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
	else
		tx->bna->tx_mod.txf_bmap[1] |= ((u32)
				1 << (tx->txf.txf_id - 32));
}
static void
__bna_txf_stop(struct bna_tx *tx)
{
	struct bna_tx_fndb_ram *tx_fndb;
	u32 page_num;
	u32 ctl_flags;
	struct bna_txf *txf = &tx->txf;
	void __iomem *base_addr;
	unsigned long off;

	/* retrieve the running txf_flags & turn off enable bit */
	page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
	writel(page_num, tx->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
					TX_FNDB_RAM_BASE_OFFSET);
	tx_fndb = (struct bna_tx_fndb_ram *)0;
	off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;

	ctl_flags = readl(base_addr + off);
	ctl_flags &= ~BFI_TXF_CF_ENABLE;

	writel(ctl_flags, base_addr + off);

	if (tx->txf.txf_id < 32)
		tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
	else
		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
				1 << (tx->txf.txf_id - 32));
}
static void
__bna_txf_stat_clr(struct bna_tx *tx)
{
	struct bfi_ll_stats_req ll_req;
	u32 txf_bmap[2] = {0, 0};
	if (tx->txf.txf_id < 32)
		txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
	else
		txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = 0;
	ll_req.rxf_id_mask[0] = 0;
	ll_req.rxf_id_mask[1] = 0;
	ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
	ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);

	bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
			bna_tx_cb_stats_cleared, tx);
	bna_mbox_send(tx->bna, &tx->mbox_qe);
}
static void
__bna_tx_start(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_start(txq->ib);
		__bna_txq_start(tx, txq);
	}

	__bna_txf_start(tx);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		(tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
	}
}

static void
__bna_tx_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
	}

	__bna_txf_stop(tx);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bfa_wc_up(&tx->txq_stop_wc);
	}

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		__bna_txq_stop(tx, txq);
	}
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = page_mem[i].kva;

		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		if (txq->ib) {
			if (txq->ib_seg_offset != -1)
				bna_ib_release_idx(txq->ib,
						txq->ib_seg_offset);
			bna_ib_put(ib_mod, txq->ib);
			txq->ib = NULL;
		}
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;
	list_add_tail(&tx->qe, &tx_mod->tx_free_q);
}
static void
bna_tx_cb_txq_stopped(void *arg, int status)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_q_qe_init(&tx->mbox_qe.qe);
	bfa_wc_down(&tx->txq_stop_wc);
}

static void
bna_tx_cb_txq_stopped_all(void *arg)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
}

static void
bna_tx_cb_stats_cleared(void *arg, int status)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_q_qe_init(&tx->mbox_qe.qe);

	bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
}
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_PORT_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_PORT_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_PORT_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
static void
bna_tx_prio_changed(struct bna_tx *tx, int prio)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->priority = prio;
	}

	bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
}

static void
bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
{
	if (cee_link)
		tx->flags |= BNA_TX_F_PRIO_LOCK;
	else
		tx->flags &= ~BNA_TX_F_PRIO_LOCK;
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
			enum bna_cb_status status)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
	tx_mod->stop_cbfn = NULL;
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = num_txq * page_count;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
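/*
 * TXQ sizing mirrors bna_rx_res_req(): txq_depth * BFI_TXQ_WI_SIZE,
 * rounded up to whole pages, gives page_count; the same count sizes the
 * QPT (one DMA address per page), the s/w QPT (one kernel pointer per
 * page) and the page pool itself (num_txq * page_count pages).
 */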
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	struct bna_ib_mod *ib_mod = &bna->ib_mod;
	struct bna_doorbell_qset *qset;
	struct bna_ib_config ib_config;
	int page_count;
	int page_size;
	int page_idx;
	int i;
	unsigned long off;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
			tx_cfg->num_txq;
	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;

	/**
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	bfa_q_deq(&tx_mod->tx_free_q, &tx);
	bfa_q_qe_init(&tx->qe);

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->ib = NULL;
		txq->ib_seg_offset = -1;
		txq->tx = tx;
	}

	/* IBs */
	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;

		if (intr_info->num == 1)
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
						intr_info->idl[0].vector);
		else
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
						intr_info->idl[i].vector);

		if (txq->ib == NULL)
			goto err_return;

		txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
		if (txq->ib_seg_offset == -1)
			goto err_return;

		i++;
	}

	/**
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);
	tx->bna = bna;
	tx->priv = priv;
	tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
	tx->txq_stop_wc.wc_cbarg = tx;
	tx->txq_stop_wc.wc_count = 0;

	tx->type = tx_cfg->tx_type;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_PORT_LOOPBACK))
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		}
	}
	if (tx->bna->tx_mod.cee_link)
		tx->flags |= BNA_TX_F_PRIO_LOCK;

	/* TxQ */

	i = 0;
	page_idx = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->priority = tx_mod->priority;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */

		ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
		ib_config.interpkt_timeo = 0; /* Not used */
		ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
		ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
					BFI_IB_CF_INT_ENABLE |
					BFI_IB_CF_COALESCING_MODE);
		bna_ib_config(txq->ib, &ib_config);

		/* TCB */

		txq->tcb->producer_index = 0;
		txq->tcb->consumer_index = 0;
		txq->tcb->hw_consumer_index = (volatile u32 *)
			((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
			(txq->ib_seg_offset * BFI_IBIDX_SIZE));
		*(txq->tcb->hw_consumer_index) = 0;
		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)&qset[txq->txq_id].txq[0];
		txq->tcb->q_dbell = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
		txq->tcb->i_dbell = &txq->ib->door_bell;
		txq->tcb->intr_type = intr_info->intr_type;
		txq->tcb->intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, page_size,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[page_idx]);
		txq->tcb->page_idx = page_idx;
		txq->tcb->page_count = page_count;
		page_idx += page_count;

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		i++;
	}

	/* TxF */

	tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
	tx->txf.vlan = 0;

	/* Mbox element */
	bfa_q_qe_init(&tx->mbox_qe.qe);

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
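/*
 * The err_return path is safe on a half-built object because every TXQ
 * is initialized with ib = NULL and ib_seg_offset = -1 before any
 * acquisition: bna_tx_free() skips IB index release and IB put for
 * queues that never got that far.
 */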
void
bna_tx_destroy(struct bna_tx *tx)
{
	/* Callback to bnad for destroying TCB */
	if (tx->tcb_destroy_cbfn) {
		struct bna_txq *txq;
		struct list_head *qe;

		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
		}
	}

	bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_PORT_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}
int
bna_tx_state_get(struct bna_tx *tx)
{
	return bfa_sm_to_state(tx_sm_table, tx->fsm);
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < BFI_MAX_TXQ; i++) {
		tx_mod->tx[i].txf.txf_id = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);

		tx_mod->txq[i].txq_id = i;
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
	tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
	tx_mod->tx_stop_wc.wc_count = 0;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head	*qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	tx_mod->stop_cbfn = bna_port_cb_tx_stopped;

	/**
	 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
	 * as we are going to call bna_tx_stop
	 */
	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bfa_wc_up(&tx_mod->tx_stop_wc);
	}

	if (tx_mod->tx_stop_wc.wc_count == 0) {
		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
		tx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_stop(tx);
	}
}
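/*
 * The wait counter must be fully charged before the first bna_tx_stop()
 * call: a stop can complete synchronously and drop the counter
 * immediately, so interleaving bfa_wc_up() with bna_tx_stop() could let
 * it hit zero -- and fire the all-stopped callback -- early. Hence the
 * two separate passes over tx_active_q above.
 */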
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
void
bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
{
	struct bna_tx *tx;
	struct list_head *qe;

	if (prio != tx_mod->priority) {
		tx_mod->priority = prio;

		list_for_each(qe, &tx_mod->tx_active_q) {
			tx = (struct bna_tx *)qe;
			bna_tx_prio_changed(tx, prio);
		}
	}
}
void
bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->cee_link = cee_link;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_cee_link_status(tx, cee_link);
	}
}