/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
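/*
 * Note on the macros above: BNAD_FILL_UNMAPQ_MEM_REQ sizes each unmap
 * queue as one struct bnad_unmap_q header plus (depth - 1) additional
 * struct bnad_skb_unmap slots (the trailing-array idiom). For example,
 * with a TxQ depth of 2048, BNAD_TX_UNMAPQ_DEPTH is 4096, so each TxQ
 * requests sizeof(struct bnad_unmap_q) +
 * 4095 * sizeof(struct bnad_skb_unmap) bytes of KVA memory.
 * BNAD_GET_MBOX_IRQ reflects the vector layout: in MSI-X mode the
 * mailbox always owns the last vector (msix_num - 1); otherwise the
 * shared PCI INTx line is used.
 */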
/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
						next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb = NULL;
	int			i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
						DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}
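/*
 * Note: an skb's linear head and each of its page fragments occupy
 * consecutive unmap_array slots, which is why the loop above advances
 * unmap_cons once for the header unmap and once per fragment unmap.
 */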
/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		sent_packets = 0, sent_bytes = 0;
	u16		wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb;
	int i;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs out of a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
	 * but this routine runs actually after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				  updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
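/*
 * Work-item accounting above: BNA_TXQ_WI_NEEDED() converts a vector
 * count (1 head buffer + nr_frags) into TxQ work items -- the hardware
 * packs up to 4 Tx vectors per work item (see the "4 vectors per work
 * item" note in bnad_start_xmit()) -- so 'wis' is decremented per
 * packet by however many work items that packet consumed.
 */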
/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked = 0;
	int			i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						  &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
						BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
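/*
 * BNAD_TXQ_FREE_SENT serializes buffer reclamation: the free tasklet,
 * bnad_tx() (completion path) and bnad_start_xmit() all
 * test_and_set it before calling bnad_free_txbufs(), so only one
 * context walks the unmap queue at a time.
 */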
/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
				     GFP_ATOMIC);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		skb->dev = bnad->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
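/*
 * Refill policy: the queue is replenished only when the free-entry
 * count shifted right by BNAD_RXQ_REFILL_THRESHOLD_SHIFT is non-zero,
 * i.e. when at least 1/(2^shift) of the RxQ depth has been consumed.
 * The BNAD_RXQ_REFILL bit keeps the NAPI poll path and the rx_post
 * callback from refilling the same RCB concurrently.
 */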
static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
		return 0;

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (qid0 == cmpl->rxq_id)
			rcb = ccb->rcb[0];
		else
			rcb = ccb->rcb[1];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		next_cmpl->valid = 0;

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
						ntohs(cmpl->vlan_tag), skb);
			else
				vlan_hwaccel_receive_skb(skb,
							 bnad->vlan_grp,
							 ntohs(cmpl->vlan_tag));

		} else { /* Not VLAN tagged/stripped */
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&rx_ctrl->napi, skb);
			else
				netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack(ccb->i_dbell, packets);
	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack(ccb->i_dbell, 0);

	return packets;
}
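/*
 * Note: a CQ may be fed by two RxQs (small/large buffers); the
 * completion's rxq_id is compared against qid0 above to pick
 * ccb->rcb[0] or ccb->rcb[1] for the unmap and statistics updates.
 */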
static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		return;

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule(napi);
	}
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;

	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status))
		return IRQ_NONE;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}
/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

void
bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

static void
bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.port_comp);

	netif_carrier_off(bnad->netdev);
}

void
bnad_cb_port_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
		BNAD_UPDATE_CTR(bnad, cee_up);
	} else
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
			if (!tcb)
				return;
			pr_warn("bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				/* Force an immediate Transmit Schedule */
				pr_info("bna: %s TX_STARTED\n",
					bnad->netdev->name);
				netif_wake_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			} else {
				netif_stop_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			pr_warn("bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	if (tx_info != &bnad->tx_info[0])
		return;

	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
	netif_stop_queue(bnad->netdev);
	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return;

	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	/*
	 * Workaround for first device enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}

	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

	if (netif_carrier_ok(bnad->netdev)) {
		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
		netif_wake_queue(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	/* Delay only once for the whole Tx Path Shutdown */
	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad,
			struct bna_ccb *ccb)
{
	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

	if (ccb->rcb[1])
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);

	if (rcb == rcb->cq->ccb->rcb[0])
		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);

	bnad_free_all_rxbufs(bnad, rcb);

	set_bit(BNAD_RXQ_STARTED, &rcb->flags);

	/* Now allocate & post buffers for this RCB */
	/* !!Allocation in callback context */
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
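/*
 * Ordering in bnad_cb_rx_post() above: the CQ completion entries are
 * reinitialized once per CQ (keyed off rcb[0]), any stale buffers are
 * freed, BNAD_RXQ_STARTED is set, and only then are fresh buffers
 * allocated and posted -- all in callback context, as flagged above.
 */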
static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
			enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
		       enum bna_cb_status status)
{
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						mem_info->len, &dma_pa,
						GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad,
		   struct bna_intr_info *intr_info)
{
	int irq;
	unsigned long flags;

	if (intr_info->idl == NULL)
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);

	kfree(intr_info->idl);
}
/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad,
		    struct bna_intr_info *intr_info)
{
	int		err = 0;
	unsigned long	irq_flags = 0, flags;
	u32	irq;
	irq_handler_t	irq_handler;

	/* Mbox should use only 1 vector */

	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
	if (!intr_info->idl)
		return -ENOMEM;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl[0].vector = bnad->msix_num - 1;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		/*
		 * Keep the request_irq() flags in a separate variable so
		 * the saved spinlock state in 'flags' is not clobbered
		 * before spin_unlock_irqrestore() below.
		 */
		irq_flags = IRQF_SHARED;
		intr_info->intr_type = BNA_INTR_T_INTX;
		/* intr_info->idl.vector = 0 ? */
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
	}

	return err;
}
static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    uint txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
			break;
		}
	}
	return 0;
}
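/*
 * MSI-X vector layout implied by the code above: TxQ completion
 * vectors come first (vector_start = txrx_id), followed by the Rx path
 * vectors (offset by num_tx * num_txq_per_tx), with the mailbox on the
 * last vector (see BNAD_GET_MBOX_IRQ). In INTx mode the "vector" field
 * is a bit mask instead: 0x1 for the Tx IB, 0x2 for the Rx IB.
 */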
/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			uint tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			uint rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}
/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}
/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c)  Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}
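/*
 * The to_del pattern above drops bna_lock before del_timer_sync():
 * the timer callback itself takes bna_lock, so calling
 * del_timer_sync() while holding the lock could deadlock. See the
 * race table ahead of bnad_dim_timeout() for why the RUNNING bits are
 * checked under the lock in the first place.
 */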
/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
							ETH_ALEN);
		i++;
	}
}

static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bna_ccb *ccb;
	struct bnad *bnad;
	int rcvd = 0;

	ccb = rx_ctrl->ccb;

	bnad = ccb->bnad;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, ccb, budget);
	if (rcvd == budget)
		return rcvd;

poll_exit:
	napi_complete((napi));

	BNAD_UPDATE_CTR(bnad, netif_rx_complete);

	bnad_enable_rx_irq(bnad, ccb);
	return rcvd;
}

static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];

		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, 64);

		napi_enable(&rx_ctrl->napi);
	}
}

static void
bnad_napi_disable(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
	}
}
/* Should be held with conf_lock held */
static void
bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	bnad_tx_res_free(bnad, res_info);
}

/* Should be held with conf_lock held */
static int
bnad_setup_tx(struct bnad *bnad, uint tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	struct bna_tx *tx;
	unsigned long flags;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx)
		goto err_return;
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
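/*
 * Tx setup sequence above: query BNA's resource needs
 * (bna_tx_res_req), append the driver's unmap-Q memory requirement,
 * allocate everything, create the Tx object, register per-TxQ MSI-X
 * handlers if applicable, and finally enable the Tx object -- with
 * bna_lock held around each BNA call.
 */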
/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_RSS_T_V4_TCP |
				 BFI_RSS_T_V6_TCP |
				 BFI_RSS_T_V4_IP |
				 BFI_RSS_T_V6_IP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
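/*
 * RSS note: hash_mask is set to num_rxp_per_rx - 1, which presumably
 * assumes the number of Rx paths is a power of two so the mask selects
 * a path uniformly. The Toeplitz hash key is re-randomized each time
 * the Rx is configured.
 */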
/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int dim_timer_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		dim_timer_del = bnad_dim_timer_running(bnad);
		if (dim_timer_del)
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (dim_timer_del)
			del_timer_sync(&bnad->dim_timer);
	}

	bnad_napi_disable(bnad, rx_id);

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	rx_info->rx = NULL;

	bnad_rx_res_free(bnad, res_info);
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
static int
bnad_setup_rx(struct bnad *bnad, uint rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_rx_event_cbfn rx_cbfn;
	struct bna_rx *rx;
	unsigned long flags;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Initialize the Rx event handlers */
	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
	rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
			rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!rx)
		goto err_return;
	rx_info->rx = rx;

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	/* Enable NAPI */
	bnad_napi_enable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_cleanup_rx(bnad, rx_id);
	return err;
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int	i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
				bnad->rx_coalescing_timeo);
	}
}

/*
 * Called with bnad->bna_lock held
 */
static int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}

/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
				bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}

/* Called with bnad_conf_lock() held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
	u16 vlan_id;
	unsigned long flags;

	if (!bnad->vlan_grp)
		return;

	BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));

	for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
		if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
			continue;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				stats->rx_packets += bnad->rx_info[i].
				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;
				}
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				stats->tx_packets +=
				bnad->tx_info[i].tcb[j]->txq->tx_packets;
				stats->tx_bytes +=
					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
			}
		}
	}
}

/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_ll_stats_mac *mac_stats;
	u64 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow  ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
		((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats->rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}
/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
	BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
		   skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			BNAD_UPDATE_CTR(bnad, tso_err);
			return err;
		}
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
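/*
 * With the pseudo-header seed above, the adapter only has to fold in
 * each segment's TCP length and payload checksum when it replicates
 * the headers during segmentation.
 */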
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
			(uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}

/*
 * Adjusts the Q numbers, given a number of msix vectors
 * Give preference to RSS as opposed to Tx priority Queues,
 * in such a case, just use 1 Tx Q
 * Called with bnad->bna_lock held b'cos of cfg_flags access
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
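/*
 * Vector budget behind the adjustment above: the driver wants
 * (num_tx * num_txq_per_tx) Tx vectors + one Rx vector per Rx path +
 * BNAD_MAILBOX_MSIX_VECTORS for the mailbox. For example, 8 granted
 * vectors with 1 TxQ and 1 mailbox vector leave 6 Rx paths; anything
 * below the minimum budget collapses to a single Rx path.
 */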
/* Enable / disable device */
static void
bnad_device_disable(struct bnad *bnad)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);
}

static int
bnad_device_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.ioc_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_device_enable(&bnad->bna.device);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);

	if (bnad->bnad_completions.ioc_comp_status)
		err = bnad->bnad_completions.ioc_comp_status;

	return err;
}

/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad)
{
	int i;
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else
			bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad)
{
	int i, err;
	struct bna_res_info *res_info = &bnad->res_info[0];

	for (i = 0; i < BNA_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		else
			err = bnad_mbox_irq_alloc(bnad,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad);
	return err;
}
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
	if (ret > 0) {
		/* Not enough MSI-X vectors. */

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, ret);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
			+ (bnad->num_rx
			* bnad->num_rxp_per_rx) +
			 BNAD_MAILBOX_MSIX_VECTORS;

		/* Try once more with adjusted numbers */
		/* If this fails, fall back to INTx */
		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
				      bnad->msix_num);
		if (ret)
			goto intx_mode;

	} else if (ret < 0)
		goto intx_mode;
	return;

intx_mode:

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
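/*
 * Note on the pci_enable_msix() calls above: a return of 0 means all
 * requested vectors were granted, a positive value is the number of
 * vectors actually available (hence the bnad_q_num_adjust() retry),
 * and a negative value is an error -- both failure cases fall back to
 * INTx mode.
 */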
static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}

/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	int mtu;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
	bna_port_enable(&bnad->bna.port);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_cleanup_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}

static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.port_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
			bnad_cb_port_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.port_comp);

	bnad_cleanup_tx(bnad, 0);
	bnad_cleanup_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);

	u16 txq_prod, vlan_tag = 0;
	u32 unmap_prod, wis, wis_used, wi_range;
	u32 vectors, vect_id, i, acked;
	u32 tx_id = 0;
	int err;

	struct bnad_tx_info *tx_info;
	struct bna_tcb *tcb;
	struct bnad_unmap_q *unmap_q;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;
	bna_txq_wi_ctrl_flag_t flags;

	if (unlikely
	    (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_info = &bnad->tx_info[tx_id];
	tcb = tx_info->tcb[tx_id];
	unmap_q = tcb->unmap_q;

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_stop_queue() call.
	 */
	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	vectors = 1 + skb_shinfo(skb)->nr_frags;
	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
	acked = 0;
	if (unlikely
	    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
	     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
		if ((u16) (*tcb->hw_consumer_index) !=
		    tcb->consumer_index &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			acked = bnad_free_txbufs(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, acked);
			smp_mb__before_clear_bit();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely
		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}
	unmap_prod = unmap_q->producer_index;
	wis_used = 1;
	vect_id = 0;
	flags = 0;

	txq_prod = tcb->producer_index;
	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
	BUG_ON(!(wi_range <= tcb->q_depth));
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;
	txqent->hdr.wi.opcode =
		htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
		       BNA_TXQ_WI_SEND));

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = (u16) vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag =
			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}

	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		err = bnad_tso_prepare(bnad, skb);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 proto = 0;

		txqent->hdr.wi.lso_mss = 0;

		if (skb->protocol == htons(ETH_P_IP))
			proto = ip_hdr(skb)->protocol;
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			/* nexthdr may not be TCP immediately. */
			proto = ipv6_hdr(skb)->nexthdr;
		}
		if (proto == IPPROTO_TCP) {
			flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) + tcp_hdrlen(skb)));
		} else if (proto == IPPROTO_UDP) {
			flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
			txqent->hdr.wi.l4_hdr_size_n_offset =
				htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
				      (0, skb_transport_offset(skb)));

			BNAD_UPDATE_CTR(bnad, udpcsum_offload);

			BUG_ON(!(skb_headlen(skb) >=
				 skb_transport_offset(skb) +
				 sizeof(struct udphdr)));
		} else {
			err = skb_checksum_help(skb);
			BNAD_UPDATE_CTR(bnad, csum_help);
			if (err) {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, csum_help_err);
				return NETDEV_TX_OK;
			}
		}
	} else {
		txqent->hdr.wi.lso_mss = 0;
		txqent->hdr.wi.l4_hdr_size_n_offset = 0;
	}

	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);

	unmap_q->unmap_array[unmap_prod].skb = skb;
	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
	txqent->vector[vect_id].length = htons(skb_headlen(skb));
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
				BUG_ON(!(wi_range <= tcb->q_depth));
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
		txqent->vector[vect_id].length = htons(size);
		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
					frag->page_offset, size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}

	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	bna_txq_prod_indx_doorbell(tcb);

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}
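/*
 * The TxQ bookkeeping in bnad_start_xmit() relies on power-of-2 ring
 * arithmetic. A minimal sketch of what the macros compute, assuming
 * q_depth is a power of 2 (illustrative reimplementation, not the
 * driver's actual macro bodies):
 *
 *	BNA_QE_INDX_ADD(idx, num, q_depth):
 *		idx = (idx + num) & (q_depth - 1);
 *	BNA_QE_FREE_CNT(q, q_depth):
 *		(consumer_index - producer_index - 1) & (q_depth - 1)
 *	BNA_TXQ_WI_NEEDED(vectors):	4 vectors per work item
 *		(vectors + 3) >> 2
 */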
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
static void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNA_CF_ALLMULTI;
		}
	}

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
				     mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
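/*
 * On the (new_mask, valid_mask) pair passed to bna_rx_mode_set() above:
 * as used here, valid_mask selects which mode bits the call may change
 * and new_mask supplies their new values. E.g. leaving promiscuous mode
 * (illustrative):
 *
 *	new_mask   = ~BNAD_RXMODE_PROMISC_DEFAULT;	bit cleared ...
 *	valid_mask =  BNAD_RXMODE_PROMISC_DEFAULT;	... but still selected
 */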
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int mtu, err = 0;
	unsigned long flags;

	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
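/*
 * The value programmed into the port includes L2 framing, e.g. for the
 * default 1500-byte IP MTU:
 *
 *	mtu = ETH_HLEN (14) + 1500 + ETH_FCS_LEN (4) = 1518
 *
 * BNAD_JUMBO_MTU bounds what is accepted from userspace.
 */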
static void
bnad_vlan_rx_register(struct net_device *netdev,
		      struct vlan_group *vlan_grp)
{
	struct bnad *bnad = netdev_priv(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad->vlan_grp = vlan_grp;
	mutex_unlock(&bnad->conf_mutex);
}

static void
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}

static void
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb) {
					bnad_disable_rx_irq(bnad,
							    rx_ctrl->ccb);
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
				}
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_set_multicast_list	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_register	= bnad_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};

static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
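/*
 * Note on the feature setup above: hw_features is the set the user may
 * toggle via ethtool -K, while VLAN RX/filter are forced on in
 * netdev->features. NETIF_F_HIGHDMA is advertised only when
 * bnad_pci_init() succeeded with a 64-bit DMA mask (using_dac), since
 * only then may the device be handed buffers above 4 GB.
 */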
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize Tx free tasklet
 * 4. Initialize no. of TxQ & CQs & MSIX vectors
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
		(unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
		BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	return 0;
}
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happen only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}

/*
 * Initialize locks
 *	a) Per-device mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
}
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = 0;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}
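/*
 * The DMA mask negotiation above is the usual try-64-then-32 pattern.
 * Minimal standalone sketch (illustrative):
 *
 *	if (!dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    !dma_set_coherent_mask(dev, DMA_BIT_MASK(64)))
 *		using_dac = true;		64-bit DMA
 *	else if (!dma_set_mask(dev, DMA_BIT_MASK(32)) &&
 *		 !dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *		using_dac = false;		32-bit fallback
 *	else
 *		fail;				no usable mask
 */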
static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
	       const struct pci_device_id *pcidev_id)
{
	bool using_dac = false;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
		pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);

	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto free_netdev;

	bnad_lock_init(bnad);
	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;
	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	bnad_enable_msix(bnad);

	/* Get resource requirement from bna */
	bna_res_req(&bnad->res_info[0]);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad);
	if (err)
		goto free_netdev;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	/* Set up timers */
	setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
		    ((unsigned long)bnad));
	setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
		    ((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.device.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip
	 * Don't care even if err != 0, bna state machine will
	 * deal with it
	 */
	err = bnad_device_enable(bnad);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_port_mac_get(&bna->port, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto disable_device;
	}

	return 0;

disable_device:
	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
free_netdev:
	free_netdev(netdev);
	return err;
}
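/*
 * The error unwind in bnad_pci_probe() mirrors setup in reverse: a
 * failure jumps to the deepest label whose resources were acquired and
 * falls through the rest,
 *
 *	disable_device: -> pci_uninit: -> free_netdev:
 *
 * which is the conventional goto-based cleanup idiom for probe paths.
 */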
static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_device_disable(bnad);
	del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.device.ioc.sem_timer);
	del_timer_sync(&bnad->bna.device.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	mutex_unlock(&bnad->conf_mutex);

	bnad_res_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	bnad_lock_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static const struct pci_device_id bnad_pci_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			   PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask = 0xffff00
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);

static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver\n");

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);