drivers/net/bna/bnad.c
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
18 #include <linux/netdevice.h>
19 #include <linux/skbuff.h>
20 #include <linux/etherdevice.h>
21 #include <linux/in.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_ether.h>
25 #include <linux/ip.h>
27 #include "bnad.h"
28 #include "bna.h"
29 #include "cna.h"
31 static DEFINE_MUTEX(bnad_fwimg_mutex);
34 * Module params
36 static uint bnad_msix_disable;
37 module_param(bnad_msix_disable, uint, 0444);
38 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
40 static uint bnad_ioc_auto_recover = 1;
41 module_param(bnad_ioc_auto_recover, uint, 0444);
42 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
45 * Global variables
47 u32 bnad_rxqs_per_cq = 2;
49 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52 * Local MACROS
54 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
56 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
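/*
 * BNAD_GET_MBOX_IRQ (below) resolves to the last entry of the MSI-X
 * table, i.e. the vector reserved for the mailbox, when MSI-X is in
 * use, and to the legacy PCI line interrupt otherwise.
 */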
58 #define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
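/*
 * BNAD_FILL_UNMAPQ_MEM_REQ (below) fills one bna_res_info entry with
 * the kernel-VA memory requirement for the unmap queues: _num queues,
 * each sized as a struct bnad_unmap_q header plus (_depth - 1) extra
 * bnad_skb_unmap slots (presumably the struct already embeds the first
 * slot).
 */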
63 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
64 do { \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
71 } while (0)
74 * Reinitialize completions in CQ, once Rx is taken down
76 static void
77 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
79 struct bna_cq_entry *cmpl, *next_cmpl;
80 unsigned int wi_range, wis = 0, ccb_prod = 0;
81 int i;
83 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
84 wi_range);
86 for (i = 0; i < ccb->q_depth; i++) {
87 wis++;
88 if (likely(--wi_range))
89 next_cmpl = cmpl + 1;
90 else {
91 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
92 wis = 0;
93 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
94 next_cmpl, wi_range);
96 cmpl->valid = 0;
97 cmpl = next_cmpl;
102 * Frees all pending Tx Bufs
103 * At this point no activity is expected on the Q,
104 * so DMA unmap & freeing is fine.
106 static void
107 bnad_free_all_txbufs(struct bnad *bnad,
108 struct bna_tcb *tcb)
110 u16 unmap_cons;
111 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
112 struct bnad_skb_unmap *unmap_array;
113 struct sk_buff *skb = NULL;
114 int i;
116 unmap_array = unmap_q->unmap_array;
118 unmap_cons = 0;
119 while (unmap_cons < unmap_q->q_depth) {
120 skb = unmap_array[unmap_cons].skb;
121 if (!skb) {
122 unmap_cons++;
123 continue;
125 unmap_array[unmap_cons].skb = NULL;
127 pci_unmap_single(bnad->pcidev,
128 pci_unmap_addr(&unmap_array[unmap_cons],
129 dma_addr), skb_headlen(skb),
130 PCI_DMA_TODEVICE);
132 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
133 unmap_cons++;
134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
135 pci_unmap_page(bnad->pcidev,
136 pci_unmap_addr(&unmap_array[unmap_cons],
137 dma_addr),
138 skb_shinfo(skb)->frags[i].size,
139 PCI_DMA_TODEVICE);
140 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
142 unmap_cons++;
144 dev_kfree_skb_any(skb);
148 /* Data Path Handlers */
151 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
152 * Can be called in a) Interrupt context
153 * b) Sending context
154 * c) Tasklet context
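 * The routine walks the unmap queue from the driver's consumer index up
 * to the hardware consumer index: for each completed skb it unmaps the
 * linear part and every page fragment, counts the work items consumed
 * (BNA_TXQ_WI_NEEDED(1 + nr_frags)) and frees the skb, returning the
 * number of completed packets so the caller can ack the IB doorbell.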
156 static u32
157 bnad_free_txbufs(struct bnad *bnad,
158 struct bna_tcb *tcb)
160 u32 sent_packets = 0, sent_bytes = 0;
161 u16 wis, unmap_cons, updated_hw_cons;
162 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
163 struct bnad_skb_unmap *unmap_array;
164 struct sk_buff *skb;
165 int i;
168 * Just return if TX is stopped. This check is useful
169 * when bnad_free_txbufs() runs from a tasklet that was
170 * scheduled before bnad_cb_tx_cleanup() cleared the
171 * BNAD_RF_TX_STARTED bit, but actually executes after
172 * the cleanup has completed.
174 if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
175 return 0;
177 updated_hw_cons = *(tcb->hw_consumer_index);
179 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
180 updated_hw_cons, tcb->q_depth);
182 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
184 unmap_array = unmap_q->unmap_array;
185 unmap_cons = unmap_q->consumer_index;
187 prefetch(&unmap_array[unmap_cons + 1]);
188 while (wis) {
189 skb = unmap_array[unmap_cons].skb;
191 unmap_array[unmap_cons].skb = NULL;
193 sent_packets++;
194 sent_bytes += skb->len;
195 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
197 pci_unmap_single(bnad->pcidev,
198 pci_unmap_addr(&unmap_array[unmap_cons],
199 dma_addr), skb_headlen(skb),
200 PCI_DMA_TODEVICE);
201 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
202 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
204 prefetch(&unmap_array[unmap_cons + 1]);
205 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
206 prefetch(&unmap_array[unmap_cons + 1]);
208 pci_unmap_page(bnad->pcidev,
209 pci_unmap_addr(&unmap_array[unmap_cons],
210 dma_addr),
211 skb_shinfo(skb)->frags[i].size,
212 PCI_DMA_TODEVICE);
213 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
215 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
217 dev_kfree_skb_any(skb);
220 /* Update consumer pointers. */
221 tcb->consumer_index = updated_hw_cons;
222 unmap_q->consumer_index = unmap_cons;
224 tcb->txq->tx_packets += sent_packets;
225 tcb->txq->tx_bytes += sent_bytes;
227 return sent_packets;
230 /* Tx Free Tasklet function */
231 /* Frees for all the tcb's in all the Tx's */
233 * Scheduled from sending context, so that
234 * the fat Tx lock is not held for too long
235 * in the sending context.
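 * The BNAD_TXQ_FREE_SENT bit acts as a per-TxQ lock: this tasklet and
 * bnad_tx() both test_and_set it before reclaiming buffers, so only one
 * context frees a given TxQ at a time.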
237 static void
238 bnad_tx_free_tasklet(unsigned long bnad_ptr)
240 struct bnad *bnad = (struct bnad *)bnad_ptr;
241 struct bna_tcb *tcb;
242 u32 acked;
243 int i, j;
245 for (i = 0; i < bnad->num_tx; i++) {
246 for (j = 0; j < bnad->num_txq_per_tx; j++) {
247 tcb = bnad->tx_info[i].tcb[j];
248 if (!tcb)
249 continue;
250 if (((u16) (*tcb->hw_consumer_index) !=
251 tcb->consumer_index) &&
252 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
253 &tcb->flags))) {
254 acked = bnad_free_txbufs(bnad, tcb);
255 bna_ib_ack(tcb->i_dbell, acked);
256 smp_mb__before_clear_bit();
257 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
263 static u32
264 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
266 struct net_device *netdev = bnad->netdev;
267 u32 sent;
269 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
270 return 0;
272 sent = bnad_free_txbufs(bnad, tcb);
273 if (sent) {
274 if (netif_queue_stopped(netdev) &&
275 netif_carrier_ok(netdev) &&
276 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
277 BNAD_NETIF_WAKE_THRESHOLD) {
278 netif_wake_queue(netdev);
279 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
281 bna_ib_ack(tcb->i_dbell, sent);
282 } else
283 bna_ib_ack(tcb->i_dbell, 0);
285 smp_mb__before_clear_bit();
286 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
288 return sent;
291 /* MSIX Tx Completion Handler */
292 static irqreturn_t
293 bnad_msix_tx(int irq, void *data)
295 struct bna_tcb *tcb = (struct bna_tcb *)data;
296 struct bnad *bnad = tcb->bnad;
298 bnad_tx(bnad, tcb);
300 return IRQ_HANDLED;
303 static void
304 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
306 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
308 rcb->producer_index = 0;
309 rcb->consumer_index = 0;
311 unmap_q->producer_index = 0;
312 unmap_q->consumer_index = 0;
315 static void
316 bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
318 struct bnad_unmap_q *unmap_q;
319 struct sk_buff *skb;
321 unmap_q = rcb->unmap_q;
322 while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
323 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
324 BUG_ON(!(skb));
325 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
326 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
327 unmap_array[unmap_q->consumer_index],
328 dma_addr), rcb->rxq->buffer_size +
329 NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
330 dev_kfree_skb(skb);
331 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
332 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
335 bnad_reset_rcb(bnad, rcb);
338 static void
339 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
341 u16 to_alloc, alloced, unmap_prod, wi_range;
342 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
343 struct bnad_skb_unmap *unmap_array;
344 struct bna_rxq_entry *rxent;
345 struct sk_buff *skb;
346 dma_addr_t dma_addr;
348 alloced = 0;
349 to_alloc =
350 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
352 unmap_array = unmap_q->unmap_array;
353 unmap_prod = unmap_q->producer_index;
355 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
357 while (to_alloc--) {
358 if (!wi_range) {
359 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
360 wi_range);
362 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
363 GFP_ATOMIC);
364 if (unlikely(!skb)) {
365 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
366 goto finishing;
368 skb->dev = bnad->netdev;
369 skb_reserve(skb, NET_IP_ALIGN);
370 unmap_array[unmap_prod].skb = skb;
371 dma_addr = pci_map_single(bnad->pcidev, skb->data,
372 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
373 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
374 dma_addr);
375 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
376 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
378 rxent++;
379 wi_range--;
380 alloced++;
383 finishing:
384 if (likely(alloced)) {
385 unmap_q->producer_index = unmap_prod;
386 rcb->producer_index = unmap_prod;
387 smp_mb();
388 bna_rxq_prod_indx_doorbell(rcb);
393 * Locking is required in the enable path
394 * because it is called from a napi poll
395 * context, where the bna_lock is not held
396 * unlike the IRQ context.
398 static void
399 bnad_enable_txrx_irqs(struct bnad *bnad)
401 struct bna_tcb *tcb;
402 struct bna_ccb *ccb;
403 int i, j;
404 unsigned long flags;
406 spin_lock_irqsave(&bnad->bna_lock, flags);
407 for (i = 0; i < bnad->num_tx; i++) {
408 for (j = 0; j < bnad->num_txq_per_tx; j++) {
409 tcb = bnad->tx_info[i].tcb[j];
410 bna_ib_coalescing_timer_set(tcb->i_dbell,
411 tcb->txq->ib->ib_config.coalescing_timeo);
412 bna_ib_ack(tcb->i_dbell, 0);
416 for (i = 0; i < bnad->num_rx; i++) {
417 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
418 ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
419 bnad_enable_rx_irq_unsafe(ccb);
422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
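/*
 * bnad_refill_rxq (below) reposts receive buffers only once at least
 * 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT unmap-queue entries are free, and
 * uses the BNAD_RXQ_REFILL bit to keep two contexts from refilling the
 * same RCB at once.
 */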
425 static inline void
426 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
428 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
430 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
431 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
432 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
433 bnad_alloc_n_post_rxbufs(bnad, rcb);
434 smp_mb__before_clear_bit();
435 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
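/*
 * bnad_poll_cq (below) consumes up to 'budget' completions from a CQ:
 * it selects rcb[0] or rcb[1] by the completion's rxq_id, unmaps the
 * buffer, drops frames flagged with MAC/FCS/length errors, sets
 * CHECKSUM_UNNECESSARY when the hardware validated the L3/L4 checksums,
 * and hands the skb to GRO or the VLAN receive helpers. It finally acks
 * the IB with the number of packets processed and refills both RCBs.
 */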
439 static u32
440 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
442 struct bna_cq_entry *cmpl, *next_cmpl;
443 struct bna_rcb *rcb = NULL;
444 unsigned int wi_range, packets = 0, wis = 0;
445 struct bnad_unmap_q *unmap_q;
446 struct sk_buff *skb;
447 u32 flags;
448 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
449 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
451 prefetch(bnad->netdev);
452 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
453 wi_range);
454 BUG_ON(!(wi_range <= ccb->q_depth));
455 while (cmpl->valid && packets < budget) {
456 packets++;
457 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
459 if (qid0 == cmpl->rxq_id)
460 rcb = ccb->rcb[0];
461 else
462 rcb = ccb->rcb[1];
464 unmap_q = rcb->unmap_q;
466 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
467 BUG_ON(!(skb));
468 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
469 pci_unmap_single(bnad->pcidev,
470 pci_unmap_addr(&unmap_q->
471 unmap_array[unmap_q->
472 consumer_index],
473 dma_addr),
474 rcb->rxq->buffer_size,
475 PCI_DMA_FROMDEVICE);
476 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
478 /* Should be more efficient ? Performance ? */
479 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
481 wis++;
482 if (likely(--wi_range))
483 next_cmpl = cmpl + 1;
484 else {
485 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
486 wis = 0;
487 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
488 next_cmpl, wi_range);
489 BUG_ON(!(wi_range <= ccb->q_depth));
491 prefetch(next_cmpl);
493 flags = ntohl(cmpl->flags);
494 if (unlikely
495 (flags &
496 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
497 BNA_CQ_EF_TOO_LONG))) {
498 dev_kfree_skb_any(skb);
499 rcb->rxq->rx_packets_with_error++;
500 goto next;
503 skb_put(skb, ntohs(cmpl->length));
504 if (likely
505 (bnad->rx_csum &&
506 (((flags & BNA_CQ_EF_IPV4) &&
507 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
508 (flags & BNA_CQ_EF_IPV6)) &&
509 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
510 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
511 skb->ip_summed = CHECKSUM_UNNECESSARY;
512 else
513 skb_checksum_none_assert(skb);
515 rcb->rxq->rx_packets++;
516 rcb->rxq->rx_bytes += skb->len;
517 skb->protocol = eth_type_trans(skb, bnad->netdev);
519 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
520 struct bnad_rx_ctrl *rx_ctrl =
521 (struct bnad_rx_ctrl *)ccb->ctrl;
522 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
523 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
524 ntohs(cmpl->vlan_tag), skb);
525 else
526 vlan_hwaccel_receive_skb(skb,
527 bnad->vlan_grp,
528 ntohs(cmpl->vlan_tag));
530 } else { /* Not VLAN tagged/stripped */
531 struct bnad_rx_ctrl *rx_ctrl =
532 (struct bnad_rx_ctrl *)ccb->ctrl;
533 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
534 napi_gro_receive(&rx_ctrl->napi, skb);
535 else
536 netif_receive_skb(skb);
539 next:
540 cmpl->valid = 0;
541 cmpl = next_cmpl;
544 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
546 if (likely(ccb)) {
547 bna_ib_ack(ccb->i_dbell, packets);
548 bnad_refill_rxq(bnad, ccb->rcb[0]);
549 if (ccb->rcb[1])
550 bnad_refill_rxq(bnad, ccb->rcb[1]);
551 } else
552 bna_ib_ack(ccb->i_dbell, 0);
554 return packets;
557 static void
558 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561 bna_ib_ack(ccb->i_dbell, 0);
564 static void
565 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
567 unsigned long flags;
569 spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
570 bnad_enable_rx_irq_unsafe(ccb);
571 spin_unlock_irqrestore(&bnad->bna_lock, flags);
574 static void
575 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
577 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
578 if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
579 bnad_disable_rx_irq(bnad, ccb);
580 __napi_schedule((&rx_ctrl->napi));
582 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
585 /* MSIX Rx Path Handler */
586 static irqreturn_t
587 bnad_msix_rx(int irq, void *data)
589 struct bna_ccb *ccb = (struct bna_ccb *)data;
590 struct bnad *bnad = ccb->bnad;
592 bnad_netif_rx_schedule_poll(bnad, ccb);
594 return IRQ_HANDLED;
597 /* Interrupt handlers */
599 /* Mbox Interrupt Handlers */
600 static irqreturn_t
601 bnad_msix_mbox_handler(int irq, void *data)
603 u32 intr_status;
604 unsigned long flags;
605 struct net_device *netdev = data;
606 struct bnad *bnad;
608 bnad = netdev_priv(netdev);
610 /* BNA_ISR_GET(bnad); Inc Ref count */
611 spin_lock_irqsave(&bnad->bna_lock, flags);
613 bna_intr_status_get(&bnad->bna, intr_status);
615 if (BNA_IS_MBOX_ERR_INTR(intr_status))
616 bna_mbox_handler(&bnad->bna, intr_status);
618 spin_unlock_irqrestore(&bnad->bna_lock, flags);
620 /* BNAD_ISR_PUT(bnad); Dec Ref count */
621 return IRQ_HANDLED;
624 static irqreturn_t
625 bnad_isr(int irq, void *data)
627 int i, j;
628 u32 intr_status;
629 unsigned long flags;
630 struct net_device *netdev = data;
631 struct bnad *bnad = netdev_priv(netdev);
632 struct bnad_rx_info *rx_info;
633 struct bnad_rx_ctrl *rx_ctrl;
635 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
636 return IRQ_NONE;
638 bna_intr_status_get(&bnad->bna, intr_status);
640 if (unlikely(!intr_status))
641 return IRQ_NONE;
643 spin_lock_irqsave(&bnad->bna_lock, flags);
645 if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
646 bna_mbox_handler(&bnad->bna, intr_status);
647 if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
649 goto done;
652 spin_unlock_irqrestore(&bnad->bna_lock, flags);
654 /* Process data interrupts */
655 for (i = 0; i < bnad->num_rx; i++) {
656 rx_info = &bnad->rx_info[i];
657 if (!rx_info->rx)
658 continue;
659 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
660 rx_ctrl = &rx_info->rx_ctrl[j];
661 if (rx_ctrl->ccb)
662 bnad_netif_rx_schedule_poll(bnad,
663 rx_ctrl->ccb);
666 done:
667 return IRQ_HANDLED;
671 * Called in interrupt / callback context
672 * with bna_lock held, so cfg_flags access is OK
674 static void
675 bnad_enable_mbox_irq(struct bnad *bnad)
677 int irq = BNAD_GET_MBOX_IRQ(bnad);
679 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
680 if (bnad->cfg_flags & BNAD_CF_MSIX)
681 enable_irq(irq);
683 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
687 * Called with bnad->bna_lock held because of
688 * bnad->cfg_flags access.
690 static void
691 bnad_disable_mbox_irq(struct bnad *bnad)
693 int irq = BNAD_GET_MBOX_IRQ(bnad);
696 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
697 if (bnad->cfg_flags & BNAD_CF_MSIX)
698 disable_irq_nosync(irq);
700 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
703 /* Control Path Handlers */
705 /* Callbacks */
706 void
707 bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
709 bnad_enable_mbox_irq(bnad);
712 void
713 bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
715 bnad_disable_mbox_irq(bnad);
718 void
719 bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
721 complete(&bnad->bnad_completions.ioc_comp);
722 bnad->bnad_completions.ioc_comp_status = status;
725 void
726 bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
728 complete(&bnad->bnad_completions.ioc_comp);
729 bnad->bnad_completions.ioc_comp_status = status;
732 static void
733 bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
735 struct bnad *bnad = (struct bnad *)arg;
737 complete(&bnad->bnad_completions.port_comp);
739 netif_carrier_off(bnad->netdev);
742 void
743 bnad_cb_port_link_status(struct bnad *bnad,
744 enum bna_link_status link_status)
746 bool link_up = false;
748 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
750 if (link_status == BNA_CEE_UP) {
751 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
752 BNAD_UPDATE_CTR(bnad, cee_up);
753 } else
754 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
756 if (link_up) {
757 if (!netif_carrier_ok(bnad->netdev)) {
758 pr_warn("bna: %s link up\n",
759 bnad->netdev->name);
760 netif_carrier_on(bnad->netdev);
761 BNAD_UPDATE_CTR(bnad, link_toggle);
762 if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
763 /* Force an immediate Transmit Schedule */
764 pr_info("bna: %s TX_STARTED\n",
765 bnad->netdev->name);
766 netif_wake_queue(bnad->netdev);
767 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
768 } else {
769 netif_stop_queue(bnad->netdev);
770 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
773 } else {
774 if (netif_carrier_ok(bnad->netdev)) {
775 pr_warn("bna: %s link down\n",
776 bnad->netdev->name);
777 netif_carrier_off(bnad->netdev);
778 BNAD_UPDATE_CTR(bnad, link_toggle);
783 static void
784 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
785 enum bna_cb_status status)
787 struct bnad *bnad = (struct bnad *)arg;
789 complete(&bnad->bnad_completions.tx_comp);
792 static void
793 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
795 struct bnad_tx_info *tx_info =
796 (struct bnad_tx_info *)tcb->txq->tx->priv;
797 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
799 tx_info->tcb[tcb->id] = tcb;
800 unmap_q->producer_index = 0;
801 unmap_q->consumer_index = 0;
802 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
805 static void
806 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
808 struct bnad_tx_info *tx_info =
809 (struct bnad_tx_info *)tcb->txq->tx->priv;
811 tx_info->tcb[tcb->id] = NULL;
814 static void
815 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
817 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
819 unmap_q->producer_index = 0;
820 unmap_q->consumer_index = 0;
821 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
824 static void
825 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
827 struct bnad_rx_info *rx_info =
828 (struct bnad_rx_info *)ccb->cq->rx->priv;
830 rx_info->rx_ctrl[ccb->id].ccb = ccb;
831 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
834 static void
835 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
837 struct bnad_rx_info *rx_info =
838 (struct bnad_rx_info *)ccb->cq->rx->priv;
840 rx_info->rx_ctrl[ccb->id].ccb = NULL;
843 static void
844 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
846 struct bnad_tx_info *tx_info =
847 (struct bnad_tx_info *)tcb->txq->tx->priv;
849 if (tx_info != &bnad->tx_info[0])
850 return;
852 clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
853 netif_stop_queue(bnad->netdev);
854 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
857 static void
858 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
860 if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
861 return;
863 if (netif_carrier_ok(bnad->netdev)) {
864 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
865 netif_wake_queue(bnad->netdev);
866 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
870 static void
871 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
873 struct bnad_unmap_q *unmap_q;
875 if (!tcb || (!tcb->unmap_q))
876 return;
878 unmap_q = tcb->unmap_q;
879 if (!unmap_q->unmap_array)
880 return;
882 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
883 return;
885 bnad_free_all_txbufs(bnad, tcb);
887 unmap_q->producer_index = 0;
888 unmap_q->consumer_index = 0;
890 smp_mb__before_clear_bit();
891 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
894 static void
895 bnad_cb_rx_cleanup(struct bnad *bnad,
896 struct bna_ccb *ccb)
898 bnad_cq_cmpl_init(bnad, ccb);
900 bnad_free_rxbufs(bnad, ccb->rcb[0]);
901 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
903 if (ccb->rcb[1]) {
904 bnad_free_rxbufs(bnad, ccb->rcb[1]);
905 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
909 static void
910 bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
912 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
914 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
916 /* Now allocate & post buffers for this RCB */
917 /* !!Allocation in callback context */
918 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
919 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
920 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
921 bnad_alloc_n_post_rxbufs(bnad, rcb);
922 smp_mb__before_clear_bit();
923 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
927 static void
928 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
929 enum bna_cb_status status)
931 struct bnad *bnad = (struct bnad *)arg;
933 complete(&bnad->bnad_completions.rx_comp);
936 static void
937 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
938 enum bna_cb_status status)
940 bnad->bnad_completions.mcast_comp_status = status;
941 complete(&bnad->bnad_completions.mcast_comp);
944 void
945 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
946 struct bna_stats *stats)
948 if (status == BNA_CB_SUCCESS)
949 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
951 if (!netif_running(bnad->netdev) ||
952 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
953 return;
955 mod_timer(&bnad->stats_timer,
956 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
959 /* Resource allocation, free functions */
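/*
 * bnad_mem_free()/bnad_mem_alloc() below handle both memory types used
 * by BNA resource requests: BNA_MEM_T_DMA descriptors get coherent DMA
 * buffers via pci_alloc_consistent(), everything else is plain
 * kzalloc()/kfree().
 */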
961 static void
962 bnad_mem_free(struct bnad *bnad,
963 struct bna_mem_info *mem_info)
965 int i;
966 dma_addr_t dma_pa;
968 if (mem_info->mdl == NULL)
969 return;
971 for (i = 0; i < mem_info->num; i++) {
972 if (mem_info->mdl[i].kva != NULL) {
973 if (mem_info->mem_type == BNA_MEM_T_DMA) {
974 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
975 dma_pa);
976 pci_free_consistent(bnad->pcidev,
977 mem_info->mdl[i].len,
978 mem_info->mdl[i].kva, dma_pa);
979 } else
980 kfree(mem_info->mdl[i].kva);
983 kfree(mem_info->mdl);
984 mem_info->mdl = NULL;
987 static int
988 bnad_mem_alloc(struct bnad *bnad,
989 struct bna_mem_info *mem_info)
991 int i;
992 dma_addr_t dma_pa;
994 if ((mem_info->num == 0) || (mem_info->len == 0)) {
995 mem_info->mdl = NULL;
996 return 0;
999 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1000 GFP_KERNEL);
1001 if (mem_info->mdl == NULL)
1002 return -ENOMEM;
1004 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1005 for (i = 0; i < mem_info->num; i++) {
1006 mem_info->mdl[i].len = mem_info->len;
1007 mem_info->mdl[i].kva =
1008 pci_alloc_consistent(bnad->pcidev,
1009 mem_info->len, &dma_pa);
1011 if (mem_info->mdl[i].kva == NULL)
1012 goto err_return;
1014 BNA_SET_DMA_ADDR(dma_pa,
1015 &(mem_info->mdl[i].dma));
1017 } else {
1018 for (i = 0; i < mem_info->num; i++) {
1019 mem_info->mdl[i].len = mem_info->len;
1020 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1021 GFP_KERNEL);
1022 if (mem_info->mdl[i].kva == NULL)
1023 goto err_return;
1027 return 0;
1029 err_return:
1030 bnad_mem_free(bnad, mem_info);
1031 return -ENOMEM;
1034 /* Free IRQ for Mailbox */
1035 static void
1036 bnad_mbox_irq_free(struct bnad *bnad,
1037 struct bna_intr_info *intr_info)
1039 int irq;
1040 unsigned long flags;
1042 if (intr_info->idl == NULL)
1043 return;
1045 spin_lock_irqsave(&bnad->bna_lock, flags);
1046 bnad_disable_mbox_irq(bnad);
1047 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1049 irq = BNAD_GET_MBOX_IRQ(bnad);
1050 free_irq(irq, bnad->netdev);
1052 kfree(intr_info->idl);
1056 * Allocates the IRQ for the mailbox, but keeps it disabled.
1057 * It will be enabled once we get the mbox enable callback
1058 * from bna.
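 * In short: the mailbox uses the last MSI-X vector (or the shared INTx
 * line), BNAD_RF_MBOX_IRQ_DISABLED is set before request_irq() so a
 * shared handler does not run early, and for MSI-X the vector is
 * disabled again right after registration until the mbox enable
 * callback arrives.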
1060 static int
1061 bnad_mbox_irq_alloc(struct bnad *bnad,
1062 struct bna_intr_info *intr_info)
1064 int err;
1065 unsigned long irq_flags, flags;
1066 u32 irq;
1067 irq_handler_t irq_handler;
1069 /* Mbox should use only 1 vector */
1071 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1072 if (!intr_info->idl)
1073 return -ENOMEM;
1075 spin_lock_irqsave(&bnad->bna_lock, flags);
1076 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1077 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1078 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1079 irq_flags = 0;
1080 intr_info->intr_type = BNA_INTR_T_MSIX;
1081 intr_info->idl[0].vector = bnad->msix_num - 1;
1082 } else {
1083 irq_handler = (irq_handler_t)bnad_isr;
1084 irq = bnad->pcidev->irq;
1085 irq_flags = IRQF_SHARED;
1086 intr_info->intr_type = BNA_INTR_T_INTX;
1087 /* intr_info->idl.vector = 0 ? */
1089 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1091 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1094 * Set the Mbox IRQ disable flag, so that the IRQ handler
1095 * called from request_irq() for SHARED IRQs does not execute
1097 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1099 err = request_irq(irq, irq_handler, irq_flags,
1100 bnad->mbox_irq_name, bnad->netdev);
1102 if (err) {
1103 kfree(intr_info->idl);
1104 intr_info->idl = NULL;
1105 return err;
1108 spin_lock_irqsave(&bnad->bna_lock, flags);
1110 if (bnad->cfg_flags & BNAD_CF_MSIX)
1111 disable_irq_nosync(irq);
1113 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1114 return 0;
1117 static void
1118 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1120 kfree(intr_info->idl);
1121 intr_info->idl = NULL;
1124 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1125 static int
1126 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1127 uint txrx_id, struct bna_intr_info *intr_info)
1129 int i, vector_start = 0;
1130 u32 cfg_flags;
1131 unsigned long flags;
1133 spin_lock_irqsave(&bnad->bna_lock, flags);
1134 cfg_flags = bnad->cfg_flags;
1135 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1137 if (cfg_flags & BNAD_CF_MSIX) {
1138 intr_info->intr_type = BNA_INTR_T_MSIX;
1139 intr_info->idl = kcalloc(intr_info->num,
1140 sizeof(struct bna_intr_descr),
1141 GFP_KERNEL);
1142 if (!intr_info->idl)
1143 return -ENOMEM;
1145 switch (src) {
1146 case BNAD_INTR_TX:
1147 vector_start = txrx_id;
1148 break;
1150 case BNAD_INTR_RX:
1151 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1152 txrx_id;
1153 break;
1155 default:
1156 BUG();
1159 for (i = 0; i < intr_info->num; i++)
1160 intr_info->idl[i].vector = vector_start + i;
1161 } else {
1162 intr_info->intr_type = BNA_INTR_T_INTX;
1163 intr_info->num = 1;
1164 intr_info->idl = kcalloc(intr_info->num,
1165 sizeof(struct bna_intr_descr),
1166 GFP_KERNEL);
1167 if (!intr_info->idl)
1168 return -ENOMEM;
1170 switch (src) {
1171 case BNAD_INTR_TX:
1172 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1173 break;
1175 case BNAD_INTR_RX:
1176 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1177 break;
1180 return 0;
1184 * NOTE: Should be called for MSIX only
1185 * Unregisters Tx MSIX vector(s) from the kernel
1187 static void
1188 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1189 int num_txqs)
1191 int i;
1192 int vector_num;
1194 for (i = 0; i < num_txqs; i++) {
1195 if (tx_info->tcb[i] == NULL)
1196 continue;
1198 vector_num = tx_info->tcb[i]->intr_vector;
1199 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1204 * NOTE: Should be called for MSIX only
1205 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1207 static int
1208 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1209 uint tx_id, int num_txqs)
1211 int i;
1212 int err;
1213 int vector_num;
1215 for (i = 0; i < num_txqs; i++) {
1216 vector_num = tx_info->tcb[i]->intr_vector;
1217 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1218 tx_id + tx_info->tcb[i]->id);
1219 err = request_irq(bnad->msix_table[vector_num].vector,
1220 (irq_handler_t)bnad_msix_tx, 0,
1221 tx_info->tcb[i]->name,
1222 tx_info->tcb[i]);
1223 if (err)
1224 goto err_return;
1227 return 0;
1229 err_return:
1230 if (i > 0)
1231 bnad_tx_msix_unregister(bnad, tx_info, i);
1232 return -1;
1236 * NOTE: Should be called for MSIX only
1237 * Unregisters Rx MSIX vector(s) from the kernel
1239 static void
1240 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1241 int num_rxps)
1243 int i;
1244 int vector_num;
1246 for (i = 0; i < num_rxps; i++) {
1247 if (rx_info->rx_ctrl[i].ccb == NULL)
1248 continue;
1250 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1251 free_irq(bnad->msix_table[vector_num].vector,
1252 rx_info->rx_ctrl[i].ccb);
1257 * NOTE: Should be called for MSIX only
1258 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1260 static int
1261 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1262 uint rx_id, int num_rxps)
1264 int i;
1265 int err;
1266 int vector_num;
1268 for (i = 0; i < num_rxps; i++) {
1269 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1270 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1271 bnad->netdev->name,
1272 rx_id + rx_info->rx_ctrl[i].ccb->id);
1273 err = request_irq(bnad->msix_table[vector_num].vector,
1274 (irq_handler_t)bnad_msix_rx, 0,
1275 rx_info->rx_ctrl[i].ccb->name,
1276 rx_info->rx_ctrl[i].ccb);
1277 if (err)
1278 goto err_return;
1281 return 0;
1283 err_return:
1284 if (i > 0)
1285 bnad_rx_msix_unregister(bnad, rx_info, i);
1286 return -1;
1289 /* Free Tx object Resources */
1290 static void
1291 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1293 int i;
1295 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1296 if (res_info[i].res_type == BNA_RES_T_MEM)
1297 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1298 else if (res_info[i].res_type == BNA_RES_T_INTR)
1299 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1303 /* Allocates memory and interrupt resources for Tx object */
1304 static int
1305 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1306 uint tx_id)
1308 int i, err = 0;
1310 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1311 if (res_info[i].res_type == BNA_RES_T_MEM)
1312 err = bnad_mem_alloc(bnad,
1313 &res_info[i].res_u.mem_info);
1314 else if (res_info[i].res_type == BNA_RES_T_INTR)
1315 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1316 &res_info[i].res_u.intr_info);
1317 if (err)
1318 goto err_return;
1320 return 0;
1322 err_return:
1323 bnad_tx_res_free(bnad, res_info);
1324 return err;
1327 /* Free Rx object Resources */
1328 static void
1329 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1331 int i;
1333 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1334 if (res_info[i].res_type == BNA_RES_T_MEM)
1335 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1336 else if (res_info[i].res_type == BNA_RES_T_INTR)
1337 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1341 /* Allocates memory and interrupt resources for Rx object */
1342 static int
1343 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1344 uint rx_id)
1346 int i, err = 0;
1348 /* All memory needs to be allocated before setup_ccbs */
1349 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1350 if (res_info[i].res_type == BNA_RES_T_MEM)
1351 err = bnad_mem_alloc(bnad,
1352 &res_info[i].res_u.mem_info);
1353 else if (res_info[i].res_type == BNA_RES_T_INTR)
1354 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1355 &res_info[i].res_u.intr_info);
1356 if (err)
1357 goto err_return;
1359 return 0;
1361 err_return:
1362 bnad_rx_res_free(bnad, res_info);
1363 return err;
1366 /* Timer callbacks */
1367 /* a) IOC timer */
1368 static void
1369 bnad_ioc_timeout(unsigned long data)
1371 struct bnad *bnad = (struct bnad *)data;
1372 unsigned long flags;
1374 spin_lock_irqsave(&bnad->bna_lock, flags);
1375 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1376 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1379 static void
1380 bnad_ioc_hb_check(unsigned long data)
1382 struct bnad *bnad = (struct bnad *)data;
1383 unsigned long flags;
1385 spin_lock_irqsave(&bnad->bna_lock, flags);
1386 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1387 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1390 static void
1391 bnad_ioc_sem_timeout(unsigned long data)
1393 struct bnad *bnad = (struct bnad *)data;
1394 unsigned long flags;
1396 spin_lock_irqsave(&bnad->bna_lock, flags);
1397 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
1398 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1402 * All timer routines use bnad->bna_lock to protect against
1403 * the following race, which may occur in case of no locking:
1404 * Time     CPU m           CPU n
1405 * 0        1 = test_bit
1406 * 1                        clear_bit
1407 * 2                        del_timer_sync
1408 * 3        mod_timer
1411 /* b) Dynamic Interrupt Moderation Timer */
1412 static void
1413 bnad_dim_timeout(unsigned long data)
1415 struct bnad *bnad = (struct bnad *)data;
1416 struct bnad_rx_info *rx_info;
1417 struct bnad_rx_ctrl *rx_ctrl;
1418 int i, j;
1419 unsigned long flags;
1421 if (!netif_carrier_ok(bnad->netdev))
1422 return;
1424 spin_lock_irqsave(&bnad->bna_lock, flags);
1425 for (i = 0; i < bnad->num_rx; i++) {
1426 rx_info = &bnad->rx_info[i];
1427 if (!rx_info->rx)
1428 continue;
1429 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1430 rx_ctrl = &rx_info->rx_ctrl[j];
1431 if (!rx_ctrl->ccb)
1432 continue;
1433 bna_rx_dim_update(rx_ctrl->ccb);
1437 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1438 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1439 mod_timer(&bnad->dim_timer,
1440 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1441 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1444 /* c) Statistics Timer */
1445 static void
1446 bnad_stats_timeout(unsigned long data)
1448 struct bnad *bnad = (struct bnad *)data;
1449 unsigned long flags;
1451 if (!netif_running(bnad->netdev) ||
1452 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1453 return;
1455 spin_lock_irqsave(&bnad->bna_lock, flags);
1456 bna_stats_get(&bnad->bna);
1457 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1461 * Set up timer for DIM
1462 * Called with bnad->bna_lock held
1464 void
1465 bnad_dim_timer_start(struct bnad *bnad)
1467 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1468 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1469 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1470 (unsigned long)bnad);
1471 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1472 mod_timer(&bnad->dim_timer,
1473 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1478 * Set up timer for statistics
1479 * Called with mutex_lock(&bnad->conf_mutex) held
1481 static void
1482 bnad_stats_timer_start(struct bnad *bnad)
1484 unsigned long flags;
1486 spin_lock_irqsave(&bnad->bna_lock, flags);
1487 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1488 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1489 (unsigned long)bnad);
1490 mod_timer(&bnad->stats_timer,
1491 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1493 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1497 * Stops the stats timer
1498 * Called with mutex_lock(&bnad->conf_mutex) held
1500 static void
1501 bnad_stats_timer_stop(struct bnad *bnad)
1503 int to_del = 0;
1504 unsigned long flags;
1506 spin_lock_irqsave(&bnad->bna_lock, flags);
1507 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1508 to_del = 1;
1509 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1510 if (to_del)
1511 del_timer_sync(&bnad->stats_timer);
1514 /* Utilities */
1516 static void
1517 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1519 int i = 1; /* Index 0 has broadcast address */
1520 struct netdev_hw_addr *mc_addr;
1522 netdev_for_each_mc_addr(mc_addr, netdev) {
1523 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1524 ETH_ALEN);
1525 i++;
1529 static int
1530 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1532 struct bnad_rx_ctrl *rx_ctrl =
1533 container_of(napi, struct bnad_rx_ctrl, napi);
1534 struct bna_ccb *ccb;
1535 struct bnad *bnad;
1536 int rcvd = 0;
1538 ccb = rx_ctrl->ccb;
1540 bnad = ccb->bnad;
1542 if (!netif_carrier_ok(bnad->netdev))
1543 goto poll_exit;
1545 rcvd = bnad_poll_cq(bnad, ccb, budget);
1546 if (rcvd == budget)
1547 return rcvd;
1549 poll_exit:
1550 napi_complete((napi));
1552 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1554 bnad_enable_rx_irq(bnad, ccb);
1555 return rcvd;
1558 static int
1559 bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
1561 struct bnad_rx_ctrl *rx_ctrl =
1562 container_of(napi, struct bnad_rx_ctrl, napi);
1563 struct bna_ccb *ccb;
1564 struct bnad *bnad;
1565 int rcvd = 0;
1566 int i, j;
1568 ccb = rx_ctrl->ccb;
1570 bnad = ccb->bnad;
1572 if (!netif_carrier_ok(bnad->netdev))
1573 goto poll_exit;
1575 /* Handle Tx Completions, if any */
1576 for (i = 0; i < bnad->num_tx; i++) {
1577 for (j = 0; j < bnad->num_txq_per_tx; j++)
1578 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
1581 /* Handle Rx Completions */
1582 rcvd = bnad_poll_cq(bnad, ccb, budget);
1583 if (rcvd == budget)
1584 return rcvd;
1585 poll_exit:
1586 napi_complete((napi));
1588 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1590 bnad_enable_txrx_irqs(bnad);
1591 return rcvd;
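/*
 * Two NAPI poll routines exist because of the two interrupt modes: with
 * MSI-X, Tx completions have their own vectors, so bnad_napi_poll_rx()
 * services only the CQ; in INTx mode one handler covers everything, so
 * bnad_napi_poll_txrx() also reaps Tx completions before polling Rx.
 * bnad_napi_enable() below picks the routine based on BNAD_CF_MSIX.
 */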
1594 static void
1595 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1597 int (*napi_poll) (struct napi_struct *, int);
1598 struct bnad_rx_ctrl *rx_ctrl;
1599 int i;
1600 unsigned long flags;
1602 spin_lock_irqsave(&bnad->bna_lock, flags);
1603 if (bnad->cfg_flags & BNAD_CF_MSIX)
1604 napi_poll = bnad_napi_poll_rx;
1605 else
1606 napi_poll = bnad_napi_poll_txrx;
1607 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1609 /* Initialize & enable NAPI */
1610 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1611 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1612 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1613 napi_poll, 64);
1614 napi_enable(&rx_ctrl->napi);
1618 static void
1619 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1621 int i;
1623 /* First disable and then clean up */
1624 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1625 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1626 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1630 /* Should be called with conf_lock held */
1631 void
1632 bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1634 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1635 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1636 unsigned long flags;
1638 if (!tx_info->tx)
1639 return;
1641 init_completion(&bnad->bnad_completions.tx_comp);
1642 spin_lock_irqsave(&bnad->bna_lock, flags);
1643 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1644 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1645 wait_for_completion(&bnad->bnad_completions.tx_comp);
1647 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1648 bnad_tx_msix_unregister(bnad, tx_info,
1649 bnad->num_txq_per_tx);
1651 spin_lock_irqsave(&bnad->bna_lock, flags);
1652 bna_tx_destroy(tx_info->tx);
1653 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1655 tx_info->tx = NULL;
1657 if (0 == tx_id)
1658 tasklet_kill(&bnad->tx_free_tasklet);
1660 bnad_tx_res_free(bnad, res_info);
1663 /* Should be called with conf_lock held */
1665 bnad_setup_tx(struct bnad *bnad, uint tx_id)
1667 int err;
1668 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1669 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1670 struct bna_intr_info *intr_info =
1671 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1672 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1673 struct bna_tx_event_cbfn tx_cbfn;
1674 struct bna_tx *tx;
1675 unsigned long flags;
1677 /* Initialize the Tx object configuration */
1678 tx_config->num_txq = bnad->num_txq_per_tx;
1679 tx_config->txq_depth = bnad->txq_depth;
1680 tx_config->tx_type = BNA_TX_T_REGULAR;
1682 /* Initialize the tx event handlers */
1683 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1684 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1685 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1686 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1687 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1689 /* Get BNA's resource requirement for one tx object */
1690 spin_lock_irqsave(&bnad->bna_lock, flags);
1691 bna_tx_res_req(bnad->num_txq_per_tx,
1692 bnad->txq_depth, res_info);
1693 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1695 /* Fill Unmap Q memory requirements */
1696 BNAD_FILL_UNMAPQ_MEM_REQ(
1697 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1698 bnad->num_txq_per_tx,
1699 BNAD_TX_UNMAPQ_DEPTH);
1701 /* Allocate resources */
1702 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1703 if (err)
1704 return err;
1706 /* Ask BNA to create one Tx object, supplying required resources */
1707 spin_lock_irqsave(&bnad->bna_lock, flags);
1708 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1709 tx_info);
1710 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1711 if (!tx)
1712 goto err_return;
1713 tx_info->tx = tx;
1715 /* Register ISR for the Tx object */
1716 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1717 err = bnad_tx_msix_register(bnad, tx_info,
1718 tx_id, bnad->num_txq_per_tx);
1719 if (err)
1720 goto err_return;
1723 spin_lock_irqsave(&bnad->bna_lock, flags);
1724 bna_tx_enable(tx);
1725 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1727 return 0;
1729 err_return:
1730 bnad_tx_res_free(bnad, res_info);
1731 return err;
1734 /* Setup the rx config for bna_rx_create */
1735 /* bnad decides the configuration */
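/*
 * With more than one Rx path the function below enables RSS, hashing
 * TCP/IP v4 and v6 flows with a randomly generated Toeplitz key;
 * hash_mask is set to num_rxp_per_rx - 1 (a mask, so an even spread
 * presumably assumes a power-of-two number of paths).
 */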
1736 static void
1737 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1739 rx_config->rx_type = BNA_RX_T_REGULAR;
1740 rx_config->num_paths = bnad->num_rxp_per_rx;
1742 if (bnad->num_rxp_per_rx > 1) {
1743 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1744 rx_config->rss_config.hash_type =
1745 (BFI_RSS_T_V4_TCP |
1746 BFI_RSS_T_V6_TCP |
1747 BFI_RSS_T_V4_IP |
1748 BFI_RSS_T_V6_IP);
1749 rx_config->rss_config.hash_mask =
1750 bnad->num_rxp_per_rx - 1;
1751 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1752 sizeof(rx_config->rss_config.toeplitz_hash_key));
1753 } else {
1754 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1755 memset(&rx_config->rss_config, 0,
1756 sizeof(rx_config->rss_config));
1758 rx_config->rxp_type = BNA_RXP_SLR;
1759 rx_config->q_depth = bnad->rxq_depth;
1761 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1763 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1766 /* Called with mutex_lock(&bnad->conf_mutex) held */
1767 void
1768 bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1770 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1771 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1772 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1773 unsigned long flags;
1774 int dim_timer_del = 0;
1776 if (!rx_info->rx)
1777 return;
1779 if (0 == rx_id) {
1780 spin_lock_irqsave(&bnad->bna_lock, flags);
1781 dim_timer_del = bnad_dim_timer_running(bnad);
1782 if (dim_timer_del)
1783 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1784 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1785 if (dim_timer_del)
1786 del_timer_sync(&bnad->dim_timer);
1789 bnad_napi_disable(bnad, rx_id);
1791 init_completion(&bnad->bnad_completions.rx_comp);
1792 spin_lock_irqsave(&bnad->bna_lock, flags);
1793 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1794 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1795 wait_for_completion(&bnad->bnad_completions.rx_comp);
1797 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1798 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1800 spin_lock_irqsave(&bnad->bna_lock, flags);
1801 bna_rx_destroy(rx_info->rx);
1802 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1804 rx_info->rx = NULL;
1806 bnad_rx_res_free(bnad, res_info);
1809 /* Called with mutex_lock(&bnad->conf_mutex) held */
1811 bnad_setup_rx(struct bnad *bnad, uint rx_id)
1813 int err;
1814 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1815 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1816 struct bna_intr_info *intr_info =
1817 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1818 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1819 struct bna_rx_event_cbfn rx_cbfn;
1820 struct bna_rx *rx;
1821 unsigned long flags;
1823 /* Initialize the Rx object configuration */
1824 bnad_init_rx_config(bnad, rx_config);
1826 /* Initialize the Rx event handlers */
1827 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1828 rx_cbfn.rcb_destroy_cbfn = NULL;
1829 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1830 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1831 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1832 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1834 /* Get BNA's resource requirement for one Rx object */
1835 spin_lock_irqsave(&bnad->bna_lock, flags);
1836 bna_rx_res_req(rx_config, res_info);
1837 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1839 /* Fill Unmap Q memory requirements */
1840 BNAD_FILL_UNMAPQ_MEM_REQ(
1841 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1842 rx_config->num_paths +
1843 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1844 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1846 /* Allocate resource */
1847 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1848 if (err)
1849 return err;
1851 /* Ask BNA to create one Rx object, supplying required resources */
1852 spin_lock_irqsave(&bnad->bna_lock, flags);
1853 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1854 rx_info);
1855 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1856 if (!rx)
1857 goto err_return;
1858 rx_info->rx = rx;
1860 /* Register ISR for the Rx object */
1861 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1862 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1863 rx_config->num_paths);
1864 if (err)
1865 goto err_return;
1868 /* Enable NAPI */
1869 bnad_napi_enable(bnad, rx_id);
1871 spin_lock_irqsave(&bnad->bna_lock, flags);
1872 if (0 == rx_id) {
1873 /* Set up Dynamic Interrupt Moderation Vector */
1874 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1875 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1877 /* Enable VLAN filtering only on the default Rx */
1878 bna_rx_vlanfilter_enable(rx);
1880 /* Start the DIM timer */
1881 bnad_dim_timer_start(bnad);
1884 bna_rx_enable(rx);
1885 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1887 return 0;
1889 err_return:
1890 bnad_cleanup_rx(bnad, rx_id);
1891 return err;
1894 /* Called with conf_lock & bnad->bna_lock held */
1895 void
1896 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1898 struct bnad_tx_info *tx_info;
1900 tx_info = &bnad->tx_info[0];
1901 if (!tx_info->tx)
1902 return;
1904 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1907 /* Called with conf_lock & bnad->bna_lock held */
1908 void
1909 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1911 struct bnad_rx_info *rx_info;
1912 int i;
1914 for (i = 0; i < bnad->num_rx; i++) {
1915 rx_info = &bnad->rx_info[i];
1916 if (!rx_info->rx)
1917 continue;
1918 bna_rx_coalescing_timeo_set(rx_info->rx,
1919 bnad->rx_coalescing_timeo);
1924 * Called with bnad->bna_lock held
1926 static int
1927 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1929 int ret;
1931 if (!is_valid_ether_addr(mac_addr))
1932 return -EADDRNOTAVAIL;
1934 /* If datapath is down, pretend everything went through */
1935 if (!bnad->rx_info[0].rx)
1936 return 0;
1938 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1939 if (ret != BNA_CB_SUCCESS)
1940 return -EADDRNOTAVAIL;
1942 return 0;
1945 /* Should be called with conf_lock held */
1946 static int
1947 bnad_enable_default_bcast(struct bnad *bnad)
1949 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1950 int ret;
1951 unsigned long flags;
1953 init_completion(&bnad->bnad_completions.mcast_comp);
1955 spin_lock_irqsave(&bnad->bna_lock, flags);
1956 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1957 bnad_cb_rx_mcast_add);
1958 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1960 if (ret == BNA_CB_SUCCESS)
1961 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1962 else
1963 return -ENODEV;
1965 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1966 return -ENODEV;
1968 return 0;
1971 /* Statistics utilities */
1972 void
1973 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1975 int i, j;
1977 for (i = 0; i < bnad->num_rx; i++) {
1978 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1979 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1980 stats->rx_packets += bnad->rx_info[i].
1981 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
1982 stats->rx_bytes += bnad->rx_info[i].
1983 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
1984 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1985 bnad->rx_info[i].rx_ctrl[j].ccb->
1986 rcb[1]->rxq) {
1987 stats->rx_packets +=
1988 bnad->rx_info[i].rx_ctrl[j].
1989 ccb->rcb[1]->rxq->rx_packets;
1990 stats->rx_bytes +=
1991 bnad->rx_info[i].rx_ctrl[j].
1992 ccb->rcb[1]->rxq->rx_bytes;
1997 for (i = 0; i < bnad->num_tx; i++) {
1998 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1999 if (bnad->tx_info[i].tcb[j]) {
2000 stats->tx_packets +=
2001 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2002 stats->tx_bytes +=
2003 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2010 * Must be called with the bna_lock held.
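 * The rx_fifo_errors accounting builds a 64-bit bitmap of active RxFs
 * from the two 32-bit words in rxf_bmap[] and adds the frame_drops
 * counter of the first active RxF it finds (note the break after the
 * first hit).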
2012 void
2013 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2015 struct bfi_ll_stats_mac *mac_stats;
2016 u64 bmap;
2017 int i;
2019 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2020 stats->rx_errors =
2021 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2022 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2023 mac_stats->rx_undersize;
2024 stats->tx_errors = mac_stats->tx_fcs_error +
2025 mac_stats->tx_undersize;
2026 stats->rx_dropped = mac_stats->rx_drop;
2027 stats->tx_dropped = mac_stats->tx_drop;
2028 stats->multicast = mac_stats->rx_multicast;
2029 stats->collisions = mac_stats->tx_total_collision;
2031 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2033 /* receive ring buffer overflow ?? */
2035 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2036 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2037 /* receiver FIFO overrun */
2038 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2039 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2040 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2041 if (bmap & 1) {
2042 stats->rx_fifo_errors +=
2043 bnad->stats.bna_stats->
2044 hw_stats->rxf_stats[i].frame_drops;
2045 break;
2047 bmap >>= 1;
2051 static void
2052 bnad_mbox_irq_sync(struct bnad *bnad)
2054 u32 irq;
2055 unsigned long flags;
2057 spin_lock_irqsave(&bnad->bna_lock, flags);
2058 if (bnad->cfg_flags & BNAD_CF_MSIX)
2059 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2060 else
2061 irq = bnad->pcidev->irq;
2062 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2064 synchronize_irq(irq);
2067 /* Utility used by bnad_start_xmit, for doing TSO */
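/*
 * bnad_tso_prepare() below zeroes iph->tot_len/iph->check (or
 * ipv6h->payload_len) and seeds tcp_hdr(skb)->check with the
 * pseudo-header sum excluding the length, presumably so the NIC can
 * fill in per-segment lengths and checksums during TSO.
 */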
2068 static int
2069 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2071 int err;
2073 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2074 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2075 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2076 if (skb_header_cloned(skb)) {
2077 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2078 if (err) {
2079 BNAD_UPDATE_CTR(bnad, tso_err);
2080 return err;
2085 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2086 * excluding the length field.
2088 if (skb->protocol == htons(ETH_P_IP)) {
2089 struct iphdr *iph = ip_hdr(skb);
2091 /* Do we really need these? */
2092 iph->tot_len = 0;
2093 iph->check = 0;
2095 tcp_hdr(skb)->check =
2096 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2097 IPPROTO_TCP, 0);
2098 BNAD_UPDATE_CTR(bnad, tso4);
2099 } else {
2100 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2102 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2103 ipv6h->payload_len = 0;
2104 tcp_hdr(skb)->check =
2105 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2106 IPPROTO_TCP, 0);
2107 BNAD_UPDATE_CTR(bnad, tso6);
2110 return 0;
2114 * Initialize Q numbers depending on Rx Paths
2115 * Called with bnad->bna_lock held, because of cfg_flags
2116 * access.
2118 static void
2119 bnad_q_num_init(struct bnad *bnad)
2121 int rxps;
2123 rxps = min((uint)num_online_cpus(),
2124 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2126 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2127 rxps = 1; /* INTx */
2129 bnad->num_rx = 1;
2130 bnad->num_tx = 1;
2131 bnad->num_rxp_per_rx = rxps;
2132 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2136 * Adjusts the Q numbers, given a number of msix vectors
2137 * Give preference to RSS as opposed to Tx priority queues;
2138 * in such a case, just use 1 Tx Q.
2139 * Called with bnad->bna_lock held because of cfg_flags access
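 * A worked example (assuming BNAD_MAILBOX_MSIX_VECTORS is 1): if only
 * 6 vectors were granted, with num_tx = 1 and num_txq_per_tx forced to
 * 1, then num_rxp_per_rx = 6 - (1 * 1) - 1 = 4; if not even the minimum
 * is available, the driver keeps a single Rx path.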
2141 static void
2142 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2144 bnad->num_txq_per_tx = 1;
2145 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2146 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2147 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2148 bnad->num_rxp_per_rx = msix_vectors -
2149 (bnad->num_tx * bnad->num_txq_per_tx) -
2150 BNAD_MAILBOX_MSIX_VECTORS;
2151 } else
2152 bnad->num_rxp_per_rx = 1;
2155 static void
2156 bnad_set_netdev_perm_addr(struct bnad *bnad)
2158 struct net_device *netdev = bnad->netdev;
2160 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
2161 if (is_zero_ether_addr(netdev->dev_addr))
2162 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
2165 /* Enable / disable device */
2166 static void
2167 bnad_device_disable(struct bnad *bnad)
2169 unsigned long flags;
2171 init_completion(&bnad->bnad_completions.ioc_comp);
2173 spin_lock_irqsave(&bnad->bna_lock, flags);
2174 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2175 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2177 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2180 static int
2181 bnad_device_enable(struct bnad *bnad)
2183 int err = 0;
2184 unsigned long flags;
2186 init_completion(&bnad->bnad_completions.ioc_comp);
2188 spin_lock_irqsave(&bnad->bna_lock, flags);
2189 bna_device_enable(&bnad->bna.device);
2190 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2192 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2194 if (bnad->bnad_completions.ioc_comp_status)
2195 err = bnad->bnad_completions.ioc_comp_status;
2197 return err;
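/*
 * Note: enable and disable share one pattern: arm ioc_comp with
 * init_completion(), kick the BNA device state machine under bna_lock,
 * then block in wait_for_completion() until the IOC callback fires;
 * ioc_comp_status then carries the result.
 */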
2200 /* Free BNA resources */
2201 static void
2202 bnad_res_free(struct bnad *bnad)
2204 int i;
2205 struct bna_res_info *res_info = &bnad->res_info[0];
2207 for (i = 0; i < BNA_RES_T_MAX; i++) {
2208 if (res_info[i].res_type == BNA_RES_T_MEM)
2209 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2210 else
2211 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2215 /* Allocates memory and interrupt resources for BNA */
2216 static int
2217 bnad_res_alloc(struct bnad *bnad)
2219 int i, err;
2220 struct bna_res_info *res_info = &bnad->res_info[0];
2222 for (i = 0; i < BNA_RES_T_MAX; i++) {
2223 if (res_info[i].res_type == BNA_RES_T_MEM)
2224 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2225 else
2226 err = bnad_mbox_irq_alloc(bnad,
2227 &res_info[i].res_u.intr_info);
2228 if (err)
2229 goto err_return;
2231 return 0;
2233 err_return:
2234 bnad_res_free(bnad);
2235 return err;
2238 /* Interrupt enable / disable */
2239 static void
2240 bnad_enable_msix(struct bnad *bnad)
2242 int i, ret;
2243 unsigned long flags;
2245 spin_lock_irqsave(&bnad->bna_lock, flags);
2246 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2247 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2248 return;
2250 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2252 if (bnad->msix_table)
2253 return;
2255 bnad->msix_table =
2256 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2258 if (!bnad->msix_table)
2259 goto intx_mode;
2261 for (i = 0; i < bnad->msix_num; i++)
2262 bnad->msix_table[i].entry = i;
2264 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2265 if (ret > 0) {
2266 /* Not enough MSI-X vectors. */
2268 spin_lock_irqsave(&bnad->bna_lock, flags);
2269 /* ret = #of vectors that we got */
2270 bnad_q_num_adjust(bnad, ret);
2271 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2273 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2274 + (bnad->num_rx
2275 * bnad->num_rxp_per_rx) +
2276 BNAD_MAILBOX_MSIX_VECTORS;
2278 /* Try once more with adjusted numbers */
2279 /* If this fails, fall back to INTx */
2280 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2281 bnad->msix_num);
2282 if (ret)
2283 goto intx_mode;
2285 } else if (ret < 0)
2286 goto intx_mode;
2287 return;
2289 intx_mode:
2291 kfree(bnad->msix_table);
2292 bnad->msix_table = NULL;
2293 bnad->msix_num = 0;
2294 spin_lock_irqsave(&bnad->bna_lock, flags);
2295 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2296 bnad_q_num_init(bnad);
2297 spin_unlock_irqrestore(&bnad->bna_lock, flags);
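/*
 * Note on the fallback above, assuming the classic pci_enable_msix()
 * return convention: 0 means all requested vectors were allocated, a
 * positive value is the number of vectors that could be allocated (so
 * the queue counts are shrunk and the request retried once), and a
 * negative value is an error; any remaining failure clears BNAD_CF_MSIX
 * and re-derives the queue counts for INTx via bnad_q_num_init().
 */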
2300 static void
2301 bnad_disable_msix(struct bnad *bnad)
2303 u32 cfg_flags;
2304 unsigned long flags;
2306 spin_lock_irqsave(&bnad->bna_lock, flags);
2307 cfg_flags = bnad->cfg_flags;
2308 if (bnad->cfg_flags & BNAD_CF_MSIX)
2309 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2310 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2312 if (cfg_flags & BNAD_CF_MSIX) {
2313 pci_disable_msix(bnad->pcidev);
2314 kfree(bnad->msix_table);
2315 bnad->msix_table = NULL;
2319 /* Netdev entry points */
2320 static int
2321 bnad_open(struct net_device *netdev)
2323 int err;
2324 struct bnad *bnad = netdev_priv(netdev);
2325 struct bna_pause_config pause_config;
2326 int mtu;
2327 unsigned long flags;
2329 mutex_lock(&bnad->conf_mutex);
2331 /* Tx */
2332 err = bnad_setup_tx(bnad, 0);
2333 if (err)
2334 goto err_return;
2336 /* Rx */
2337 err = bnad_setup_rx(bnad, 0);
2338 if (err)
2339 goto cleanup_tx;
2341 /* Port */
2342 pause_config.tx_pause = 0;
2343 pause_config.rx_pause = 0;
2345 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2347 spin_lock_irqsave(&bnad->bna_lock, flags);
2348 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2349 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2350 bna_port_enable(&bnad->bna.port);
2351 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2353 /* Enable broadcast */
2354 bnad_enable_default_bcast(bnad);
2356 /* Set the UCAST address */
2357 spin_lock_irqsave(&bnad->bna_lock, flags);
2358 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2359 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2361 /* Start the stats timer */
2362 bnad_stats_timer_start(bnad);
2364 mutex_unlock(&bnad->conf_mutex);
2366 return 0;
2368 cleanup_tx:
2369 bnad_cleanup_tx(bnad, 0);
2371 err_return:
2372 mutex_unlock(&bnad->conf_mutex);
2373 return err;
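/*
 * Note: the port MTU programmed above is the on-wire frame size,
 * ETH_HLEN + netdev->mtu + ETH_FCS_LEN; for the default MTU of 1500
 * that is 14 + 1500 + 4 = 1518 bytes.
 */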
2376 static int
2377 bnad_stop(struct net_device *netdev)
2379 struct bnad *bnad = netdev_priv(netdev);
2380 unsigned long flags;
2382 mutex_lock(&bnad->conf_mutex);
2384 /* Stop the stats timer */
2385 bnad_stats_timer_stop(bnad);
2387 init_completion(&bnad->bnad_completions.port_comp);
2389 spin_lock_irqsave(&bnad->bna_lock, flags);
2390 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2391 bnad_cb_port_disabled);
2392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2394 wait_for_completion(&bnad->bnad_completions.port_comp);
2396 bnad_cleanup_tx(bnad, 0);
2397 bnad_cleanup_rx(bnad, 0);
2399 /* Synchronize mailbox IRQ */
2400 bnad_mbox_irq_sync(bnad);
2402 mutex_unlock(&bnad->conf_mutex);
2404 return 0;
2407 /* TX */
2409 * bnad_start_xmit : Netdev entry point for Transmit
2410 * Called under lock held by net_device
2412 static netdev_tx_t
2413 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2415 struct bnad *bnad = netdev_priv(netdev);
2417 u16 txq_prod, vlan_tag = 0;
2418 u32 unmap_prod, wis, wis_used, wi_range;
2419 u32 vectors, vect_id, i, acked;
2420 u32 tx_id;
2421 int err;
2423 struct bnad_tx_info *tx_info;
2424 struct bna_tcb *tcb;
2425 struct bnad_unmap_q *unmap_q;
2426 dma_addr_t dma_addr;
2427 struct bna_txq_entry *txqent;
2428 bna_txq_wi_ctrl_flag_t flags;
2430 if (unlikely
2431 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2432 dev_kfree_skb(skb);
2433 return NETDEV_TX_OK;
2437 * Takes care of the Tx that is scheduled between clearing the flag
2438 * and the netif_stop_queue() call.
2440 if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
2441 dev_kfree_skb(skb);
2442 return NETDEV_TX_OK;
2445 tx_id = 0;
2447 tx_info = &bnad->tx_info[tx_id];
2448 tcb = tx_info->tcb[tx_id];
2449 unmap_q = tcb->unmap_q;
2451 vectors = 1 + skb_shinfo(skb)->nr_frags;
2452 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2453 dev_kfree_skb(skb);
2454 return NETDEV_TX_OK;
2456 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2457 acked = 0;
2458 if (unlikely
2459 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2460 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2461 if ((u16) (*tcb->hw_consumer_index) !=
2462 tcb->consumer_index &&
2463 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2464 acked = bnad_free_txbufs(bnad, tcb);
2465 bna_ib_ack(tcb->i_dbell, acked);
2466 smp_mb__before_clear_bit();
2467 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2468 } else {
2469 netif_stop_queue(netdev);
2470 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2473 smp_mb();
2475 * Check again to deal with the race between netif_stop_queue()
2476 * here and netif_wake_queue() in the interrupt handler, which
2477 * runs outside the netif tx lock.
2479 if (likely
2480 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2481 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2482 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2483 return NETDEV_TX_BUSY;
2484 } else {
2485 netif_wake_queue(netdev);
2486 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2490 unmap_prod = unmap_q->producer_index;
2491 wis_used = 1;
2492 vect_id = 0;
2493 flags = 0;
2495 txq_prod = tcb->producer_index;
2496 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2497 BUG_ON(!(wi_range <= tcb->q_depth));
2498 txqent->hdr.wi.reserved = 0;
2499 txqent->hdr.wi.num_vectors = vectors;
2500 txqent->hdr.wi.opcode =
2501 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2502 BNA_TXQ_WI_SEND));
2504 if (vlan_tx_tag_present(skb)) {
2505 vlan_tag = (u16) vlan_tx_tag_get(skb);
2506 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2508 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2509 vlan_tag =
2510 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2511 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2514 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2516 if (skb_is_gso(skb)) {
2517 err = bnad_tso_prepare(bnad, skb);
2518 if (err) {
2519 dev_kfree_skb(skb);
2520 return NETDEV_TX_OK;
2522 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2523 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2524 txqent->hdr.wi.l4_hdr_size_n_offset =
2525 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2526 (tcp_hdrlen(skb) >> 2,
2527 skb_transport_offset(skb)));
2528 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2529 u8 proto = 0;
2531 txqent->hdr.wi.lso_mss = 0;
2533 if (skb->protocol == htons(ETH_P_IP))
2534 proto = ip_hdr(skb)->protocol;
2535 else if (skb->protocol == htons(ETH_P_IPV6)) {
2536 /* nexthdr may not be TCP immediately. */
2537 proto = ipv6_hdr(skb)->nexthdr;
2539 if (proto == IPPROTO_TCP) {
2540 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2541 txqent->hdr.wi.l4_hdr_size_n_offset =
2542 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2543 (0, skb_transport_offset(skb)));
2545 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2547 BUG_ON(!(skb_headlen(skb) >=
2548 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2550 } else if (proto == IPPROTO_UDP) {
2551 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2552 txqent->hdr.wi.l4_hdr_size_n_offset =
2553 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2554 (0, skb_transport_offset(skb)));
2556 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2558 BUG_ON(!(skb_headlen(skb) >=
2559 skb_transport_offset(skb) +
2560 sizeof(struct udphdr)));
2561 } else {
2562 err = skb_checksum_help(skb);
2563 BNAD_UPDATE_CTR(bnad, csum_help);
2564 if (err) {
2565 dev_kfree_skb(skb);
2566 BNAD_UPDATE_CTR(bnad, csum_help_err);
2567 return NETDEV_TX_OK;
2570 } else {
2571 txqent->hdr.wi.lso_mss = 0;
2572 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2575 txqent->hdr.wi.flags = htons(flags);
2577 txqent->hdr.wi.frame_length = htonl(skb->len);
2579 unmap_q->unmap_array[unmap_prod].skb = skb;
2580 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2581 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2582 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
2583 PCI_DMA_TODEVICE);
2584 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2585 dma_addr);
2587 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2588 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2590 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2591 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2592 u32 size = frag->size;
2594 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2595 vect_id = 0;
2596 if (--wi_range)
2597 txqent++;
2598 else {
2599 BNA_QE_INDX_ADD(txq_prod, wis_used,
2600 tcb->q_depth);
2601 wis_used = 0;
2602 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2603 txqent, wi_range);
2604 BUG_ON(!(wi_range <= tcb->q_depth));
2606 wis_used++;
2607 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2610 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2611 txqent->vector[vect_id].length = htons(size);
2612 dma_addr =
2613 pci_map_page(bnad->pcidev, frag->page,
2614 frag->page_offset, size,
2615 PCI_DMA_TODEVICE);
2616 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2617 dma_addr);
2618 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2619 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2622 unmap_q->producer_index = unmap_prod;
2623 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2624 tcb->producer_index = txq_prod;
2626 smp_mb();
2627 bna_txq_prod_indx_doorbell(tcb);
2629 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2630 tasklet_schedule(&bnad->tx_free_tasklet);
2632 return NETDEV_TX_OK;
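/*
 * Rough sketch of the queueing math above, assuming BNA_TXQ_WI_NEEDED()
 * is DIV_ROUND_UP(vectors, 4) per the "4 vectors per work item" comment:
 * a linear skb with 3 fragments needs 4 vectors and thus 1 work item,
 * while one with 6 fragments needs 7 vectors, i.e. 2 work items, the
 * second carrying the BNA_TXQ_WI_EXTENSION opcode.
 */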
2636 * Uses a spin_lock to synchronize reading of the stats structures, which
2637 * are written by BNA under the same lock.
2639 static struct rtnl_link_stats64 *
2640 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2642 struct bnad *bnad = netdev_priv(netdev);
2643 unsigned long flags;
2645 spin_lock_irqsave(&bnad->bna_lock, flags);
2647 bnad_netdev_qstats_fill(bnad, stats);
2648 bnad_netdev_hwstats_fill(bnad, stats);
2650 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2652 return stats;
2655 static void
2656 bnad_set_rx_mode(struct net_device *netdev)
2658 struct bnad *bnad = netdev_priv(netdev);
2659 u32 new_mask, valid_mask;
2660 unsigned long flags;
2662 spin_lock_irqsave(&bnad->bna_lock, flags);
2664 new_mask = valid_mask = 0;
2666 if (netdev->flags & IFF_PROMISC) {
2667 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2668 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2669 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2670 bnad->cfg_flags |= BNAD_CF_PROMISC;
2672 } else {
2673 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2674 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2675 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2676 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2680 if (netdev->flags & IFF_ALLMULTI) {
2681 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2682 new_mask |= BNA_RXMODE_ALLMULTI;
2683 valid_mask |= BNA_RXMODE_ALLMULTI;
2684 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2686 } else {
2687 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2688 new_mask &= ~BNA_RXMODE_ALLMULTI;
2689 valid_mask |= BNA_RXMODE_ALLMULTI;
2690 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2694 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2696 if (!netdev_mc_empty(netdev)) {
2697 u8 *mcaddr_list;
2698 int mc_count = netdev_mc_count(netdev);
2700 /* Index 0 holds the broadcast address */
2701 mcaddr_list =
2702 kzalloc((mc_count + 1) * ETH_ALEN,
2703 GFP_ATOMIC);
2704 if (!mcaddr_list)
2705 goto unlock;
2707 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2709 /* Copy rest of the MC addresses */
2710 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2712 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2713 mcaddr_list, NULL);
2715 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2716 kfree(mcaddr_list);
2718 unlock:
2719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
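/*
 * Note: from the promisc/allmulti handling above, valid_mask appears to
 * select which receive-mode bits bna_rx_mode_set() should touch and
 * new_mask their new values. For the multicast list, slot 0 is reserved
 * for the broadcast address, so e.g. two multicast addresses result in a
 * (2 + 1) * ETH_ALEN buffer being passed to bna_rx_mcast_listset().
 */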
2723 * bna_lock is used to sync writes to netdev->dev_addr.
2724 * conf_lock cannot be used since this call may be made
2725 * in a non-blocking context.
2727 static int
2728 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2730 int err;
2731 struct bnad *bnad = netdev_priv(netdev);
2732 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2733 unsigned long flags;
2735 spin_lock_irqsave(&bnad->bna_lock, flags);
2737 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2739 if (!err)
2740 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2742 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744 return err;
2747 static int
2748 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2750 int mtu, err = 0;
2751 unsigned long flags;
2753 struct bnad *bnad = netdev_priv(netdev);
2755 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2756 return -EINVAL;
2758 mutex_lock(&bnad->conf_mutex);
2760 netdev->mtu = new_mtu;
2762 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2764 spin_lock_irqsave(&bnad->bna_lock, flags);
2765 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2766 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2768 mutex_unlock(&bnad->conf_mutex);
2769 return err;
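/*
 * Note: the range check above requires new_mtu + ETH_HLEN >= ETH_ZLEN,
 * i.e. new_mtu >= 46, and new_mtu <= BNAD_JUMBO_MTU (assumed to be a
 * 9000-byte jumbo limit). As in bnad_open(), the value programmed into
 * the port is the full frame size, e.g. 14 + 9000 + 4 = 9018 bytes.
 */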
2772 static void
2773 bnad_vlan_rx_register(struct net_device *netdev,
2774 struct vlan_group *vlan_grp)
2776 struct bnad *bnad = netdev_priv(netdev);
2778 mutex_lock(&bnad->conf_mutex);
2779 bnad->vlan_grp = vlan_grp;
2780 mutex_unlock(&bnad->conf_mutex);
2783 static void
2784 bnad_vlan_rx_add_vid(struct net_device *netdev,
2785 unsigned short vid)
2787 struct bnad *bnad = netdev_priv(netdev);
2788 unsigned long flags;
2790 if (!bnad->rx_info[0].rx)
2791 return;
2793 mutex_lock(&bnad->conf_mutex);
2795 spin_lock_irqsave(&bnad->bna_lock, flags);
2796 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2799 mutex_unlock(&bnad->conf_mutex);
2802 static void
2803 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2804 unsigned short vid)
2806 struct bnad *bnad = netdev_priv(netdev);
2807 unsigned long flags;
2809 if (!bnad->rx_info[0].rx)
2810 return;
2812 mutex_lock(&bnad->conf_mutex);
2814 spin_lock_irqsave(&bnad->bna_lock, flags);
2815 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2816 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2818 mutex_unlock(&bnad->conf_mutex);
2821 #ifdef CONFIG_NET_POLL_CONTROLLER
2822 static void
2823 bnad_netpoll(struct net_device *netdev)
2825 struct bnad *bnad = netdev_priv(netdev);
2826 struct bnad_rx_info *rx_info;
2827 struct bnad_rx_ctrl *rx_ctrl;
2828 u32 curr_mask;
2829 int i, j;
2831 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2832 bna_intx_disable(&bnad->bna, curr_mask);
2833 bnad_isr(bnad->pcidev->irq, netdev);
2834 bna_intx_enable(&bnad->bna, curr_mask);
2835 } else {
2836 for (i = 0; i < bnad->num_rx; i++) {
2837 rx_info = &bnad->rx_info[i];
2838 if (!rx_info->rx)
2839 continue;
2840 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2841 rx_ctrl = &rx_info->rx_ctrl[j];
2842 if (rx_ctrl->ccb) {
2843 bnad_disable_rx_irq(bnad,
2844 rx_ctrl->ccb);
2845 bnad_netif_rx_schedule_poll(bnad,
2846 rx_ctrl->ccb);
2852 #endif
2854 static const struct net_device_ops bnad_netdev_ops = {
2855 .ndo_open = bnad_open,
2856 .ndo_stop = bnad_stop,
2857 .ndo_start_xmit = bnad_start_xmit,
2858 .ndo_get_stats64 = bnad_get_stats64,
2859 .ndo_set_rx_mode = bnad_set_rx_mode,
2860 .ndo_set_multicast_list = bnad_set_rx_mode,
2861 .ndo_validate_addr = eth_validate_addr,
2862 .ndo_set_mac_address = bnad_set_mac_address,
2863 .ndo_change_mtu = bnad_change_mtu,
2864 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2865 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2866 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2867 #ifdef CONFIG_NET_POLL_CONTROLLER
2868 .ndo_poll_controller = bnad_netpoll
2869 #endif
2872 static void
2873 bnad_netdev_init(struct bnad *bnad, bool using_dac)
2875 struct net_device *netdev = bnad->netdev;
2877 netdev->features |= NETIF_F_IPV6_CSUM;
2878 netdev->features |= NETIF_F_TSO;
2879 netdev->features |= NETIF_F_TSO6;
2881 netdev->features |= NETIF_F_GRO;
2882 pr_warn("bna: GRO enabled, using kernel stack GRO\n");
2884 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2886 if (using_dac)
2887 netdev->features |= NETIF_F_HIGHDMA;
2889 netdev->features |=
2890 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2891 NETIF_F_HW_VLAN_FILTER;
2893 netdev->vlan_features = netdev->features;
2894 netdev->mem_start = bnad->mmio_start;
2895 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2897 netdev->netdev_ops = &bnad_netdev_ops;
2898 bnad_set_ethtool_ops(netdev);
2902 * 1. Initialize the bnad structure
2903 * 2. Setup netdev pointer in pci_dev
2904 * 3. Initialize Tx free tasklet
2905 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2907 static int
2908 bnad_init(struct bnad *bnad,
2909 struct pci_dev *pdev, struct net_device *netdev)
2911 unsigned long flags;
2913 SET_NETDEV_DEV(netdev, &pdev->dev);
2914 pci_set_drvdata(pdev, netdev);
2916 bnad->netdev = netdev;
2917 bnad->pcidev = pdev;
2918 bnad->mmio_start = pci_resource_start(pdev, 0);
2919 bnad->mmio_len = pci_resource_len(pdev, 0);
2920 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2921 if (!bnad->bar0) {
2922 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2923 pci_set_drvdata(pdev, NULL);
2924 return -ENOMEM;
2926 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2927 (unsigned long long) bnad->mmio_len);
2929 spin_lock_irqsave(&bnad->bna_lock, flags);
2930 if (!bnad_msix_disable)
2931 bnad->cfg_flags = BNAD_CF_MSIX;
2933 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2935 bnad_q_num_init(bnad);
2936 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2938 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2939 (bnad->num_rx * bnad->num_rxp_per_rx) +
2940 BNAD_MAILBOX_MSIX_VECTORS;
2942 bnad->txq_depth = BNAD_TXQ_DEPTH;
2943 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2944 bnad->rx_csum = true;
2946 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2947 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2949 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2950 (unsigned long)bnad);
2952 return 0;
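/*
 * Note: msix_num is sized as TxQs + Rx paths + mailbox. For example, on
 * an 8-CPU system where bnad_q_num_init() picks 8 Rx paths and with an
 * assumed BNAD_MAILBOX_MSIX_VECTORS of 1, the driver asks for
 * 1 + 8 + 1 = 10 MSI-X vectors.
 */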
2956 * Must be called after bnad_pci_uninit()
2957 * so that iounmap() and pci_set_drvdata(NULL)
2958 * happen only after PCI uninitialization.
2960 static void
2961 bnad_uninit(struct bnad *bnad)
2963 if (bnad->bar0)
2964 iounmap(bnad->bar0);
2965 pci_set_drvdata(bnad->pcidev, NULL);
2969 * Initialize locks
2970 a) Per-device mutex used for serializing configuration
2971 changes from OS interface
2972 b) spin lock used to protect bna state machine
2974 static void
2975 bnad_lock_init(struct bnad *bnad)
2977 spin_lock_init(&bnad->bna_lock);
2978 mutex_init(&bnad->conf_mutex);
2981 static void
2982 bnad_lock_uninit(struct bnad *bnad)
2984 mutex_destroy(&bnad->conf_mutex);
2987 /* PCI Initialization */
2988 static int
2989 bnad_pci_init(struct bnad *bnad,
2990 struct pci_dev *pdev, bool *using_dac)
2992 int err;
2994 err = pci_enable_device(pdev);
2995 if (err)
2996 return err;
2997 err = pci_request_regions(pdev, BNAD_NAME);
2998 if (err)
2999 goto disable_device;
3000 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
3001 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
3002 *using_dac = 1;
3003 } else {
3004 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3005 if (err) {
3006 err = pci_set_consistent_dma_mask(pdev,
3007 DMA_BIT_MASK(32));
3008 if (err)
3009 goto release_regions;
3011 *using_dac = 0;
3013 pci_set_master(pdev);
3014 return 0;
3016 release_regions:
3017 pci_release_regions(pdev);
3018 disable_device:
3019 pci_disable_device(pdev);
3021 return err;
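/*
 * Note: the DMA setup above first tries 64-bit streaming and coherent
 * masks (reporting using_dac = 1 so NETIF_F_HIGHDMA can be set later);
 * otherwise it falls back to 32-bit masks and only gives up when those
 * cannot be set either.
 */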
3024 static void
3025 bnad_pci_uninit(struct pci_dev *pdev)
3027 pci_release_regions(pdev);
3028 pci_disable_device(pdev);
3031 static int __devinit
3032 bnad_pci_probe(struct pci_dev *pdev,
3033 const struct pci_device_id *pcidev_id)
3035 bool using_dac;
3036 int err;
3037 struct bnad *bnad;
3038 struct bna *bna;
3039 struct net_device *netdev;
3040 struct bfa_pcidev pcidev_info;
3041 unsigned long flags;
3043 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3044 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3046 mutex_lock(&bnad_fwimg_mutex);
3047 if (!cna_get_firmware_buf(pdev)) {
3048 mutex_unlock(&bnad_fwimg_mutex);
3049 pr_warn("Failed to load Firmware Image!\n");
3050 return -ENODEV;
3052 mutex_unlock(&bnad_fwimg_mutex);
3055 * Allocates sizeof(struct net_device + struct bnad)
3056 * bnad = netdev_priv(netdev)
3058 netdev = alloc_etherdev(sizeof(struct bnad));
3059 if (!netdev) {
3060 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3061 err = -ENOMEM;
3062 return err;
3064 bnad = netdev_priv(netdev);
3067 * PCI initialization
3068 * Output : using_dac = 1 for 64 bit DMA
3069 * = 0 for 32 bit DMA
3071 err = bnad_pci_init(bnad, pdev, &using_dac);
3072 if (err)
3073 goto free_netdev;
3075 bnad_lock_init(bnad);
3077 * Initialize bnad structure
3078 * Setup relation between pci_dev & netdev
3079 * Init Tx free tasklet
3081 err = bnad_init(bnad, pdev, netdev);
3082 if (err)
3083 goto pci_uninit;
3084 /* Initialize netdev structure, set up ethtool ops */
3085 bnad_netdev_init(bnad, using_dac);
3087 bnad_enable_msix(bnad);
3089 /* Get resource requirements from bna */
3090 bna_res_req(&bnad->res_info[0]);
3092 /* Allocate resources from bna */
3093 err = bnad_res_alloc(bnad);
3094 if (err)
3095 goto free_netdev;
3097 bna = &bnad->bna;
3099 /* Setup pcidev_info for bna_init() */
3100 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3101 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3102 pcidev_info.device_id = bnad->pcidev->device;
3103 pcidev_info.pci_bar_kva = bnad->bar0;
3105 mutex_lock(&bnad->conf_mutex);
3107 spin_lock_irqsave(&bnad->bna_lock, flags);
3108 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3109 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3111 bnad->stats.bna_stats = &bna->stats;
3113 /* Set up timers */
3114 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3115 ((unsigned long)bnad));
3116 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3117 ((unsigned long)bnad));
3118 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
3119 ((unsigned long)bnad));
3121 /* Now start the timer before calling IOC */
3122 mod_timer(&bnad->bna.device.ioc.ioc_timer,
3123 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3126 * Start the chip
3127 * Don't care even if err != 0, bna state machine will
3128 * deal with it
3130 err = bnad_device_enable(bnad);
3132 /* Get the burnt-in mac */
3133 spin_lock_irqsave(&bnad->bna_lock, flags);
3134 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3135 bnad_set_netdev_perm_addr(bnad);
3136 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3138 mutex_unlock(&bnad->conf_mutex);
3141 * Make sure the link appears down to the stack
3143 netif_carrier_off(netdev);
3145 /* Finally, register with the net_device layer */
3146 err = register_netdev(netdev);
3147 if (err) {
3148 pr_err("BNA : Registering with netdev failed\n");
3149 goto disable_device;
3152 return 0;
3154 disable_device:
3155 mutex_lock(&bnad->conf_mutex);
3156 bnad_device_disable(bnad);
3157 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3158 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3159 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3160 spin_lock_irqsave(&bnad->bna_lock, flags);
3161 bna_uninit(bna);
3162 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3163 mutex_unlock(&bnad->conf_mutex);
3165 bnad_res_free(bnad);
3166 bnad_disable_msix(bnad);
3167 pci_uninit:
3168 bnad_pci_uninit(pdev);
3169 bnad_lock_uninit(bnad);
3170 bnad_uninit(bnad);
3171 free_netdev:
3172 free_netdev(netdev);
3173 return err;
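/*
 * Note: the error labels above unwind in reverse order of setup
 * (disable the device and its timers, uninit bna, free resources and
 * MSI-X, undo PCI init, free the netdev), mirroring bnad_pci_remove()
 * below.
 */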
3176 static void __devexit
3177 bnad_pci_remove(struct pci_dev *pdev)
3179 struct net_device *netdev = pci_get_drvdata(pdev);
3180 struct bnad *bnad;
3181 struct bna *bna;
3182 unsigned long flags;
3184 if (!netdev)
3185 return;
3187 pr_info("%s bnad_pci_remove\n", netdev->name);
3188 bnad = netdev_priv(netdev);
3189 bna = &bnad->bna;
3191 unregister_netdev(netdev);
3193 mutex_lock(&bnad->conf_mutex);
3194 bnad_device_disable(bnad);
3195 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3196 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3197 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3198 spin_lock_irqsave(&bnad->bna_lock, flags);
3199 bna_uninit(bna);
3200 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3201 mutex_unlock(&bnad->conf_mutex);
3203 bnad_res_free(bnad);
3204 bnad_disable_msix(bnad);
3205 bnad_pci_uninit(pdev);
3206 bnad_lock_uninit(bnad);
3207 bnad_uninit(bnad);
3208 free_netdev(netdev);
3211 static const struct pci_device_id bnad_pci_id_table[] = {
3213 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3214 PCI_DEVICE_ID_BROCADE_CT),
3215 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3216 .class_mask = 0xffff00
3217 }, {0, }
3220 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3222 static struct pci_driver bnad_pci_driver = {
3223 .name = BNAD_NAME,
3224 .id_table = bnad_pci_id_table,
3225 .probe = bnad_pci_probe,
3226 .remove = __devexit_p(bnad_pci_remove),
3229 static int __init
3230 bnad_module_init(void)
3232 int err;
3234 pr_info("Brocade 10G Ethernet driver\n");
3236 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3238 err = pci_register_driver(&bnad_pci_driver);
3239 if (err < 0) {
3240 pr_err("bna : PCI registration failed in module init "
3241 "(%d)\n", err);
3242 return err;
3245 return 0;
3248 static void __exit
3249 bnad_module_exit(void)
3251 pci_unregister_driver(&bnad_pci_driver);
3253 if (bfi_fw)
3254 release_firmware(bfi_fw);
3257 module_init(bnad_module_init);
3258 module_exit(bnad_module_exit);
3260 MODULE_AUTHOR("Brocade");
3261 MODULE_LICENSE("GPL");
3262 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3263 MODULE_VERSION(BNAD_VERSION);
3264 MODULE_FIRMWARE(CNA_FW_FILE_CT);