bna: Fix for TX queue
drivers/net/bna/bnad.c
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
18 #include <linux/netdevice.h>
19 #include <linux/skbuff.h>
20 #include <linux/etherdevice.h>
21 #include <linux/in.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24 #include <linux/if_ether.h>
25 #include <linux/ip.h>
27 #include "bnad.h"
28 #include "bna.h"
29 #include "cna.h"
31 static DEFINE_MUTEX(bnad_fwimg_mutex);
34 * Module params
36 static uint bnad_msix_disable;
37 module_param(bnad_msix_disable, uint, 0444);
38 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
40 static uint bnad_ioc_auto_recover = 1;
41 module_param(bnad_ioc_auto_recover, uint, 0444);
42 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
45 * Global variables
47 u32 bnad_rxqs_per_cq = 2;
49 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
52 * Local MACROS
54 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
56 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
58 #define BNAD_GET_MBOX_IRQ(_bnad) \
59 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
60 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
61 ((_bnad)->pcidev->irq))
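/*
 * The mailbox interrupt is serviced either by the last entry of the
 * MSI-X table (when BNAD_CF_MSIX is set) or by the legacy INTx line of
 * the PCI device; BNAD_GET_MBOX_IRQ() resolves to whichever is in use,
 * and bnad_mbox_irq_alloc()/bnad_mbox_irq_free() below request and
 * release that vector.
 */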
63 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
64 do { \
65 (_res_info)->res_type = BNA_RES_T_MEM; \
66 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
67 (_res_info)->res_u.mem_info.num = (_num); \
68 (_res_info)->res_u.mem_info.len = \
69 sizeof(struct bnad_unmap_q) + \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
71 } while (0)
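/*
 * Each unmap queue is carved out of kernel virtual memory
 * (BNA_MEM_T_KVA): one struct bnad_unmap_q header plus (_depth - 1)
 * additional struct bnad_skb_unmap slots -- the "- 1" presumably
 * because struct bnad_unmap_q already embeds the first unmap_array
 * entry.  For example, for a depth of 4 the requested length is:
 *
 *   sizeof(struct bnad_unmap_q) + 3 * sizeof(struct bnad_skb_unmap)
 */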
73 #define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
76 * Reinitialize completions in CQ, once Rx is taken down
78 static void
79 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
81 struct bna_cq_entry *cmpl, *next_cmpl;
82 unsigned int wi_range, wis = 0, ccb_prod = 0;
83 int i;
85 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
86 wi_range);
88 for (i = 0; i < ccb->q_depth; i++) {
89 wis++;
90 if (likely(--wi_range))
91 next_cmpl = cmpl + 1;
92 else {
93 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
94 wis = 0;
95 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
96 next_cmpl, wi_range);
98 cmpl->valid = 0;
99 cmpl = next_cmpl;
104 * Frees all pending Tx Bufs
105 * At this point no activity is expected on the Q,
106 * so DMA unmap & freeing is fine.
108 static void
109 bnad_free_all_txbufs(struct bnad *bnad,
110 struct bna_tcb *tcb)
112 u32 unmap_cons;
113 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
114 struct bnad_skb_unmap *unmap_array;
115 struct sk_buff *skb = NULL;
116 int i;
118 unmap_array = unmap_q->unmap_array;
120 unmap_cons = 0;
121 while (unmap_cons < unmap_q->q_depth) {
122 skb = unmap_array[unmap_cons].skb;
123 if (!skb) {
124 unmap_cons++;
125 continue;
127 unmap_array[unmap_cons].skb = NULL;
129 pci_unmap_single(bnad->pcidev,
130 pci_unmap_addr(&unmap_array[unmap_cons],
131 dma_addr), skb_headlen(skb),
132 PCI_DMA_TODEVICE);
134 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
135 if (++unmap_cons >= unmap_q->q_depth)
136 break;
138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
139 pci_unmap_page(bnad->pcidev,
140 pci_unmap_addr(&unmap_array[unmap_cons],
141 dma_addr),
142 skb_shinfo(skb)->frags[i].size,
143 PCI_DMA_TODEVICE);
144 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
145 0);
146 if (++unmap_cons >= unmap_q->q_depth)
147 break;
149 dev_kfree_skb_any(skb);
153 /* Data Path Handlers */
156 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
157 * Can be called in a) Interrupt context
158 * b) Sending context
159 * c) Tasklet context
161 static u32
162 bnad_free_txbufs(struct bnad *bnad,
163 struct bna_tcb *tcb)
165 u32 sent_packets = 0, sent_bytes = 0;
166 u16 wis, unmap_cons, updated_hw_cons;
167 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
168 struct bnad_skb_unmap *unmap_array;
169 struct sk_buff *skb;
170 int i;
173 * Just return if TX is stopped. This check is useful
174 * when bnad_free_txbufs() runs from a tasklet that was
175 * scheduled before bnad_cb_tx_cleanup() cleared the
176 * BNAD_TXQ_TX_STARTED bit, but actually executes after
177 * the cleanup has completed.
179 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
180 return 0;
182 updated_hw_cons = *(tcb->hw_consumer_index);
184 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
185 updated_hw_cons, tcb->q_depth);
187 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
189 unmap_array = unmap_q->unmap_array;
190 unmap_cons = unmap_q->consumer_index;
192 prefetch(&unmap_array[unmap_cons + 1]);
193 while (wis) {
194 skb = unmap_array[unmap_cons].skb;
196 unmap_array[unmap_cons].skb = NULL;
198 sent_packets++;
199 sent_bytes += skb->len;
200 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
202 pci_unmap_single(bnad->pcidev,
203 pci_unmap_addr(&unmap_array[unmap_cons],
204 dma_addr), skb_headlen(skb),
205 PCI_DMA_TODEVICE);
206 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
207 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
209 prefetch(&unmap_array[unmap_cons + 1]);
210 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
211 prefetch(&unmap_array[unmap_cons + 1]);
213 pci_unmap_page(bnad->pcidev,
214 pci_unmap_addr(&unmap_array[unmap_cons],
215 dma_addr),
216 skb_shinfo(skb)->frags[i].size,
217 PCI_DMA_TODEVICE);
218 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
219 0);
220 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
222 dev_kfree_skb_any(skb);
225 /* Update consumer pointers. */
226 tcb->consumer_index = updated_hw_cons;
227 unmap_q->consumer_index = unmap_cons;
229 tcb->txq->tx_packets += sent_packets;
230 tcb->txq->tx_bytes += sent_bytes;
232 return sent_packets;
235 /* Tx Free Tasklet function */
236 /* Frees for all the tcb's in all the Tx's */
238 * Scheduled from sending context, so that
239 * the fat Tx lock is not held for too long
240 * in the sending context.
242 static void
243 bnad_tx_free_tasklet(unsigned long bnad_ptr)
245 struct bnad *bnad = (struct bnad *)bnad_ptr;
246 struct bna_tcb *tcb;
247 u32 acked = 0;
248 int i, j;
250 for (i = 0; i < bnad->num_tx; i++) {
251 for (j = 0; j < bnad->num_txq_per_tx; j++) {
252 tcb = bnad->tx_info[i].tcb[j];
253 if (!tcb)
254 continue;
255 if (((u16) (*tcb->hw_consumer_index) !=
256 tcb->consumer_index) &&
257 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
258 &tcb->flags))) {
259 acked = bnad_free_txbufs(bnad, tcb);
260 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
261 &tcb->flags)))
262 bna_ib_ack(tcb->i_dbell, acked);
263 smp_mb__before_clear_bit();
264 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
266 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
267 &tcb->flags)))
268 continue;
269 if (netif_queue_stopped(bnad->netdev)) {
270 if (acked && netif_carrier_ok(bnad->netdev) &&
271 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
272 BNAD_NETIF_WAKE_THRESHOLD) {
273 netif_wake_queue(bnad->netdev);
274 /* TODO */
275 /* Counters for individual TxQs? */
276 BNAD_UPDATE_CTR(bnad,
277 netif_queue_wakeup);
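/*
 * bnad_tx() is the common Tx-completion path used by both the MSIX Tx
 * handler and the INTx ISR.  The BNAD_TXQ_FREE_SENT bit is used as a
 * per-TCB exclusion flag against bnad_tx_free_tasklet(), so only one
 * context reclaims buffers for a given TxQ at a time; the netdev queue
 * is woken once enough entries are free, and the IB is acked with the
 * number of completions processed.
 */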
284 static u32
285 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
287 struct net_device *netdev = bnad->netdev;
288 u32 sent = 0;
290 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
291 return 0;
293 sent = bnad_free_txbufs(bnad, tcb);
294 if (sent) {
295 if (netif_queue_stopped(netdev) &&
296 netif_carrier_ok(netdev) &&
297 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
298 BNAD_NETIF_WAKE_THRESHOLD) {
299 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
300 netif_wake_queue(netdev);
301 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
306 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
307 bna_ib_ack(tcb->i_dbell, sent);
309 smp_mb__before_clear_bit();
310 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
312 return sent;
315 /* MSIX Tx Completion Handler */
316 static irqreturn_t
317 bnad_msix_tx(int irq, void *data)
319 struct bna_tcb *tcb = (struct bna_tcb *)data;
320 struct bnad *bnad = tcb->bnad;
322 bnad_tx(bnad, tcb);
324 return IRQ_HANDLED;
327 static void
328 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
330 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
332 rcb->producer_index = 0;
333 rcb->consumer_index = 0;
335 unmap_q->producer_index = 0;
336 unmap_q->consumer_index = 0;
339 static void
340 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
342 struct bnad_unmap_q *unmap_q;
343 struct sk_buff *skb;
344 int unmap_cons;
346 unmap_q = rcb->unmap_q;
347 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
348 skb = unmap_q->unmap_array[unmap_cons].skb;
349 if (!skb)
350 continue;
351 unmap_q->unmap_array[unmap_cons].skb = NULL;
352 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
353 unmap_array[unmap_cons],
354 dma_addr), rcb->rxq->buffer_size,
355 PCI_DMA_FROMDEVICE);
356 dev_kfree_skb(skb);
358 bnad_reset_rcb(bnad, rcb);
361 static void
362 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
364 u16 to_alloc, alloced, unmap_prod, wi_range;
365 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
366 struct bnad_skb_unmap *unmap_array;
367 struct bna_rxq_entry *rxent;
368 struct sk_buff *skb;
369 dma_addr_t dma_addr;
371 alloced = 0;
372 to_alloc =
373 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
375 unmap_array = unmap_q->unmap_array;
376 unmap_prod = unmap_q->producer_index;
378 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
380 while (to_alloc--) {
381 if (!wi_range) {
382 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
383 wi_range);
385 skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
386 GFP_ATOMIC);
387 if (unlikely(!skb)) {
388 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
389 goto finishing;
391 skb->dev = bnad->netdev;
392 skb_reserve(skb, NET_IP_ALIGN);
393 unmap_array[unmap_prod].skb = skb;
394 dma_addr = pci_map_single(bnad->pcidev, skb->data,
395 rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
396 pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
397 dma_addr);
398 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
399 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
401 rxent++;
402 wi_range--;
403 alloced++;
406 finishing:
407 if (likely(alloced)) {
408 unmap_q->producer_index = unmap_prod;
409 rcb->producer_index = unmap_prod;
410 smp_mb();
411 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
412 bna_rxq_prod_indx_doorbell(rcb);
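/*
 * bnad_refill_rxq() posts new receive buffers only when the number of
 * free unmap-queue entries has reached at least
 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT).  The BNAD_RXQ_REFILL bit
 * guards against concurrent refills from the poll loop and the
 * rx_post callback.
 */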
416 static inline void
417 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
419 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
421 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
422 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
423 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
424 bnad_alloc_n_post_rxbufs(bnad, rcb);
425 smp_mb__before_clear_bit();
426 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
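/*
 * bnad_poll_cq() is the Rx completion loop, called from the NAPI poll
 * routine.  It walks valid CQ entries up to the given budget, matches
 * each completion to rcb[0] or rcb[1] by rxq_id, unmaps the buffer,
 * sets the checksum state from the completion flags and hands the skb
 * to the stack (GRO or VLAN-accelerated receive where applicable),
 * then acks the IB and refills the RxQ(s).
 */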
430 static u32
431 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
433 struct bna_cq_entry *cmpl, *next_cmpl;
434 struct bna_rcb *rcb = NULL;
435 unsigned int wi_range, packets = 0, wis = 0;
436 struct bnad_unmap_q *unmap_q;
437 struct sk_buff *skb;
438 u32 flags;
439 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
440 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
442 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
443 return 0;
445 prefetch(bnad->netdev);
446 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
447 wi_range);
448 BUG_ON(!(wi_range <= ccb->q_depth));
449 while (cmpl->valid && packets < budget) {
450 packets++;
451 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
453 if (qid0 == cmpl->rxq_id)
454 rcb = ccb->rcb[0];
455 else
456 rcb = ccb->rcb[1];
458 unmap_q = rcb->unmap_q;
460 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
461 BUG_ON(!(skb));
462 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
463 pci_unmap_single(bnad->pcidev,
464 pci_unmap_addr(&unmap_q->
465 unmap_array[unmap_q->
466 consumer_index],
467 dma_addr),
468 rcb->rxq->buffer_size,
469 PCI_DMA_FROMDEVICE);
470 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
472 /* Should be more efficient ? Performance ? */
473 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
475 wis++;
476 if (likely(--wi_range))
477 next_cmpl = cmpl + 1;
478 else {
479 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
480 wis = 0;
481 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
482 next_cmpl, wi_range);
483 BUG_ON(!(wi_range <= ccb->q_depth));
485 prefetch(next_cmpl);
487 flags = ntohl(cmpl->flags);
488 if (unlikely
489 (flags &
490 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
491 BNA_CQ_EF_TOO_LONG))) {
492 dev_kfree_skb_any(skb);
493 rcb->rxq->rx_packets_with_error++;
494 goto next;
497 skb_put(skb, ntohs(cmpl->length));
498 if (likely
499 (bnad->rx_csum &&
500 (((flags & BNA_CQ_EF_IPV4) &&
501 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
502 (flags & BNA_CQ_EF_IPV6)) &&
503 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
504 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
505 skb->ip_summed = CHECKSUM_UNNECESSARY;
506 else
507 skb_checksum_none_assert(skb);
509 rcb->rxq->rx_packets++;
510 rcb->rxq->rx_bytes += skb->len;
511 skb->protocol = eth_type_trans(skb, bnad->netdev);
513 if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
514 struct bnad_rx_ctrl *rx_ctrl =
515 (struct bnad_rx_ctrl *)ccb->ctrl;
516 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
517 vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
518 ntohs(cmpl->vlan_tag), skb);
519 else
520 vlan_hwaccel_receive_skb(skb,
521 bnad->vlan_grp,
522 ntohs(cmpl->vlan_tag));
524 } else { /* Not VLAN tagged/stripped */
525 struct bnad_rx_ctrl *rx_ctrl =
526 (struct bnad_rx_ctrl *)ccb->ctrl;
527 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
528 napi_gro_receive(&rx_ctrl->napi, skb);
529 else
530 netif_receive_skb(skb);
533 next:
534 cmpl->valid = 0;
535 cmpl = next_cmpl;
538 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
540 if (likely(ccb)) {
541 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
542 bna_ib_ack(ccb->i_dbell, packets);
543 bnad_refill_rxq(bnad, ccb->rcb[0]);
544 if (ccb->rcb[1])
545 bnad_refill_rxq(bnad, ccb->rcb[1]);
546 } else {
547 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
548 bna_ib_ack(ccb->i_dbell, 0);
551 return packets;
554 static void
555 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
557 if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
558 return;
560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561 bna_ib_ack(ccb->i_dbell, 0);
564 static void
565 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
567 unsigned long flags;
569 spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
570 bnad_enable_rx_irq_unsafe(ccb);
571 spin_unlock_irqrestore(&bnad->bna_lock, flags);
574 static void
575 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
577 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
578 struct napi_struct *napi = &rx_ctrl->napi;
580 if (likely(napi_schedule_prep(napi))) {
581 bnad_disable_rx_irq(bnad, ccb);
582 __napi_schedule(napi);
584 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
587 /* MSIX Rx Path Handler */
588 static irqreturn_t
589 bnad_msix_rx(int irq, void *data)
591 struct bna_ccb *ccb = (struct bna_ccb *)data;
592 struct bnad *bnad = ccb->bnad;
594 bnad_netif_rx_schedule_poll(bnad, ccb);
596 return IRQ_HANDLED;
599 /* Interrupt handlers */
601 /* Mbox Interrupt Handlers */
602 static irqreturn_t
603 bnad_msix_mbox_handler(int irq, void *data)
605 u32 intr_status;
606 unsigned long flags;
607 struct bnad *bnad = (struct bnad *)data;
609 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
610 return IRQ_HANDLED;
612 spin_lock_irqsave(&bnad->bna_lock, flags);
614 bna_intr_status_get(&bnad->bna, intr_status);
616 if (BNA_IS_MBOX_ERR_INTR(intr_status))
617 bna_mbox_handler(&bnad->bna, intr_status);
619 spin_unlock_irqrestore(&bnad->bna_lock, flags);
621 return IRQ_HANDLED;
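/*
 * Legacy (INTx) interrupt handler.  It reads the combined interrupt
 * status, handles mailbox events under bna_lock, and for data
 * interrupts reclaims every TxQ and schedules NAPI polling on every
 * active Rx path.
 */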
624 static irqreturn_t
625 bnad_isr(int irq, void *data)
627 int i, j;
628 u32 intr_status;
629 unsigned long flags;
630 struct bnad *bnad = (struct bnad *)data;
631 struct bnad_rx_info *rx_info;
632 struct bnad_rx_ctrl *rx_ctrl;
634 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
635 return IRQ_NONE;
637 bna_intr_status_get(&bnad->bna, intr_status);
639 if (unlikely(!intr_status))
640 return IRQ_NONE;
642 spin_lock_irqsave(&bnad->bna_lock, flags);
644 if (BNA_IS_MBOX_ERR_INTR(intr_status))
645 bna_mbox_handler(&bnad->bna, intr_status);
647 spin_unlock_irqrestore(&bnad->bna_lock, flags);
649 if (!BNA_IS_INTX_DATA_INTR(intr_status))
650 return IRQ_HANDLED;
652 /* Process data interrupts */
653 /* Tx processing */
654 for (i = 0; i < bnad->num_tx; i++) {
655 for (j = 0; j < bnad->num_txq_per_tx; j++)
656 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
658 /* Rx processing */
659 for (i = 0; i < bnad->num_rx; i++) {
660 rx_info = &bnad->rx_info[i];
661 if (!rx_info->rx)
662 continue;
663 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
664 rx_ctrl = &rx_info->rx_ctrl[j];
665 if (rx_ctrl->ccb)
666 bnad_netif_rx_schedule_poll(bnad,
667 rx_ctrl->ccb);
670 return IRQ_HANDLED;
674 * Called in interrupt / callback context
675 * with bna_lock held, so cfg_flags access is OK
677 static void
678 bnad_enable_mbox_irq(struct bnad *bnad)
680 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
682 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
686 * Called with bnad->bna_lock held because of
687 * bnad->cfg_flags access.
689 static void
690 bnad_disable_mbox_irq(struct bnad *bnad)
692 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
694 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
697 static void
698 bnad_set_netdev_perm_addr(struct bnad *bnad)
700 struct net_device *netdev = bnad->netdev;
702 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
703 if (is_zero_ether_addr(netdev->dev_addr))
704 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
707 /* Control Path Handlers */
709 /* Callbacks */
710 void
711 bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
713 bnad_enable_mbox_irq(bnad);
716 void
717 bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
719 bnad_disable_mbox_irq(bnad);
722 void
723 bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
725 complete(&bnad->bnad_completions.ioc_comp);
726 bnad->bnad_completions.ioc_comp_status = status;
729 void
730 bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
732 complete(&bnad->bnad_completions.ioc_comp);
733 bnad->bnad_completions.ioc_comp_status = status;
736 static void
737 bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
739 struct bnad *bnad = (struct bnad *)arg;
741 complete(&bnad->bnad_completions.port_comp);
743 netif_carrier_off(bnad->netdev);
746 void
747 bnad_cb_port_link_status(struct bnad *bnad,
748 enum bna_link_status link_status)
750 bool link_up = 0;
752 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
754 if (link_status == BNA_CEE_UP) {
755 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
756 BNAD_UPDATE_CTR(bnad, cee_up);
757 } else
758 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
760 if (link_up) {
761 if (!netif_carrier_ok(bnad->netdev)) {
762 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
763 if (!tcb)
764 return;
765 pr_warn("bna: %s link up\n",
766 bnad->netdev->name);
767 netif_carrier_on(bnad->netdev);
768 BNAD_UPDATE_CTR(bnad, link_toggle);
769 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
770 /* Force an immediate Transmit Schedule */
771 pr_info("bna: %s TX_STARTED\n",
772 bnad->netdev->name);
773 netif_wake_queue(bnad->netdev);
774 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
775 } else {
776 netif_stop_queue(bnad->netdev);
777 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
780 } else {
781 if (netif_carrier_ok(bnad->netdev)) {
782 pr_warn("bna: %s link down\n",
783 bnad->netdev->name);
784 netif_carrier_off(bnad->netdev);
785 BNAD_UPDATE_CTR(bnad, link_toggle);
790 static void
791 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
792 enum bna_cb_status status)
794 struct bnad *bnad = (struct bnad *)arg;
796 complete(&bnad->bnad_completions.tx_comp);
799 static void
800 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
802 struct bnad_tx_info *tx_info =
803 (struct bnad_tx_info *)tcb->txq->tx->priv;
804 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
806 tx_info->tcb[tcb->id] = tcb;
807 unmap_q->producer_index = 0;
808 unmap_q->consumer_index = 0;
809 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
812 static void
813 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
815 struct bnad_tx_info *tx_info =
816 (struct bnad_tx_info *)tcb->txq->tx->priv;
817 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
819 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
820 cpu_relax();
822 bnad_free_all_txbufs(bnad, tcb);
824 unmap_q->producer_index = 0;
825 unmap_q->consumer_index = 0;
827 smp_mb__before_clear_bit();
828 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
830 tx_info->tcb[tcb->id] = NULL;
833 static void
834 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
836 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
838 unmap_q->producer_index = 0;
839 unmap_q->consumer_index = 0;
840 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
843 static void
844 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
846 bnad_free_all_rxbufs(bnad, rcb);
849 static void
850 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
852 struct bnad_rx_info *rx_info =
853 (struct bnad_rx_info *)ccb->cq->rx->priv;
855 rx_info->rx_ctrl[ccb->id].ccb = ccb;
856 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
859 static void
860 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
862 struct bnad_rx_info *rx_info =
863 (struct bnad_rx_info *)ccb->cq->rx->priv;
865 rx_info->rx_ctrl[ccb->id].ccb = NULL;
868 static void
869 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
871 struct bnad_tx_info *tx_info =
872 (struct bnad_tx_info *)tcb->txq->tx->priv;
874 if (tx_info != &bnad->tx_info[0])
875 return;
877 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
878 netif_stop_queue(bnad->netdev);
879 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
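/*
 * Tx resume callback from bna.  Waits for any in-progress buffer
 * reclaim (BNAD_TXQ_FREE_SENT), frees whatever is still queued on the
 * TxQ, resets the unmap queue and then restarts the netdev queue if
 * the carrier is up.
 */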
882 static void
883 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
885 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
887 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
888 return;
890 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
892 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
893 cpu_relax();
895 bnad_free_all_txbufs(bnad, tcb);
897 unmap_q->producer_index = 0;
898 unmap_q->consumer_index = 0;
900 smp_mb__before_clear_bit();
901 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
904 * Workaround for the first device-enable failure, after which
905 * we get a zero MAC address. We try to fetch the MAC address
906 * again here.
908 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
909 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
910 bnad_set_netdev_perm_addr(bnad);
913 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
915 if (netif_carrier_ok(bnad->netdev)) {
916 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
917 netif_wake_queue(bnad->netdev);
918 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
922 static void
923 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
925 /* Delay only once for the whole Tx Path Shutdown */
926 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
927 mdelay(BNAD_TXRX_SYNC_MDELAY);
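/*
 * Rx cleanup callback: marks both RCBs of the CQ as stopped and delays
 * once per Rx path shutdown (BNAD_RF_RX_SHUTDOWN_DELAYED), presumably
 * to let any in-flight NAPI poll or refill drain before the buffers
 * are freed.
 */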
930 static void
931 bnad_cb_rx_cleanup(struct bnad *bnad,
932 struct bna_ccb *ccb)
934 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
936 if (ccb->rcb[1])
937 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
939 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
940 mdelay(BNAD_TXRX_SYNC_MDELAY);
943 static void
944 bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
946 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
948 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
950 if (rcb == rcb->cq->ccb->rcb[0])
951 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
953 bnad_free_all_rxbufs(bnad, rcb);
955 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
957 /* Now allocate & post buffers for this RCB */
958 /* !!Allocation in callback context */
959 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
960 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
961 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
962 bnad_alloc_n_post_rxbufs(bnad, rcb);
963 smp_mb__before_clear_bit();
964 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
968 static void
969 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
970 enum bna_cb_status status)
972 struct bnad *bnad = (struct bnad *)arg;
974 complete(&bnad->bnad_completions.rx_comp);
977 static void
978 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
979 enum bna_cb_status status)
981 bnad->bnad_completions.mcast_comp_status = status;
982 complete(&bnad->bnad_completions.mcast_comp);
985 void
986 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
987 struct bna_stats *stats)
989 if (status == BNA_CB_SUCCESS)
990 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
992 if (!netif_running(bnad->netdev) ||
993 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
994 return;
996 mod_timer(&bnad->stats_timer,
997 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1000 /* Resource allocation, free functions */
1002 static void
1003 bnad_mem_free(struct bnad *bnad,
1004 struct bna_mem_info *mem_info)
1006 int i;
1007 dma_addr_t dma_pa;
1009 if (mem_info->mdl == NULL)
1010 return;
1012 for (i = 0; i < mem_info->num; i++) {
1013 if (mem_info->mdl[i].kva != NULL) {
1014 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1015 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1016 dma_pa);
1017 pci_free_consistent(bnad->pcidev,
1018 mem_info->mdl[i].len,
1019 mem_info->mdl[i].kva, dma_pa);
1020 } else
1021 kfree(mem_info->mdl[i].kva);
1024 kfree(mem_info->mdl);
1025 mem_info->mdl = NULL;
1028 static int
1029 bnad_mem_alloc(struct bnad *bnad,
1030 struct bna_mem_info *mem_info)
1032 int i;
1033 dma_addr_t dma_pa;
1035 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1036 mem_info->mdl = NULL;
1037 return 0;
1040 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1041 GFP_KERNEL);
1042 if (mem_info->mdl == NULL)
1043 return -ENOMEM;
1045 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1046 for (i = 0; i < mem_info->num; i++) {
1047 mem_info->mdl[i].len = mem_info->len;
1048 mem_info->mdl[i].kva =
1049 pci_alloc_consistent(bnad->pcidev,
1050 mem_info->len, &dma_pa);
1052 if (mem_info->mdl[i].kva == NULL)
1053 goto err_return;
1055 BNA_SET_DMA_ADDR(dma_pa,
1056 &(mem_info->mdl[i].dma));
1058 } else {
1059 for (i = 0; i < mem_info->num; i++) {
1060 mem_info->mdl[i].len = mem_info->len;
1061 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1062 GFP_KERNEL);
1063 if (mem_info->mdl[i].kva == NULL)
1064 goto err_return;
1068 return 0;
1070 err_return:
1071 bnad_mem_free(bnad, mem_info);
1072 return -ENOMEM;
1075 /* Free IRQ for Mailbox */
1076 static void
1077 bnad_mbox_irq_free(struct bnad *bnad,
1078 struct bna_intr_info *intr_info)
1080 int irq;
1081 unsigned long flags;
1083 if (intr_info->idl == NULL)
1084 return;
1086 spin_lock_irqsave(&bnad->bna_lock, flags);
1087 bnad_disable_mbox_irq(bnad);
1088 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1090 irq = BNAD_GET_MBOX_IRQ(bnad);
1091 free_irq(irq, bnad);
1093 kfree(intr_info->idl);
1097 * Allocates the IRQ for the Mailbox, but keeps it disabled.
1098 * It will be enabled once we get the mbox enable callback
1099 * from bna.
1101 static int
1102 bnad_mbox_irq_alloc(struct bnad *bnad,
1103 struct bna_intr_info *intr_info)
1105 int err = 0;
1106 unsigned long flags, irq_flags;
1107 u32 irq;
1108 irq_handler_t irq_handler;
1110 /* Mbox should use only 1 vector */
1112 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1113 if (!intr_info->idl)
1114 return -ENOMEM;
1116 spin_lock_irqsave(&bnad->bna_lock, flags);
1117 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1118 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1119 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1120 irq_flags = 0;
1121 intr_info->intr_type = BNA_INTR_T_MSIX;
1122 intr_info->idl[0].vector = bnad->msix_num - 1;
1123 } else {
1124 irq_handler = (irq_handler_t)bnad_isr;
1125 irq = bnad->pcidev->irq;
1126 irq_flags = IRQF_SHARED;
1127 intr_info->intr_type = BNA_INTR_T_INTX;
1128 /* intr_info->idl.vector = 0 ? */
1130 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1132 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1135 * Set the Mbox IRQ disable flag, so that the IRQ handler
1136 * called from request_irq() for SHARED IRQs does not execute
1138 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1140 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1142 err = request_irq(irq, irq_handler, irq_flags,
1143 bnad->mbox_irq_name, bnad);
1145 if (err) {
1146 kfree(intr_info->idl);
1147 intr_info->idl = NULL;
1150 return err;
1153 static void
1154 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1156 kfree(intr_info->idl);
1157 intr_info->idl = NULL;
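/*
 * MSI-X vector layout used below: one vector per TxQ first, then one
 * per Rx path, with the last vector reserved for the mailbox (see
 * BNAD_GET_MBOX_IRQ()).  In INT-X mode, idl[0].vector is instead a bit
 * mask selecting the Tx (0x1) or Rx (0x2) interrupt block.
 */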
1160 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1161 static int
1162 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1163 uint txrx_id, struct bna_intr_info *intr_info)
1165 int i, vector_start = 0;
1166 u32 cfg_flags;
1167 unsigned long flags;
1169 spin_lock_irqsave(&bnad->bna_lock, flags);
1170 cfg_flags = bnad->cfg_flags;
1171 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1173 if (cfg_flags & BNAD_CF_MSIX) {
1174 intr_info->intr_type = BNA_INTR_T_MSIX;
1175 intr_info->idl = kcalloc(intr_info->num,
1176 sizeof(struct bna_intr_descr),
1177 GFP_KERNEL);
1178 if (!intr_info->idl)
1179 return -ENOMEM;
1181 switch (src) {
1182 case BNAD_INTR_TX:
1183 vector_start = txrx_id;
1184 break;
1186 case BNAD_INTR_RX:
1187 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1188 txrx_id;
1189 break;
1191 default:
1192 BUG();
1195 for (i = 0; i < intr_info->num; i++)
1196 intr_info->idl[i].vector = vector_start + i;
1197 } else {
1198 intr_info->intr_type = BNA_INTR_T_INTX;
1199 intr_info->num = 1;
1200 intr_info->idl = kcalloc(intr_info->num,
1201 sizeof(struct bna_intr_descr),
1202 GFP_KERNEL);
1203 if (!intr_info->idl)
1204 return -ENOMEM;
1206 switch (src) {
1207 case BNAD_INTR_TX:
1208 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1209 break;
1211 case BNAD_INTR_RX:
1212 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1213 break;
1216 return 0;
1220 * NOTE: Should be called for MSIX only
1221 * Unregisters Tx MSIX vector(s) from the kernel
1223 static void
1224 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1225 int num_txqs)
1227 int i;
1228 int vector_num;
1230 for (i = 0; i < num_txqs; i++) {
1231 if (tx_info->tcb[i] == NULL)
1232 continue;
1234 vector_num = tx_info->tcb[i]->intr_vector;
1235 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1240 * NOTE: Should be called for MSIX only
1241 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1243 static int
1244 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1245 uint tx_id, int num_txqs)
1247 int i;
1248 int err;
1249 int vector_num;
1251 for (i = 0; i < num_txqs; i++) {
1252 vector_num = tx_info->tcb[i]->intr_vector;
1253 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1254 tx_id + tx_info->tcb[i]->id);
1255 err = request_irq(bnad->msix_table[vector_num].vector,
1256 (irq_handler_t)bnad_msix_tx, 0,
1257 tx_info->tcb[i]->name,
1258 tx_info->tcb[i]);
1259 if (err)
1260 goto err_return;
1263 return 0;
1265 err_return:
1266 if (i > 0)
1267 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1268 return -1;
1272 * NOTE: Should be called for MSIX only
1273 * Unregisters Rx MSIX vector(s) from the kernel
1275 static void
1276 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1277 int num_rxps)
1279 int i;
1280 int vector_num;
1282 for (i = 0; i < num_rxps; i++) {
1283 if (rx_info->rx_ctrl[i].ccb == NULL)
1284 continue;
1286 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1287 free_irq(bnad->msix_table[vector_num].vector,
1288 rx_info->rx_ctrl[i].ccb);
1293 * NOTE: Should be called for MSIX only
1294 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1296 static int
1297 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1298 uint rx_id, int num_rxps)
1300 int i;
1301 int err;
1302 int vector_num;
1304 for (i = 0; i < num_rxps; i++) {
1305 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1306 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1307 bnad->netdev->name,
1308 rx_id + rx_info->rx_ctrl[i].ccb->id);
1309 err = request_irq(bnad->msix_table[vector_num].vector,
1310 (irq_handler_t)bnad_msix_rx, 0,
1311 rx_info->rx_ctrl[i].ccb->name,
1312 rx_info->rx_ctrl[i].ccb);
1313 if (err)
1314 goto err_return;
1317 return 0;
1319 err_return:
1320 if (i > 0)
1321 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1322 return -1;
1325 /* Free Tx object Resources */
1326 static void
1327 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1329 int i;
1331 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1332 if (res_info[i].res_type == BNA_RES_T_MEM)
1333 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1334 else if (res_info[i].res_type == BNA_RES_T_INTR)
1335 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1339 /* Allocates memory and interrupt resources for Tx object */
1340 static int
1341 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1342 uint tx_id)
1344 int i, err = 0;
1346 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1347 if (res_info[i].res_type == BNA_RES_T_MEM)
1348 err = bnad_mem_alloc(bnad,
1349 &res_info[i].res_u.mem_info);
1350 else if (res_info[i].res_type == BNA_RES_T_INTR)
1351 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1352 &res_info[i].res_u.intr_info);
1353 if (err)
1354 goto err_return;
1356 return 0;
1358 err_return:
1359 bnad_tx_res_free(bnad, res_info);
1360 return err;
1363 /* Free Rx object Resources */
1364 static void
1365 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1367 int i;
1369 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1370 if (res_info[i].res_type == BNA_RES_T_MEM)
1371 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1372 else if (res_info[i].res_type == BNA_RES_T_INTR)
1373 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1377 /* Allocates memory and interrupt resources for Rx object */
1378 static int
1379 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1380 uint rx_id)
1382 int i, err = 0;
1384 /* All memory needs to be allocated before setup_ccbs */
1385 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1386 if (res_info[i].res_type == BNA_RES_T_MEM)
1387 err = bnad_mem_alloc(bnad,
1388 &res_info[i].res_u.mem_info);
1389 else if (res_info[i].res_type == BNA_RES_T_INTR)
1390 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1391 &res_info[i].res_u.intr_info);
1392 if (err)
1393 goto err_return;
1395 return 0;
1397 err_return:
1398 bnad_rx_res_free(bnad, res_info);
1399 return err;
1402 /* Timer callbacks */
1403 /* a) IOC timer */
1404 static void
1405 bnad_ioc_timeout(unsigned long data)
1407 struct bnad *bnad = (struct bnad *)data;
1408 unsigned long flags;
1410 spin_lock_irqsave(&bnad->bna_lock, flags);
1411 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1412 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1415 static void
1416 bnad_ioc_hb_check(unsigned long data)
1418 struct bnad *bnad = (struct bnad *)data;
1419 unsigned long flags;
1421 spin_lock_irqsave(&bnad->bna_lock, flags);
1422 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1423 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1426 static void
1427 bnad_ioc_sem_timeout(unsigned long data)
1429 struct bnad *bnad = (struct bnad *)data;
1430 unsigned long flags;
1432 spin_lock_irqsave(&bnad->bna_lock, flags);
1433 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
1434 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1438 * All timer routines use bnad->bna_lock to protect against
1439 * the following race, which may occur in case of no locking:
1440 *   Time   CPU m           CPU n
1441 *   0      1 = test_bit
1442 *   1                      clear_bit
1443 *   2                      del_timer_sync
1444 *   3      mod_timer
1447 /* b) Dynamic Interrupt Moderation Timer */
1448 static void
1449 bnad_dim_timeout(unsigned long data)
1451 struct bnad *bnad = (struct bnad *)data;
1452 struct bnad_rx_info *rx_info;
1453 struct bnad_rx_ctrl *rx_ctrl;
1454 int i, j;
1455 unsigned long flags;
1457 if (!netif_carrier_ok(bnad->netdev))
1458 return;
1460 spin_lock_irqsave(&bnad->bna_lock, flags);
1461 for (i = 0; i < bnad->num_rx; i++) {
1462 rx_info = &bnad->rx_info[i];
1463 if (!rx_info->rx)
1464 continue;
1465 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1466 rx_ctrl = &rx_info->rx_ctrl[j];
1467 if (!rx_ctrl->ccb)
1468 continue;
1469 bna_rx_dim_update(rx_ctrl->ccb);
1473 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1474 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1475 mod_timer(&bnad->dim_timer,
1476 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1477 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1480 /* c) Statistics Timer */
1481 static void
1482 bnad_stats_timeout(unsigned long data)
1484 struct bnad *bnad = (struct bnad *)data;
1485 unsigned long flags;
1487 if (!netif_running(bnad->netdev) ||
1488 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1489 return;
1491 spin_lock_irqsave(&bnad->bna_lock, flags);
1492 bna_stats_get(&bnad->bna);
1493 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1497 * Set up timer for DIM
1498 * Called with bnad->bna_lock held
1500 void
1501 bnad_dim_timer_start(struct bnad *bnad)
1503 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1504 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1505 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1506 (unsigned long)bnad);
1507 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1508 mod_timer(&bnad->dim_timer,
1509 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1514 * Set up timer for statistics
1515 * Called with mutex_lock(&bnad->conf_mutex) held
1517 static void
1518 bnad_stats_timer_start(struct bnad *bnad)
1520 unsigned long flags;
1522 spin_lock_irqsave(&bnad->bna_lock, flags);
1523 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1524 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1525 (unsigned long)bnad);
1526 mod_timer(&bnad->stats_timer,
1527 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1529 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1533 * Stops the stats timer
1534 * Called with mutex_lock(&bnad->conf_mutex) held
1536 static void
1537 bnad_stats_timer_stop(struct bnad *bnad)
1539 int to_del = 0;
1540 unsigned long flags;
1542 spin_lock_irqsave(&bnad->bna_lock, flags);
1543 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1544 to_del = 1;
1545 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1546 if (to_del)
1547 del_timer_sync(&bnad->stats_timer);
1550 /* Utilities */
1552 static void
1553 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1555 int i = 1; /* Index 0 has broadcast address */
1556 struct netdev_hw_addr *mc_addr;
1558 netdev_for_each_mc_addr(mc_addr, netdev) {
1559 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1560 ETH_ALEN);
1561 i++;
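/*
 * NAPI poll routine for an Rx path.  Processes up to @budget
 * completions through bnad_poll_cq(); if less than the budget was
 * consumed (or the carrier is down), it completes NAPI and re-enables
 * the Rx IB interrupt.
 */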
1565 static int
1566 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1568 struct bnad_rx_ctrl *rx_ctrl =
1569 container_of(napi, struct bnad_rx_ctrl, napi);
1570 struct bna_ccb *ccb;
1571 struct bnad *bnad;
1572 int rcvd = 0;
1574 ccb = rx_ctrl->ccb;
1576 bnad = ccb->bnad;
1578 if (!netif_carrier_ok(bnad->netdev))
1579 goto poll_exit;
1581 rcvd = bnad_poll_cq(bnad, ccb, budget);
1582 if (rcvd == budget)
1583 return rcvd;
1585 poll_exit:
1586 napi_complete(napi);
1588 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1590 bnad_enable_rx_irq(bnad, ccb);
1591 return rcvd;
1594 static void
1595 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1597 struct bnad_rx_ctrl *rx_ctrl;
1598 int i;
1600 /* Initialize & enable NAPI */
1601 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1602 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1604 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1605 bnad_napi_poll_rx, 64);
1607 napi_enable(&rx_ctrl->napi);
1611 static void
1612 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1614 int i;
1616 /* First disable and then clean up */
1617 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1618 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1619 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1623 /* Should be called with conf_lock held */
1624 void
1625 bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1627 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1628 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1629 unsigned long flags;
1631 if (!tx_info->tx)
1632 return;
1634 init_completion(&bnad->bnad_completions.tx_comp);
1635 spin_lock_irqsave(&bnad->bna_lock, flags);
1636 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1638 wait_for_completion(&bnad->bnad_completions.tx_comp);
1640 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1641 bnad_tx_msix_unregister(bnad, tx_info,
1642 bnad->num_txq_per_tx);
1644 spin_lock_irqsave(&bnad->bna_lock, flags);
1645 bna_tx_destroy(tx_info->tx);
1646 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1648 tx_info->tx = NULL;
1650 if (0 == tx_id)
1651 tasklet_kill(&bnad->tx_free_tasklet);
1653 bnad_tx_res_free(bnad, res_info);
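/*
 * bnad_setup_tx() brings up one Tx object end to end: it fills the Tx
 * config and event callbacks, asks bna for the resource requirements,
 * adds the unmap-queue memory requirement, allocates the resources,
 * creates the Tx object under bna_lock, registers the per-TxQ MSI-X
 * handlers (when in MSI-X mode) and finally enables the Tx.
 */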
1656 /* Should be called with conf_lock held */
1657 int
1658 bnad_setup_tx(struct bnad *bnad, uint tx_id)
1660 int err;
1661 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1662 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1663 struct bna_intr_info *intr_info =
1664 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1665 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1666 struct bna_tx_event_cbfn tx_cbfn;
1667 struct bna_tx *tx;
1668 unsigned long flags;
1670 /* Initialize the Tx object configuration */
1671 tx_config->num_txq = bnad->num_txq_per_tx;
1672 tx_config->txq_depth = bnad->txq_depth;
1673 tx_config->tx_type = BNA_TX_T_REGULAR;
1675 /* Initialize the tx event handlers */
1676 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1677 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1678 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1679 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1680 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1682 /* Get BNA's resource requirement for one tx object */
1683 spin_lock_irqsave(&bnad->bna_lock, flags);
1684 bna_tx_res_req(bnad->num_txq_per_tx,
1685 bnad->txq_depth, res_info);
1686 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1688 /* Fill Unmap Q memory requirements */
1689 BNAD_FILL_UNMAPQ_MEM_REQ(
1690 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1691 bnad->num_txq_per_tx,
1692 BNAD_TX_UNMAPQ_DEPTH);
1694 /* Allocate resources */
1695 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1696 if (err)
1697 return err;
1699 /* Ask BNA to create one Tx object, supplying required resources */
1700 spin_lock_irqsave(&bnad->bna_lock, flags);
1701 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1702 tx_info);
1703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1704 if (!tx)
1705 goto err_return;
1706 tx_info->tx = tx;
1708 /* Register ISR for the Tx object */
1709 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1710 err = bnad_tx_msix_register(bnad, tx_info,
1711 tx_id, bnad->num_txq_per_tx);
1712 if (err)
1713 goto err_return;
1716 spin_lock_irqsave(&bnad->bna_lock, flags);
1717 bna_tx_enable(tx);
1718 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1720 return 0;
1722 err_return:
1723 bnad_tx_res_free(bnad, res_info);
1724 return err;
1727 /* Setup the rx config for bna_rx_create */
1728 /* bnad decides the configuration */
1729 static void
1730 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1732 rx_config->rx_type = BNA_RX_T_REGULAR;
1733 rx_config->num_paths = bnad->num_rxp_per_rx;
1735 if (bnad->num_rxp_per_rx > 1) {
1736 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1737 rx_config->rss_config.hash_type =
1738 (BFI_RSS_T_V4_TCP |
1739 BFI_RSS_T_V6_TCP |
1740 BFI_RSS_T_V4_IP |
1741 BFI_RSS_T_V6_IP);
1742 rx_config->rss_config.hash_mask =
1743 bnad->num_rxp_per_rx - 1;
1744 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1745 sizeof(rx_config->rss_config.toeplitz_hash_key));
1746 } else {
1747 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1748 memset(&rx_config->rss_config, 0,
1749 sizeof(rx_config->rss_config));
1751 rx_config->rxp_type = BNA_RXP_SLR;
1752 rx_config->q_depth = bnad->rxq_depth;
1754 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1756 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
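/*
 * Teardown order for an Rx object: stop the DIM timer (default Rx
 * only), disable NAPI, disable the Rx in bna with BNA_HARD_CLEANUP and
 * wait for the completion callback, unregister the MSI-X vectors,
 * destroy the Rx object and free its resources.
 */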
1759 /* Called with mutex_lock(&bnad->conf_mutex) held */
1760 void
1761 bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1763 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1764 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1765 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1766 unsigned long flags;
1767 int dim_timer_del = 0;
1769 if (!rx_info->rx)
1770 return;
1772 if (0 == rx_id) {
1773 spin_lock_irqsave(&bnad->bna_lock, flags);
1774 dim_timer_del = bnad_dim_timer_running(bnad);
1775 if (dim_timer_del)
1776 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1778 if (dim_timer_del)
1779 del_timer_sync(&bnad->dim_timer);
1782 bnad_napi_disable(bnad, rx_id);
1784 init_completion(&bnad->bnad_completions.rx_comp);
1785 spin_lock_irqsave(&bnad->bna_lock, flags);
1786 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788 wait_for_completion(&bnad->bnad_completions.rx_comp);
1790 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1791 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1793 spin_lock_irqsave(&bnad->bna_lock, flags);
1794 bna_rx_destroy(rx_info->rx);
1795 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1797 rx_info->rx = NULL;
1799 bnad_rx_res_free(bnad, res_info);
1802 /* Called with mutex_lock(&bnad->conf_mutex) held */
1803 int
1804 bnad_setup_rx(struct bnad *bnad, uint rx_id)
1806 int err;
1807 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1808 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1809 struct bna_intr_info *intr_info =
1810 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1811 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1812 struct bna_rx_event_cbfn rx_cbfn;
1813 struct bna_rx *rx;
1814 unsigned long flags;
1816 /* Initialize the Rx object configuration */
1817 bnad_init_rx_config(bnad, rx_config);
1819 /* Initialize the Rx event handlers */
1820 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1821 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1822 rx_cbfn.rcb_destroy_cbfn = NULL;
1823 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1824 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1825 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1826 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1828 /* Get BNA's resource requirement for one Rx object */
1829 spin_lock_irqsave(&bnad->bna_lock, flags);
1830 bna_rx_res_req(rx_config, res_info);
1831 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1833 /* Fill Unmap Q memory requirements */
1834 BNAD_FILL_UNMAPQ_MEM_REQ(
1835 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1836 rx_config->num_paths +
1837 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1838 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1840 /* Allocate resource */
1841 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1842 if (err)
1843 return err;
1845 /* Ask BNA to create one Rx object, supplying required resources */
1846 spin_lock_irqsave(&bnad->bna_lock, flags);
1847 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1848 rx_info);
1849 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1850 if (!rx)
1851 goto err_return;
1852 rx_info->rx = rx;
1854 /* Register ISR for the Rx object */
1855 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1856 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1857 rx_config->num_paths);
1858 if (err)
1859 goto err_return;
1862 /* Enable NAPI */
1863 bnad_napi_enable(bnad, rx_id);
1865 spin_lock_irqsave(&bnad->bna_lock, flags);
1866 if (0 == rx_id) {
1867 /* Set up Dynamic Interrupt Moderation Vector */
1868 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1869 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1871 /* Enable VLAN filtering only on the default Rx */
1872 bna_rx_vlanfilter_enable(rx);
1874 /* Start the DIM timer */
1875 bnad_dim_timer_start(bnad);
1878 bna_rx_enable(rx);
1879 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1881 return 0;
1883 err_return:
1884 bnad_cleanup_rx(bnad, rx_id);
1885 return err;
1888 /* Called with conf_lock & bnad->bna_lock held */
1889 void
1890 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1892 struct bnad_tx_info *tx_info;
1894 tx_info = &bnad->tx_info[0];
1895 if (!tx_info->tx)
1896 return;
1898 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1901 /* Called with conf_lock & bnad->bna_lock held */
1902 void
1903 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1905 struct bnad_rx_info *rx_info;
1906 int i;
1908 for (i = 0; i < bnad->num_rx; i++) {
1909 rx_info = &bnad->rx_info[i];
1910 if (!rx_info->rx)
1911 continue;
1912 bna_rx_coalescing_timeo_set(rx_info->rx,
1913 bnad->rx_coalescing_timeo);
1918 * Called with bnad->bna_lock held
1920 static int
1921 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1923 int ret;
1925 if (!is_valid_ether_addr(mac_addr))
1926 return -EADDRNOTAVAIL;
1928 /* If datapath is down, pretend everything went through */
1929 if (!bnad->rx_info[0].rx)
1930 return 0;
1932 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1933 if (ret != BNA_CB_SUCCESS)
1934 return -EADDRNOTAVAIL;
1936 return 0;
1939 /* Should be called with conf_lock held */
1940 static int
1941 bnad_enable_default_bcast(struct bnad *bnad)
1943 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1944 int ret;
1945 unsigned long flags;
1947 init_completion(&bnad->bnad_completions.mcast_comp);
1949 spin_lock_irqsave(&bnad->bna_lock, flags);
1950 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1951 bnad_cb_rx_mcast_add);
1952 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1954 if (ret == BNA_CB_SUCCESS)
1955 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1956 else
1957 return -ENODEV;
1959 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1960 return -ENODEV;
1962 return 0;
1965 /* Statistics utilities */
1966 void
1967 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1969 int i, j;
1971 for (i = 0; i < bnad->num_rx; i++) {
1972 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1973 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1974 stats->rx_packets += bnad->rx_info[i].
1975 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
1976 stats->rx_bytes += bnad->rx_info[i].
1977 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
1978 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
1979 bnad->rx_info[i].rx_ctrl[j].ccb->
1980 rcb[1]->rxq) {
1981 stats->rx_packets +=
1982 bnad->rx_info[i].rx_ctrl[j].
1983 ccb->rcb[1]->rxq->rx_packets;
1984 stats->rx_bytes +=
1985 bnad->rx_info[i].rx_ctrl[j].
1986 ccb->rcb[1]->rxq->rx_bytes;
1991 for (i = 0; i < bnad->num_tx; i++) {
1992 for (j = 0; j < bnad->num_txq_per_tx; j++) {
1993 if (bnad->tx_info[i].tcb[j]) {
1994 stats->tx_packets +=
1995 bnad->tx_info[i].tcb[j]->txq->tx_packets;
1996 stats->tx_bytes +=
1997 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2004 * Must be called with the bna_lock held.
2006 void
2007 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2009 struct bfi_ll_stats_mac *mac_stats;
2010 u64 bmap;
2011 int i;
2013 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2014 stats->rx_errors =
2015 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2016 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2017 mac_stats->rx_undersize;
2018 stats->tx_errors = mac_stats->tx_fcs_error +
2019 mac_stats->tx_undersize;
2020 stats->rx_dropped = mac_stats->rx_drop;
2021 stats->tx_dropped = mac_stats->tx_drop;
2022 stats->multicast = mac_stats->rx_multicast;
2023 stats->collisions = mac_stats->tx_total_collision;
2025 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2027 /* receive ring buffer overflow ?? */
2029 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2030 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2031 /* recv'r fifo overrun */
2032 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2033 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2034 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2035 if (bmap & 1) {
2036 stats->rx_fifo_errors +=
2037 bnad->stats.bna_stats->
2038 hw_stats->rxf_stats[i].frame_drops;
2039 break;
2041 bmap >>= 1;
2045 static void
2046 bnad_mbox_irq_sync(struct bnad *bnad)
2048 u32 irq;
2049 unsigned long flags;
2051 spin_lock_irqsave(&bnad->bna_lock, flags);
2052 if (bnad->cfg_flags & BNAD_CF_MSIX)
2053 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2054 else
2055 irq = bnad->pcidev->irq;
2056 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2058 synchronize_irq(irq);
2061 /* Utility used by bnad_start_xmit, for doing TSO */
2062 static int
2063 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2065 int err;
2067 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
2068 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2069 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2070 if (skb_header_cloned(skb)) {
2071 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2072 if (err) {
2073 BNAD_UPDATE_CTR(bnad, tso_err);
2074 return err;
2079 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2080 * excluding the length field.
2082 if (skb->protocol == htons(ETH_P_IP)) {
2083 struct iphdr *iph = ip_hdr(skb);
2085 /* Do we really need these? */
2086 iph->tot_len = 0;
2087 iph->check = 0;
2089 tcp_hdr(skb)->check =
2090 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2091 IPPROTO_TCP, 0);
2092 BNAD_UPDATE_CTR(bnad, tso4);
2093 } else {
2094 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2096 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2097 ipv6h->payload_len = 0;
2098 tcp_hdr(skb)->check =
2099 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2100 IPPROTO_TCP, 0);
2101 BNAD_UPDATE_CTR(bnad, tso6);
2104 return 0;
2108 * Initialize Q numbers depending on Rx Paths
2109 * Called with bnad->bna_lock held, because of cfg_flags
2110 * access.
2112 static void
2113 bnad_q_num_init(struct bnad *bnad)
2115 int rxps;
2117 rxps = min((uint)num_online_cpus(),
2118 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2120 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2121 rxps = 1; /* INTx */
2123 bnad->num_rx = 1;
2124 bnad->num_tx = 1;
2125 bnad->num_rxp_per_rx = rxps;
2126 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2130 * Adjusts the Q numbers, given a number of MSI-X vectors.
2131 * Gives preference to RSS as opposed to Tx priority Queues;
2132 * in such a case, just use 1 Tx Q.
2133 * Called with bnad->bna_lock held because of cfg_flags access.
2135 static void
2136 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2138 bnad->num_txq_per_tx = 1;
2139 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2140 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2141 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2142 bnad->num_rxp_per_rx = msix_vectors -
2143 (bnad->num_tx * bnad->num_txq_per_tx) -
2144 BNAD_MAILBOX_MSIX_VECTORS;
2145 } else
2146 bnad->num_rxp_per_rx = 1;
2149 /* Enable / disable device */
2150 static void
2151 bnad_device_disable(struct bnad *bnad)
2153 unsigned long flags;
2155 init_completion(&bnad->bnad_completions.ioc_comp);
2157 spin_lock_irqsave(&bnad->bna_lock, flags);
2158 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2159 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2161 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2164 static int
2165 bnad_device_enable(struct bnad *bnad)
2167 int err = 0;
2168 unsigned long flags;
2170 init_completion(&bnad->bnad_completions.ioc_comp);
2172 spin_lock_irqsave(&bnad->bna_lock, flags);
2173 bna_device_enable(&bnad->bna.device);
2174 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2176 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2178 if (bnad->bnad_completions.ioc_comp_status)
2179 err = bnad->bnad_completions.ioc_comp_status;
2181 return err;
2184 /* Free BNA resources */
2185 static void
2186 bnad_res_free(struct bnad *bnad)
2188 int i;
2189 struct bna_res_info *res_info = &bnad->res_info[0];
2191 for (i = 0; i < BNA_RES_T_MAX; i++) {
2192 if (res_info[i].res_type == BNA_RES_T_MEM)
2193 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2194 else
2195 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2199 /* Allocates memory and interrupt resources for BNA */
2200 static int
2201 bnad_res_alloc(struct bnad *bnad)
2203 int i, err;
2204 struct bna_res_info *res_info = &bnad->res_info[0];
2206 for (i = 0; i < BNA_RES_T_MAX; i++) {
2207 if (res_info[i].res_type == BNA_RES_T_MEM)
2208 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2209 else
2210 err = bnad_mbox_irq_alloc(bnad,
2211 &res_info[i].res_u.intr_info);
2212 if (err)
2213 goto err_return;
2215 return 0;
2217 err_return:
2218 bnad_res_free(bnad);
2219 return err;
2222 /* Interrupt enable / disable */
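/*
 * bnad_enable_msix() below first asks for the full vector count; on a
 * partial grant it shrinks the queue configuration with
 * bnad_q_num_adjust() and retries once, and on any further failure it
 * falls back to INTx by clearing BNAD_CF_MSIX and re-running
 * bnad_q_num_init().
 */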
2223 static void
2224 bnad_enable_msix(struct bnad *bnad)
2226 int i, ret;
2227 unsigned long flags;
2229 spin_lock_irqsave(&bnad->bna_lock, flags);
2230 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2231 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2232 return;
2234 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2236 if (bnad->msix_table)
2237 return;
2239 bnad->msix_table =
2240 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2242 if (!bnad->msix_table)
2243 goto intx_mode;
2245 for (i = 0; i < bnad->msix_num; i++)
2246 bnad->msix_table[i].entry = i;
2248 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2249 if (ret > 0) {
2250 /* Not enough MSI-X vectors. */
2252 spin_lock_irqsave(&bnad->bna_lock, flags);
2253 /* ret = #of vectors that we got */
2254 bnad_q_num_adjust(bnad, ret);
2255 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2257 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2258 + (bnad->num_rx
2259 * bnad->num_rxp_per_rx) +
2260 BNAD_MAILBOX_MSIX_VECTORS;
2262 /* Try once more with adjusted numbers */
2263 /* If this fails, fall back to INTx */
2264 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2265 bnad->msix_num);
2266 if (ret)
2267 goto intx_mode;
2269 } else if (ret < 0)
2270 goto intx_mode;
2271 return;
2273 intx_mode:
2275 kfree(bnad->msix_table);
2276 bnad->msix_table = NULL;
2277 bnad->msix_num = 0;
2278 spin_lock_irqsave(&bnad->bna_lock, flags);
2279 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2280 bnad_q_num_init(bnad);
2281 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2284 static void
2285 bnad_disable_msix(struct bnad *bnad)
2287 u32 cfg_flags;
2288 unsigned long flags;
2290 spin_lock_irqsave(&bnad->bna_lock, flags);
2291 cfg_flags = bnad->cfg_flags;
2292 if (bnad->cfg_flags & BNAD_CF_MSIX)
2293 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2294 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2296 if (cfg_flags & BNAD_CF_MSIX) {
2297 pci_disable_msix(bnad->pcidev);
2298 kfree(bnad->msix_table);
2299 bnad->msix_table = NULL;
2303 /* Netdev entry points */
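/*
 * bnad_open() brings the interface up in this order: set up the Tx
 * and Rx objects, program MTU and pause settings on the port, enable
 * the port, install the broadcast and unicast filters, and start the
 * stats timer.  bnad_stop() unwinds this under conf_mutex.
 */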
2304 static int
2305 bnad_open(struct net_device *netdev)
2307 int err;
2308 struct bnad *bnad = netdev_priv(netdev);
2309 struct bna_pause_config pause_config;
2310 int mtu;
2311 unsigned long flags;
2313 mutex_lock(&bnad->conf_mutex);
2315 /* Tx */
2316 err = bnad_setup_tx(bnad, 0);
2317 if (err)
2318 goto err_return;
2320 /* Rx */
2321 err = bnad_setup_rx(bnad, 0);
2322 if (err)
2323 goto cleanup_tx;
2325 /* Port */
2326 pause_config.tx_pause = 0;
2327 pause_config.rx_pause = 0;
2329 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2331 spin_lock_irqsave(&bnad->bna_lock, flags);
2332 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2333 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2334 bna_port_enable(&bnad->bna.port);
2335 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2337 /* Enable broadcast */
2338 bnad_enable_default_bcast(bnad);
2340 /* Set the UCAST address */
2341 spin_lock_irqsave(&bnad->bna_lock, flags);
2342 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2343 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2345 /* Start the stats timer */
2346 bnad_stats_timer_start(bnad);
2348 mutex_unlock(&bnad->conf_mutex);
2350 return 0;
2352 cleanup_tx:
2353 bnad_cleanup_tx(bnad, 0);
2355 err_return:
2356 mutex_unlock(&bnad->conf_mutex);
2357 return err;
2360 static int
2361 bnad_stop(struct net_device *netdev)
2363 struct bnad *bnad = netdev_priv(netdev);
2364 unsigned long flags;
2366 mutex_lock(&bnad->conf_mutex);
2368 /* Stop the stats timer */
2369 bnad_stats_timer_stop(bnad);
2371 init_completion(&bnad->bnad_completions.port_comp);
2373 spin_lock_irqsave(&bnad->bna_lock, flags);
2374 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2375 bnad_cb_port_disabled);
2376 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2378 wait_for_completion(&bnad->bnad_completions.port_comp);
2380 bnad_cleanup_tx(bnad, 0);
2381 bnad_cleanup_rx(bnad, 0);
2383 /* Synchronize mailbox IRQ */
2384 bnad_mbox_irq_sync(bnad);
2386 mutex_unlock(&bnad->conf_mutex);
2388 return 0;
2391 /* TX */
2393 * bnad_start_xmit : Netdev entry point for Transmit
2394 * Called under lock held by net_device
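 *
 * Rough outline of the transmit path below: validate the skb, try to
 * reclaim completed Tx buffers when the ring looks full, build one or
 * more work items (the first carries the opcode, flags, frame length
 * and VLAN tag; 4 vectors fit per work item, so an skb needing 10
 * vectors takes 3 work items), DMA-map the linear head and each page
 * fragment, and finally ring the doorbell.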
2396 static netdev_tx_t
2397 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2399 struct bnad *bnad = netdev_priv(netdev);
2401 u16 txq_prod, vlan_tag = 0;
2402 u32 unmap_prod, wis, wis_used, wi_range;
2403 u32 vectors, vect_id, i, acked;
2404 u32 tx_id;
2405 int err;
2407 struct bnad_tx_info *tx_info;
2408 struct bna_tcb *tcb;
2409 struct bnad_unmap_q *unmap_q;
2410 dma_addr_t dma_addr;
2411 struct bna_txq_entry *txqent;
2412 bna_txq_wi_ctrl_flag_t flags;
2414 if (unlikely
2415 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2416 dev_kfree_skb(skb);
2417 return NETDEV_TX_OK;
2420 tx_id = 0;
2422 tx_info = &bnad->tx_info[tx_id];
2423 tcb = tx_info->tcb[tx_id];
2424 unmap_q = tcb->unmap_q;
2427 * Takes care of the Tx that is scheduled between clearing the flag
2428 * and the netif_stop_queue() call.
2430 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2431 dev_kfree_skb(skb);
2432 return NETDEV_TX_OK;
2435 vectors = 1 + skb_shinfo(skb)->nr_frags;
2436 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2437 dev_kfree_skb(skb);
2438 return NETDEV_TX_OK;
2440 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2441 acked = 0;
2442 if (unlikely
2443 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2444 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2445 if ((u16) (*tcb->hw_consumer_index) !=
2446 tcb->consumer_index &&
2447 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2448 acked = bnad_free_txbufs(bnad, tcb);
2449 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2450 bna_ib_ack(tcb->i_dbell, acked);
2451 smp_mb__before_clear_bit();
2452 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2453 } else {
2454 netif_stop_queue(netdev);
2455 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2458 smp_mb();
2460 		 * Check again to handle the race between netif_stop_queue()
2461 		 * here and netif_wake_queue() in the interrupt handler,
2462 		 * which does not run under the netif tx lock.
2464 if (likely
2465 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2466 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2467 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2468 return NETDEV_TX_BUSY;
2469 } else {
2470 netif_wake_queue(netdev);
2471 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2475 unmap_prod = unmap_q->producer_index;
2476 wis_used = 1;
2477 vect_id = 0;
2478 flags = 0;
2480 txq_prod = tcb->producer_index;
2481 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2482 BUG_ON(!(wi_range <= tcb->q_depth));
2483 txqent->hdr.wi.reserved = 0;
2484 txqent->hdr.wi.num_vectors = vectors;
2485 txqent->hdr.wi.opcode =
2486 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2487 BNA_TXQ_WI_SEND));
2489 if (vlan_tx_tag_present(skb)) {
2490 vlan_tag = (u16) vlan_tx_tag_get(skb);
2491 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2493 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2494 vlan_tag =
2495 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2496 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2499 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2501 if (skb_is_gso(skb)) {
2502 err = bnad_tso_prepare(bnad, skb);
2503 if (err) {
2504 dev_kfree_skb(skb);
2505 return NETDEV_TX_OK;
2507 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2508 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2509 txqent->hdr.wi.l4_hdr_size_n_offset =
2510 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2511 (tcp_hdrlen(skb) >> 2,
2512 skb_transport_offset(skb)));
2513 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2514 u8 proto = 0;
2516 txqent->hdr.wi.lso_mss = 0;
2518 if (skb->protocol == htons(ETH_P_IP))
2519 proto = ip_hdr(skb)->protocol;
2520 else if (skb->protocol == htons(ETH_P_IPV6)) {
2521 /* nexthdr may not be TCP immediately. */
2522 proto = ipv6_hdr(skb)->nexthdr;
2524 if (proto == IPPROTO_TCP) {
2525 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2526 txqent->hdr.wi.l4_hdr_size_n_offset =
2527 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2528 (0, skb_transport_offset(skb)));
2530 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2532 BUG_ON(!(skb_headlen(skb) >=
2533 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2535 } else if (proto == IPPROTO_UDP) {
2536 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2537 txqent->hdr.wi.l4_hdr_size_n_offset =
2538 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2539 (0, skb_transport_offset(skb)));
2541 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2543 BUG_ON(!(skb_headlen(skb) >=
2544 skb_transport_offset(skb) +
2545 sizeof(struct udphdr)));
2546 } else {
2547 err = skb_checksum_help(skb);
2548 BNAD_UPDATE_CTR(bnad, csum_help);
2549 if (err) {
2550 dev_kfree_skb(skb);
2551 BNAD_UPDATE_CTR(bnad, csum_help_err);
2552 return NETDEV_TX_OK;
2555 } else {
2556 txqent->hdr.wi.lso_mss = 0;
2557 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2560 txqent->hdr.wi.flags = htons(flags);
2562 txqent->hdr.wi.frame_length = htonl(skb->len);
2564 unmap_q->unmap_array[unmap_prod].skb = skb;
2565 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2566 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2567 dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
2568 PCI_DMA_TODEVICE);
2569 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2570 dma_addr);
2572 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2573 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2575 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2576 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2577 u32 size = frag->size;
2579 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2580 vect_id = 0;
2581 if (--wi_range)
2582 txqent++;
2583 else {
2584 BNA_QE_INDX_ADD(txq_prod, wis_used,
2585 tcb->q_depth);
2586 wis_used = 0;
2587 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2588 txqent, wi_range);
2589 BUG_ON(!(wi_range <= tcb->q_depth));
2591 wis_used++;
2592 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2595 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2596 txqent->vector[vect_id].length = htons(size);
2597 dma_addr =
2598 pci_map_page(bnad->pcidev, frag->page,
2599 frag->page_offset, size,
2600 PCI_DMA_TODEVICE);
2601 pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2602 dma_addr);
2603 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2604 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2607 unmap_q->producer_index = unmap_prod;
2608 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2609 tcb->producer_index = txq_prod;
2611 smp_mb();
2613 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2614 return NETDEV_TX_OK;
2616 bna_txq_prod_indx_doorbell(tcb);
2618 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2619 tasklet_schedule(&bnad->tx_free_tasklet);
2621 return NETDEV_TX_OK;
2625  * Uses spin_lock to synchronize reading of the stats structures,
2626  * which are written by BNA under the same lock.
2628 static struct rtnl_link_stats64 *
2629 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2631 struct bnad *bnad = netdev_priv(netdev);
2632 unsigned long flags;
2634 spin_lock_irqsave(&bnad->bna_lock, flags);
2636 bnad_netdev_qstats_fill(bnad, stats);
2637 bnad_netdev_hwstats_fill(bnad, stats);
2639 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2641 return stats;
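/*
 * Apply the netdev Rx filter state to the hardware: toggle
 * promiscuous and all-multicast modes to match netdev->flags, then
 * (re)program the multicast list with the broadcast address in slot 0.
 */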
2644 static void
2645 bnad_set_rx_mode(struct net_device *netdev)
2647 struct bnad *bnad = netdev_priv(netdev);
2648 u32 new_mask, valid_mask;
2649 unsigned long flags;
2651 spin_lock_irqsave(&bnad->bna_lock, flags);
2653 new_mask = valid_mask = 0;
2655 if (netdev->flags & IFF_PROMISC) {
2656 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2657 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2658 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2659 bnad->cfg_flags |= BNAD_CF_PROMISC;
2661 } else {
2662 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2663 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2664 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2665 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2669 if (netdev->flags & IFF_ALLMULTI) {
2670 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2671 new_mask |= BNA_RXMODE_ALLMULTI;
2672 valid_mask |= BNA_RXMODE_ALLMULTI;
2673 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2675 } else {
2676 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2677 new_mask &= ~BNA_RXMODE_ALLMULTI;
2678 valid_mask |= BNA_RXMODE_ALLMULTI;
2679 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2683 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2685 if (!netdev_mc_empty(netdev)) {
2686 u8 *mcaddr_list;
2687 int mc_count = netdev_mc_count(netdev);
2689 /* Index 0 holds the broadcast address */
2690 mcaddr_list =
2691 kzalloc((mc_count + 1) * ETH_ALEN,
2692 GFP_ATOMIC);
2693 if (!mcaddr_list)
2694 goto unlock;
2696 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2698 /* Copy rest of the MC addresses */
2699 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2701 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2702 mcaddr_list, NULL);
2704 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2705 kfree(mcaddr_list);
2707 unlock:
2708 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2712  * bna_lock is used to sync writes to netdev->dev_addr;
2713 * conf_lock cannot be used since this call may be made
2714 * in a non-blocking context.
2716 static int
2717 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2719 int err;
2720 struct bnad *bnad = netdev_priv(netdev);
2721 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2722 unsigned long flags;
2724 spin_lock_irqsave(&bnad->bna_lock, flags);
2726 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2728 if (!err)
2729 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2731 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2733 return err;
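/*
 * The hardware is programmed with the on-wire frame size rather than
 * the L3 MTU: for example, a 1500-byte MTU becomes
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes.
 */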
2736 static int
2737 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2739 int mtu, err = 0;
2740 unsigned long flags;
2742 struct bnad *bnad = netdev_priv(netdev);
2744 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2745 return -EINVAL;
2747 mutex_lock(&bnad->conf_mutex);
2749 netdev->mtu = new_mtu;
2751 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2753 spin_lock_irqsave(&bnad->bna_lock, flags);
2754 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2755 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2757 mutex_unlock(&bnad->conf_mutex);
2758 return err;
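/*
 * VLAN hooks: remember the vlan_group used on the receive path and
 * add or remove VLAN IDs in the Rx filter.  conf_mutex serializes
 * against other configuration paths; bna_lock protects the bna state
 * machine.
 */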
2761 static void
2762 bnad_vlan_rx_register(struct net_device *netdev,
2763 struct vlan_group *vlan_grp)
2765 struct bnad *bnad = netdev_priv(netdev);
2767 mutex_lock(&bnad->conf_mutex);
2768 bnad->vlan_grp = vlan_grp;
2769 mutex_unlock(&bnad->conf_mutex);
2772 static void
2773 bnad_vlan_rx_add_vid(struct net_device *netdev,
2774 unsigned short vid)
2776 struct bnad *bnad = netdev_priv(netdev);
2777 unsigned long flags;
2779 if (!bnad->rx_info[0].rx)
2780 return;
2782 mutex_lock(&bnad->conf_mutex);
2784 spin_lock_irqsave(&bnad->bna_lock, flags);
2785 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2786 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2788 mutex_unlock(&bnad->conf_mutex);
2791 static void
2792 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2793 unsigned short vid)
2795 struct bnad *bnad = netdev_priv(netdev);
2796 unsigned long flags;
2798 if (!bnad->rx_info[0].rx)
2799 return;
2801 mutex_lock(&bnad->conf_mutex);
2803 spin_lock_irqsave(&bnad->bna_lock, flags);
2804 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2805 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2807 mutex_unlock(&bnad->conf_mutex);
2810 #ifdef CONFIG_NET_POLL_CONTROLLER
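/*
 * Netpoll entry point: in INTx mode, mask interrupts and call the ISR
 * directly; in MSI-X mode, walk every Rx path and schedule its NAPI
 * poll with the CQ interrupt disabled.
 */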
2811 static void
2812 bnad_netpoll(struct net_device *netdev)
2814 struct bnad *bnad = netdev_priv(netdev);
2815 struct bnad_rx_info *rx_info;
2816 struct bnad_rx_ctrl *rx_ctrl;
2817 u32 curr_mask;
2818 int i, j;
2820 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2821 bna_intx_disable(&bnad->bna, curr_mask);
2822 bnad_isr(bnad->pcidev->irq, netdev);
2823 bna_intx_enable(&bnad->bna, curr_mask);
2824 } else {
2825 for (i = 0; i < bnad->num_rx; i++) {
2826 rx_info = &bnad->rx_info[i];
2827 if (!rx_info->rx)
2828 continue;
2829 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2830 rx_ctrl = &rx_info->rx_ctrl[j];
2831 if (rx_ctrl->ccb) {
2832 bnad_disable_rx_irq(bnad,
2833 rx_ctrl->ccb);
2834 bnad_netif_rx_schedule_poll(bnad,
2835 rx_ctrl->ccb);
2841 #endif
2843 static const struct net_device_ops bnad_netdev_ops = {
2844 .ndo_open = bnad_open,
2845 .ndo_stop = bnad_stop,
2846 .ndo_start_xmit = bnad_start_xmit,
2847 .ndo_get_stats64 = bnad_get_stats64,
2848 .ndo_set_rx_mode = bnad_set_rx_mode,
2849 .ndo_set_multicast_list = bnad_set_rx_mode,
2850 .ndo_validate_addr = eth_validate_addr,
2851 .ndo_set_mac_address = bnad_set_mac_address,
2852 .ndo_change_mtu = bnad_change_mtu,
2853 .ndo_vlan_rx_register = bnad_vlan_rx_register,
2854 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2855 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2856 #ifdef CONFIG_NET_POLL_CONTROLLER
2857 .ndo_poll_controller = bnad_netpoll
2858 #endif
2861 static void
2862 bnad_netdev_init(struct bnad *bnad, bool using_dac)
2864 struct net_device *netdev = bnad->netdev;
2866 netdev->features |= NETIF_F_IPV6_CSUM;
2867 netdev->features |= NETIF_F_TSO;
2868 netdev->features |= NETIF_F_TSO6;
2870 netdev->features |= NETIF_F_GRO;
2871 pr_warn("bna: GRO enabled, using kernel stack GRO\n");
2873 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2875 if (using_dac)
2876 netdev->features |= NETIF_F_HIGHDMA;
2878 netdev->features |=
2879 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2880 NETIF_F_HW_VLAN_FILTER;
2882 netdev->vlan_features = netdev->features;
2883 netdev->mem_start = bnad->mmio_start;
2884 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2886 netdev->netdev_ops = &bnad_netdev_ops;
2887 bnad_set_ethtool_ops(netdev);
2891 * 1. Initialize the bnad structure
2892 * 2. Setup netdev pointer in pci_dev
2893  * 3. Initialize Tx free tasklet
2894 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2896 static int
2897 bnad_init(struct bnad *bnad,
2898 struct pci_dev *pdev, struct net_device *netdev)
2900 unsigned long flags;
2902 SET_NETDEV_DEV(netdev, &pdev->dev);
2903 pci_set_drvdata(pdev, netdev);
2905 bnad->netdev = netdev;
2906 bnad->pcidev = pdev;
2907 bnad->mmio_start = pci_resource_start(pdev, 0);
2908 bnad->mmio_len = pci_resource_len(pdev, 0);
2909 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2910 if (!bnad->bar0) {
2911 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2912 pci_set_drvdata(pdev, NULL);
2913 return -ENOMEM;
2915 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2916 (unsigned long long) bnad->mmio_len);
2918 spin_lock_irqsave(&bnad->bna_lock, flags);
2919 if (!bnad_msix_disable)
2920 bnad->cfg_flags = BNAD_CF_MSIX;
2922 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2924 bnad_q_num_init(bnad);
2925 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2927 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2928 (bnad->num_rx * bnad->num_rxp_per_rx) +
2929 BNAD_MAILBOX_MSIX_VECTORS;
2931 bnad->txq_depth = BNAD_TXQ_DEPTH;
2932 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2933 bnad->rx_csum = true;
2935 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2936 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2938 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2939 (unsigned long)bnad);
2941 return 0;
2945 * Must be called after bnad_pci_uninit()
2946 * so that iounmap() and pci_set_drvdata(NULL)
2947  * happen only after PCI uninitialization.
2949 static void
2950 bnad_uninit(struct bnad *bnad)
2952 if (bnad->bar0)
2953 iounmap(bnad->bar0);
2954 pci_set_drvdata(bnad->pcidev, NULL);
2958 * Initialize locks
2959 	a) Per-device mutex used for serializing configuration
2960 changes from OS interface
2961 b) spin lock used to protect bna state machine
2963 static void
2964 bnad_lock_init(struct bnad *bnad)
2966 spin_lock_init(&bnad->bna_lock);
2967 mutex_init(&bnad->conf_mutex);
2970 static void
2971 bnad_lock_uninit(struct bnad *bnad)
2973 mutex_destroy(&bnad->conf_mutex);
2976 /* PCI Initialization */
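/*
 * DMA mask selection below: try a 64-bit mask first (using_dac = 1 on
 * success) and fall back to 32-bit addressing otherwise; any failure
 * releases the regions and disables the device.
 */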
2977 static int
2978 bnad_pci_init(struct bnad *bnad,
2979 struct pci_dev *pdev, bool *using_dac)
2981 int err;
2983 err = pci_enable_device(pdev);
2984 if (err)
2985 return err;
2986 err = pci_request_regions(pdev, BNAD_NAME);
2987 if (err)
2988 goto disable_device;
2989 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
2990 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2991 *using_dac = 1;
2992 } else {
2993 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2994 if (err) {
2995 err = pci_set_consistent_dma_mask(pdev,
2996 DMA_BIT_MASK(32));
2997 if (err)
2998 goto release_regions;
3000 *using_dac = 0;
3002 pci_set_master(pdev);
3003 return 0;
3005 release_regions:
3006 pci_release_regions(pdev);
3007 disable_device:
3008 pci_disable_device(pdev);
3010 return err;
3013 static void
3014 bnad_pci_uninit(struct pci_dev *pdev)
3016 pci_release_regions(pdev);
3017 pci_disable_device(pdev);
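/*
 * Probe sequence, roughly: load the firmware image, allocate the
 * netdev/bnad pair, bring up PCI and MMIO, enable MSI-X, allocate BNA
 * resources, initialize the bna layer and its IOC timers, enable the
 * device to read the burnt-in MAC, and finally register the netdev.
 * The error path unwinds this in reverse.
 */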
3020 static int __devinit
3021 bnad_pci_probe(struct pci_dev *pdev,
3022 const struct pci_device_id *pcidev_id)
3024 bool using_dac;
3025 int err;
3026 struct bnad *bnad;
3027 struct bna *bna;
3028 struct net_device *netdev;
3029 struct bfa_pcidev pcidev_info;
3030 unsigned long flags;
3032 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3033 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3035 mutex_lock(&bnad_fwimg_mutex);
3036 if (!cna_get_firmware_buf(pdev)) {
3037 mutex_unlock(&bnad_fwimg_mutex);
3038 pr_warn("Failed to load Firmware Image!\n");
3039 return -ENODEV;
3041 mutex_unlock(&bnad_fwimg_mutex);
3044 	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3045 	 * bnad = netdev_priv(netdev)
3047 netdev = alloc_etherdev(sizeof(struct bnad));
3048 if (!netdev) {
3049 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3050 err = -ENOMEM;
3051 return err;
3053 bnad = netdev_priv(netdev);
3056 * PCI initialization
3057 * Output : using_dac = 1 for 64 bit DMA
3058 * = 0 for 32 bit DMA
3060 err = bnad_pci_init(bnad, pdev, &using_dac);
3061 if (err)
3062 goto free_netdev;
3064 bnad_lock_init(bnad);
3066 * Initialize bnad structure
3067 * Setup relation between pci_dev & netdev
3068 * Init Tx free tasklet
3070 err = bnad_init(bnad, pdev, netdev);
3071 if (err)
3072 goto pci_uninit;
3073 /* Initialize netdev structure, set up ethtool ops */
3074 bnad_netdev_init(bnad, using_dac);
3076 /* Set link to down state */
3077 netif_carrier_off(netdev);
3079 bnad_enable_msix(bnad);
3081 	/* Get resource requirement from bna */
3082 bna_res_req(&bnad->res_info[0]);
3084 /* Allocate resources from bna */
3085 err = bnad_res_alloc(bnad);
3086 if (err)
3087 goto free_netdev;
3089 bna = &bnad->bna;
3091 /* Setup pcidev_info for bna_init() */
3092 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3093 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3094 pcidev_info.device_id = bnad->pcidev->device;
3095 pcidev_info.pci_bar_kva = bnad->bar0;
3097 mutex_lock(&bnad->conf_mutex);
3099 spin_lock_irqsave(&bnad->bna_lock, flags);
3100 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3101 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3103 bnad->stats.bna_stats = &bna->stats;
3105 /* Set up timers */
3106 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3107 ((unsigned long)bnad));
3108 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3109 ((unsigned long)bnad));
3110 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
3111 ((unsigned long)bnad));
3113 /* Now start the timer before calling IOC */
3114 mod_timer(&bnad->bna.device.ioc.ioc_timer,
3115 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3118 	 * Start the chip.
3119 	 * Even if err != 0 we don't care; the bna state machine
3120 	 * will deal with it.
3122 err = bnad_device_enable(bnad);
3124 /* Get the burnt-in mac */
3125 spin_lock_irqsave(&bnad->bna_lock, flags);
3126 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3127 bnad_set_netdev_perm_addr(bnad);
3128 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3130 mutex_unlock(&bnad->conf_mutex);
3132 	/* Finally, register with the net_device layer */
3133 err = register_netdev(netdev);
3134 if (err) {
3135 pr_err("BNA : Registering with netdev failed\n");
3136 goto disable_device;
3139 return 0;
3141 disable_device:
3142 mutex_lock(&bnad->conf_mutex);
3143 bnad_device_disable(bnad);
3144 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3145 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3146 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3147 spin_lock_irqsave(&bnad->bna_lock, flags);
3148 bna_uninit(bna);
3149 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3150 mutex_unlock(&bnad->conf_mutex);
3152 bnad_res_free(bnad);
3153 bnad_disable_msix(bnad);
3154 pci_uninit:
3155 bnad_pci_uninit(pdev);
3156 bnad_lock_uninit(bnad);
3157 bnad_uninit(bnad);
3158 free_netdev:
3159 free_netdev(netdev);
3160 return err;
3163 static void __devexit
3164 bnad_pci_remove(struct pci_dev *pdev)
3166 struct net_device *netdev = pci_get_drvdata(pdev);
3167 struct bnad *bnad;
3168 struct bna *bna;
3169 unsigned long flags;
3171 if (!netdev)
3172 return;
3174 pr_info("%s bnad_pci_remove\n", netdev->name);
3175 bnad = netdev_priv(netdev);
3176 bna = &bnad->bna;
3178 unregister_netdev(netdev);
3180 mutex_lock(&bnad->conf_mutex);
3181 bnad_device_disable(bnad);
3182 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3183 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3184 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3185 spin_lock_irqsave(&bnad->bna_lock, flags);
3186 bna_uninit(bna);
3187 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3188 mutex_unlock(&bnad->conf_mutex);
3190 bnad_res_free(bnad);
3191 bnad_disable_msix(bnad);
3192 bnad_pci_uninit(pdev);
3193 bnad_lock_uninit(bnad);
3194 bnad_uninit(bnad);
3195 free_netdev(netdev);
3198 static const struct pci_device_id bnad_pci_id_table[] = {
3200 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3201 PCI_DEVICE_ID_BROCADE_CT),
3202 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3203 .class_mask = 0xffff00
3204 }, {0, }
3207 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3209 static struct pci_driver bnad_pci_driver = {
3210 .name = BNAD_NAME,
3211 .id_table = bnad_pci_id_table,
3212 .probe = bnad_pci_probe,
3213 .remove = __devexit_p(bnad_pci_remove),
3216 static int __init
3217 bnad_module_init(void)
3219 int err;
3221 pr_info("Brocade 10G Ethernet driver\n");
3223 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3225 err = pci_register_driver(&bnad_pci_driver);
3226 if (err < 0) {
3227 pr_err("bna : PCI registration failed in module init "
3228 "(%d)\n", err);
3229 return err;
3232 return 0;
3235 static void __exit
3236 bnad_module_exit(void)
3238 pci_unregister_driver(&bnad_pci_driver);
3240 if (bfi_fw)
3241 release_firmware(bfi_fw);
3244 module_init(bnad_module_init);
3245 module_exit(bnad_module_exit);
3247 MODULE_AUTHOR("Brocade");
3248 MODULE_LICENSE("GPL");
3249 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3250 MODULE_VERSION(BNAD_VERSION);
3251 MODULE_FIRMWARE(CNA_FW_FILE_CT);