bna: CheckPatch Cleanup
drivers/net/bna/bnad.c
1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
18 #include <linux/bitops.h>
19 #include <linux/netdevice.h>
20 #include <linux/skbuff.h>
21 #include <linux/etherdevice.h>
22 #include <linux/in.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_vlan.h>
25 #include <linux/if_ether.h>
26 #include <linux/ip.h>
27 #include <linux/prefetch.h>
28 #include <linux/if_vlan.h>
30 #include "bnad.h"
31 #include "bna.h"
32 #include "cna.h"
34 static DEFINE_MUTEX(bnad_fwimg_mutex);
37 * Module params
39 static uint bnad_msix_disable;
40 module_param(bnad_msix_disable, uint, 0444);
41 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
43 static uint bnad_ioc_auto_recover = 1;
44 module_param(bnad_ioc_auto_recover, uint, 0444);
45 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
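/*
 * Usage note (added, not in the original source): both parameters use
 * permission 0444, so they are read-only at runtime and can only be set
 * at load time, e.g. "modprobe bna bnad_msix_disable=1" to force INTx
 * mode (assuming the module is named bna).
 */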
48 * Global variables
50 u32 bnad_rxqs_per_cq = 2;
52 static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
55 * Local MACROS
57 #define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
59 #define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
61 #define BNAD_GET_MBOX_IRQ(_bnad) \
62 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
63 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
64 ((_bnad)->pcidev->irq))
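/*
 * BNAD_GET_MBOX_IRQ: in MSI-X mode the mailbox uses the last entry of
 * msix_table (index msix_num - 1); otherwise it falls back to the
 * legacy PCI line in pcidev->irq.
 */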
66 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
67 do { \
68 (_res_info)->res_type = BNA_RES_T_MEM; \
69 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
70 (_res_info)->res_u.mem_info.num = (_num); \
71 (_res_info)->res_u.mem_info.len = \
72 sizeof(struct bnad_unmap_q) + \
73 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
74 } while (0)
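/*
 * BNAD_FILL_UNMAPQ_MEM_REQ: requests _num kernel-virtual (KVA) blocks,
 * each sized for a struct bnad_unmap_q plus (_depth - 1) additional
 * bnad_skb_unmap entries -- the struct apparently embeds the first
 * array element, so only depth - 1 more are added.
 */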
76 #define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
79 * Reinitialize completions in CQ, once Rx is taken down
81 static void
82 bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
84 struct bna_cq_entry *cmpl, *next_cmpl;
85 unsigned int wi_range, wis = 0, ccb_prod = 0;
86 int i;
88 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
89 wi_range);
91 for (i = 0; i < ccb->q_depth; i++) {
92 wis++;
93 if (likely(--wi_range))
94 next_cmpl = cmpl + 1;
95 else {
96 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
97 wis = 0;
98 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
99 next_cmpl, wi_range);
101 cmpl->valid = 0;
102 cmpl = next_cmpl;
107 * Frees all pending Tx Bufs
108 * At this point no activity is expected on the Q,
109 * so DMA unmap & freeing is fine.
111 static void
112 bnad_free_all_txbufs(struct bnad *bnad,
113 struct bna_tcb *tcb)
115 u32 unmap_cons;
116 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
117 struct bnad_skb_unmap *unmap_array;
118 struct sk_buff *skb = NULL;
119 int i;
121 unmap_array = unmap_q->unmap_array;
123 unmap_cons = 0;
124 while (unmap_cons < unmap_q->q_depth) {
125 skb = unmap_array[unmap_cons].skb;
126 if (!skb) {
127 unmap_cons++;
128 continue;
130 unmap_array[unmap_cons].skb = NULL;
132 dma_unmap_single(&bnad->pcidev->dev,
133 dma_unmap_addr(&unmap_array[unmap_cons],
134 dma_addr), skb_headlen(skb),
135 DMA_TO_DEVICE);
137 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
138 if (++unmap_cons >= unmap_q->q_depth)
139 break;
141 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
142 dma_unmap_page(&bnad->pcidev->dev,
143 dma_unmap_addr(&unmap_array[unmap_cons],
144 dma_addr),
145 skb_shinfo(skb)->frags[i].size,
146 DMA_TO_DEVICE);
147 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
148 0);
149 if (++unmap_cons >= unmap_q->q_depth)
150 break;
152 dev_kfree_skb_any(skb);
156 /* Data Path Handlers */
159 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
160 * Can be called in a) Interrupt context
161 * b) Sending context
162 * c) Tasklet context
164 static u32
165 bnad_free_txbufs(struct bnad *bnad,
166 struct bna_tcb *tcb)
168 u32 sent_packets = 0, sent_bytes = 0;
169 u16 wis, unmap_cons, updated_hw_cons;
170 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
171 struct bnad_skb_unmap *unmap_array;
172 struct sk_buff *skb;
173 int i;
176 * Just return if TX is stopped. This check is useful
177 * when bnad_free_txbufs() runs from a tasklet that was
178 * scheduled before bnad_cb_tx_cleanup() cleared the
179 * BNAD_TXQ_TX_STARTED bit, but actually executes after
180 * the cleanup has completed.
182 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
183 return 0;
185 updated_hw_cons = *(tcb->hw_consumer_index);
187 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
188 updated_hw_cons, tcb->q_depth);
190 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
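/* wis = number of TxQ work items the hardware has completed beyond the
 * driver's consumer index; it must not exceed the entries in use. */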
192 unmap_array = unmap_q->unmap_array;
193 unmap_cons = unmap_q->consumer_index;
195 prefetch(&unmap_array[unmap_cons + 1]);
196 while (wis) {
197 skb = unmap_array[unmap_cons].skb;
199 unmap_array[unmap_cons].skb = NULL;
201 sent_packets++;
202 sent_bytes += skb->len;
203 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
205 dma_unmap_single(&bnad->pcidev->dev,
206 dma_unmap_addr(&unmap_array[unmap_cons],
207 dma_addr), skb_headlen(skb),
208 DMA_TO_DEVICE);
209 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
210 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
212 prefetch(&unmap_array[unmap_cons + 1]);
213 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
214 prefetch(&unmap_array[unmap_cons + 1]);
216 dma_unmap_page(&bnad->pcidev->dev,
217 dma_unmap_addr(&unmap_array[unmap_cons],
218 dma_addr),
219 skb_shinfo(skb)->frags[i].size,
220 DMA_TO_DEVICE);
221 dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
222 0);
223 BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
225 dev_kfree_skb_any(skb);
228 /* Update consumer pointers. */
229 tcb->consumer_index = updated_hw_cons;
230 unmap_q->consumer_index = unmap_cons;
232 tcb->txq->tx_packets += sent_packets;
233 tcb->txq->tx_bytes += sent_bytes;
235 return sent_packets;
238 /* Tx Free Tasklet function */
239 /* Frees for all the tcb's in all the Tx's */
241 * Scheduled from sending context, so that
242 * the fat Tx lock is not held for too long
243 * in the sending context.
245 static void
246 bnad_tx_free_tasklet(unsigned long bnad_ptr)
248 struct bnad *bnad = (struct bnad *)bnad_ptr;
249 struct bna_tcb *tcb;
250 u32 acked = 0;
251 int i, j;
253 for (i = 0; i < bnad->num_tx; i++) {
254 for (j = 0; j < bnad->num_txq_per_tx; j++) {
255 tcb = bnad->tx_info[i].tcb[j];
256 if (!tcb)
257 continue;
258 if (((u16) (*tcb->hw_consumer_index) !=
259 tcb->consumer_index) &&
260 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
261 &tcb->flags))) {
262 acked = bnad_free_txbufs(bnad, tcb);
263 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
264 &tcb->flags)))
265 bna_ib_ack(tcb->i_dbell, acked);
266 smp_mb__before_clear_bit();
267 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
269 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
270 &tcb->flags)))
271 continue;
272 if (netif_queue_stopped(bnad->netdev)) {
273 if (acked && netif_carrier_ok(bnad->netdev) &&
274 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
275 BNAD_NETIF_WAKE_THRESHOLD) {
276 netif_wake_queue(bnad->netdev);
277 /* TODO */
278 /* Counters for individual TxQs? */
279 BNAD_UPDATE_CTR(bnad,
280 netif_queue_wakeup);
287 static u32
288 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
290 struct net_device *netdev = bnad->netdev;
291 u32 sent = 0;
293 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
294 return 0;
296 sent = bnad_free_txbufs(bnad, tcb);
297 if (sent) {
298 if (netif_queue_stopped(netdev) &&
299 netif_carrier_ok(netdev) &&
300 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
301 BNAD_NETIF_WAKE_THRESHOLD) {
302 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
303 netif_wake_queue(netdev);
304 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
309 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
310 bna_ib_ack(tcb->i_dbell, sent);
312 smp_mb__before_clear_bit();
313 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
315 return sent;
318 /* MSIX Tx Completion Handler */
319 static irqreturn_t
320 bnad_msix_tx(int irq, void *data)
322 struct bna_tcb *tcb = (struct bna_tcb *)data;
323 struct bnad *bnad = tcb->bnad;
325 bnad_tx(bnad, tcb);
327 return IRQ_HANDLED;
330 static void
331 bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
333 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
335 rcb->producer_index = 0;
336 rcb->consumer_index = 0;
338 unmap_q->producer_index = 0;
339 unmap_q->consumer_index = 0;
342 static void
343 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
345 struct bnad_unmap_q *unmap_q;
346 struct bnad_skb_unmap *unmap_array;
347 struct sk_buff *skb;
348 int unmap_cons;
350 unmap_q = rcb->unmap_q;
351 unmap_array = unmap_q->unmap_array;
352 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
353 skb = unmap_array[unmap_cons].skb;
354 if (!skb)
355 continue;
356 unmap_array[unmap_cons].skb = NULL;
357 dma_unmap_single(&bnad->pcidev->dev,
358 dma_unmap_addr(&unmap_array[unmap_cons],
359 dma_addr),
360 rcb->rxq->buffer_size,
361 DMA_FROM_DEVICE);
362 dev_kfree_skb(skb);
364 bnad_reset_rcb(bnad, rcb);
367 static void
368 bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
370 u16 to_alloc, alloced, unmap_prod, wi_range;
371 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
372 struct bnad_skb_unmap *unmap_array;
373 struct bna_rxq_entry *rxent;
374 struct sk_buff *skb;
375 dma_addr_t dma_addr;
377 alloced = 0;
378 to_alloc =
379 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
381 unmap_array = unmap_q->unmap_array;
382 unmap_prod = unmap_q->producer_index;
384 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);
386 while (to_alloc--) {
387 if (!wi_range) {
388 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
389 wi_range);
391 skb = netdev_alloc_skb_ip_align(bnad->netdev,
392 rcb->rxq->buffer_size);
393 if (unlikely(!skb)) {
394 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
395 goto finishing;
397 unmap_array[unmap_prod].skb = skb;
398 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
399 rcb->rxq->buffer_size,
400 DMA_FROM_DEVICE);
401 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
402 dma_addr);
403 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
404 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
406 rxent++;
407 wi_range--;
408 alloced++;
411 finishing:
412 if (likely(alloced)) {
413 unmap_q->producer_index = unmap_prod;
414 rcb->producer_index = unmap_prod;
415 smp_mb();
416 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
417 bna_rxq_prod_indx_doorbell(rcb);
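/*
 * The producer indices are published and smp_mb() is issued before the
 * doorbell ring above, presumably so the hardware never observes the
 * doorbell ahead of the updated queue state.
 */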
421 static inline void
422 bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
424 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
426 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
427 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
428 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
429 bnad_alloc_n_post_rxbufs(bnad, rcb);
430 smp_mb__before_clear_bit();
431 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
435 static u32
436 bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
438 struct bna_cq_entry *cmpl, *next_cmpl;
439 struct bna_rcb *rcb = NULL;
440 unsigned int wi_range, packets = 0, wis = 0;
441 struct bnad_unmap_q *unmap_q;
442 struct bnad_skb_unmap *unmap_array;
443 struct sk_buff *skb;
444 u32 flags, unmap_cons;
445 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
446 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
448 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
449 return 0;
451 prefetch(bnad->netdev);
452 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
453 wi_range);
454 BUG_ON(!(wi_range <= ccb->q_depth));
455 while (cmpl->valid && packets < budget) {
456 packets++;
457 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
459 if (qid0 == cmpl->rxq_id)
460 rcb = ccb->rcb[0];
461 else
462 rcb = ccb->rcb[1];
464 unmap_q = rcb->unmap_q;
465 unmap_array = unmap_q->unmap_array;
466 unmap_cons = unmap_q->consumer_index;
468 skb = unmap_array[unmap_cons].skb;
469 BUG_ON(!(skb));
470 unmap_array[unmap_cons].skb = NULL;
471 dma_unmap_single(&bnad->pcidev->dev,
472 dma_unmap_addr(&unmap_array[unmap_cons],
473 dma_addr),
474 rcb->rxq->buffer_size,
475 DMA_FROM_DEVICE);
476 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
478 /* Should be more efficient ? Performance ? */
479 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
481 wis++;
482 if (likely(--wi_range))
483 next_cmpl = cmpl + 1;
484 else {
485 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
486 wis = 0;
487 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
488 next_cmpl, wi_range);
489 BUG_ON(!(wi_range <= ccb->q_depth));
491 prefetch(next_cmpl);
493 flags = ntohl(cmpl->flags);
494 if (unlikely
495 (flags &
496 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
497 BNA_CQ_EF_TOO_LONG))) {
498 dev_kfree_skb_any(skb);
499 rcb->rxq->rx_packets_with_error++;
500 goto next;
503 skb_put(skb, ntohs(cmpl->length));
504 if (likely
505 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
506 (((flags & BNA_CQ_EF_IPV4) &&
507 (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
508 (flags & BNA_CQ_EF_IPV6)) &&
509 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
510 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
511 skb->ip_summed = CHECKSUM_UNNECESSARY;
512 else
513 skb_checksum_none_assert(skb);
515 rcb->rxq->rx_packets++;
516 rcb->rxq->rx_bytes += skb->len;
517 skb->protocol = eth_type_trans(skb, bnad->netdev);
519 if (flags & BNA_CQ_EF_VLAN)
520 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
522 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
523 struct bnad_rx_ctrl *rx_ctrl;
525 rx_ctrl = (struct bnad_rx_ctrl *) ccb->ctrl;
526 napi_gro_receive(&rx_ctrl->napi, skb);
527 } else {
528 netif_receive_skb(skb);
531 next:
532 cmpl->valid = 0;
533 cmpl = next_cmpl;
536 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
538 if (likely(ccb)) {
539 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
540 bna_ib_ack(ccb->i_dbell, packets);
541 bnad_refill_rxq(bnad, ccb->rcb[0]);
542 if (ccb->rcb[1])
543 bnad_refill_rxq(bnad, ccb->rcb[1]);
544 } else {
545 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
546 bna_ib_ack(ccb->i_dbell, 0);
549 return packets;
552 static void
553 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
555 if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
556 return;
558 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
559 bna_ib_ack(ccb->i_dbell, 0);
562 static void
563 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
565 unsigned long flags;
567 /* Because of polling context */
568 spin_lock_irqsave(&bnad->bna_lock, flags);
569 bnad_enable_rx_irq_unsafe(ccb);
570 spin_unlock_irqrestore(&bnad->bna_lock, flags);
573 static void
574 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
576 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
577 struct napi_struct *napi = &rx_ctrl->napi;
579 if (likely(napi_schedule_prep(napi))) {
580 bnad_disable_rx_irq(bnad, ccb);
581 __napi_schedule(napi);
583 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
586 /* MSIX Rx Path Handler */
587 static irqreturn_t
588 bnad_msix_rx(int irq, void *data)
590 struct bna_ccb *ccb = (struct bna_ccb *)data;
591 struct bnad *bnad = ccb->bnad;
593 bnad_netif_rx_schedule_poll(bnad, ccb);
595 return IRQ_HANDLED;
598 /* Interrupt handlers */
600 /* Mbox Interrupt Handlers */
601 static irqreturn_t
602 bnad_msix_mbox_handler(int irq, void *data)
604 u32 intr_status;
605 unsigned long flags;
606 struct bnad *bnad = (struct bnad *)data;
608 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
609 return IRQ_HANDLED;
611 spin_lock_irqsave(&bnad->bna_lock, flags);
613 bna_intr_status_get(&bnad->bna, intr_status);
615 if (BNA_IS_MBOX_ERR_INTR(intr_status))
616 bna_mbox_handler(&bnad->bna, intr_status);
618 spin_unlock_irqrestore(&bnad->bna_lock, flags);
620 return IRQ_HANDLED;
623 static irqreturn_t
624 bnad_isr(int irq, void *data)
626 int i, j;
627 u32 intr_status;
628 unsigned long flags;
629 struct bnad *bnad = (struct bnad *)data;
630 struct bnad_rx_info *rx_info;
631 struct bnad_rx_ctrl *rx_ctrl;
633 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
634 return IRQ_NONE;
636 bna_intr_status_get(&bnad->bna, intr_status);
638 if (unlikely(!intr_status))
639 return IRQ_NONE;
641 spin_lock_irqsave(&bnad->bna_lock, flags);
643 if (BNA_IS_MBOX_ERR_INTR(intr_status))
644 bna_mbox_handler(&bnad->bna, intr_status);
646 spin_unlock_irqrestore(&bnad->bna_lock, flags);
648 if (!BNA_IS_INTX_DATA_INTR(intr_status))
649 return IRQ_HANDLED;
651 /* Process data interrupts */
652 /* Tx processing */
653 for (i = 0; i < bnad->num_tx; i++) {
654 for (j = 0; j < bnad->num_txq_per_tx; j++)
655 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
657 /* Rx processing */
658 for (i = 0; i < bnad->num_rx; i++) {
659 rx_info = &bnad->rx_info[i];
660 if (!rx_info->rx)
661 continue;
662 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
663 rx_ctrl = &rx_info->rx_ctrl[j];
664 if (rx_ctrl->ccb)
665 bnad_netif_rx_schedule_poll(bnad,
666 rx_ctrl->ccb);
669 return IRQ_HANDLED;
673 * Called in interrupt / callback context
674 * with bna_lock held, so cfg_flags access is OK
676 static void
677 bnad_enable_mbox_irq(struct bnad *bnad)
679 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
681 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
685 * Called with bnad->bna_lock held because of
686 * bnad->cfg_flags access.
688 static void
689 bnad_disable_mbox_irq(struct bnad *bnad)
691 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
693 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
696 static void
697 bnad_set_netdev_perm_addr(struct bnad *bnad)
699 struct net_device *netdev = bnad->netdev;
701 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
702 if (is_zero_ether_addr(netdev->dev_addr))
703 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
706 /* Control Path Handlers */
708 /* Callbacks */
709 void
710 bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
712 bnad_enable_mbox_irq(bnad);
715 void
716 bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
718 bnad_disable_mbox_irq(bnad);
721 void
722 bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
724 complete(&bnad->bnad_completions.ioc_comp);
725 bnad->bnad_completions.ioc_comp_status = status;
728 void
729 bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
731 complete(&bnad->bnad_completions.ioc_comp);
732 bnad->bnad_completions.ioc_comp_status = status;
735 static void
736 bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
738 struct bnad *bnad = (struct bnad *)arg;
740 complete(&bnad->bnad_completions.port_comp);
742 netif_carrier_off(bnad->netdev);
745 void
746 bnad_cb_port_link_status(struct bnad *bnad,
747 enum bna_link_status link_status)
749 bool link_up = 0;
751 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
753 if (link_status == BNA_CEE_UP) {
754 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
755 BNAD_UPDATE_CTR(bnad, cee_up);
756 } else
757 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
759 if (link_up) {
760 if (!netif_carrier_ok(bnad->netdev)) {
761 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
762 if (!tcb)
763 return;
764 pr_warn("bna: %s link up\n",
765 bnad->netdev->name);
766 netif_carrier_on(bnad->netdev);
767 BNAD_UPDATE_CTR(bnad, link_toggle);
768 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
769 /* Force an immediate Transmit Schedule */
770 pr_info("bna: %s TX_STARTED\n",
771 bnad->netdev->name);
772 netif_wake_queue(bnad->netdev);
773 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
774 } else {
775 netif_stop_queue(bnad->netdev);
776 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
779 } else {
780 if (netif_carrier_ok(bnad->netdev)) {
781 pr_warn("bna: %s link down\n",
782 bnad->netdev->name);
783 netif_carrier_off(bnad->netdev);
784 BNAD_UPDATE_CTR(bnad, link_toggle);
789 static void
790 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
791 enum bna_cb_status status)
793 struct bnad *bnad = (struct bnad *)arg;
795 complete(&bnad->bnad_completions.tx_comp);
798 static void
799 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
801 struct bnad_tx_info *tx_info =
802 (struct bnad_tx_info *)tcb->txq->tx->priv;
803 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
805 tx_info->tcb[tcb->id] = tcb;
806 unmap_q->producer_index = 0;
807 unmap_q->consumer_index = 0;
808 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
811 static void
812 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
814 struct bnad_tx_info *tx_info =
815 (struct bnad_tx_info *)tcb->txq->tx->priv;
816 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
818 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
819 cpu_relax();
821 bnad_free_all_txbufs(bnad, tcb);
823 unmap_q->producer_index = 0;
824 unmap_q->consumer_index = 0;
826 smp_mb__before_clear_bit();
827 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
829 tx_info->tcb[tcb->id] = NULL;
832 static void
833 bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
835 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
837 unmap_q->producer_index = 0;
838 unmap_q->consumer_index = 0;
839 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
842 static void
843 bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
845 bnad_free_all_rxbufs(bnad, rcb);
848 static void
849 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
851 struct bnad_rx_info *rx_info =
852 (struct bnad_rx_info *)ccb->cq->rx->priv;
854 rx_info->rx_ctrl[ccb->id].ccb = ccb;
855 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
858 static void
859 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
861 struct bnad_rx_info *rx_info =
862 (struct bnad_rx_info *)ccb->cq->rx->priv;
864 rx_info->rx_ctrl[ccb->id].ccb = NULL;
867 static void
868 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
870 struct bnad_tx_info *tx_info =
871 (struct bnad_tx_info *)tcb->txq->tx->priv;
873 if (tx_info != &bnad->tx_info[0])
874 return;
876 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
877 netif_stop_queue(bnad->netdev);
878 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
881 static void
882 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
884 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
886 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
887 return;
889 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
891 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
892 cpu_relax();
894 bnad_free_all_txbufs(bnad, tcb);
896 unmap_q->producer_index = 0;
897 unmap_q->consumer_index = 0;
899 smp_mb__before_clear_bit();
900 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
903 * Workaround: if the first device enable fails, we can
904 * end up with a zero MAC address. Try to get the MAC
905 * address again here.
907 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
908 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
909 bnad_set_netdev_perm_addr(bnad);
912 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
914 if (netif_carrier_ok(bnad->netdev)) {
915 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
916 netif_wake_queue(bnad->netdev);
917 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
921 static void
922 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
924 /* Delay only once for the whole Tx Path Shutdown */
925 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
926 mdelay(BNAD_TXRX_SYNC_MDELAY);
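/*
 * The 250 ms BNAD_TXRX_SYNC_MDELAY above is taken only once per Tx path
 * shutdown (guarded by BNAD_RF_TX_SHUTDOWN_DELAYED), apparently to let
 * any in-flight Tx completion processing drain first.
 */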
929 static void
930 bnad_cb_rx_cleanup(struct bnad *bnad,
931 struct bna_ccb *ccb)
933 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
935 if (ccb->rcb[1])
936 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
938 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
939 mdelay(BNAD_TXRX_SYNC_MDELAY);
942 static void
943 bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
945 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
947 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
949 if (rcb == rcb->cq->ccb->rcb[0])
950 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
952 bnad_free_all_rxbufs(bnad, rcb);
954 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
956 /* Now allocate & post buffers for this RCB */
957 /* !!Allocation in callback context */
958 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
959 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
960 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
961 bnad_alloc_n_post_rxbufs(bnad, rcb);
962 smp_mb__before_clear_bit();
963 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
967 static void
968 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
969 enum bna_cb_status status)
971 struct bnad *bnad = (struct bnad *)arg;
973 complete(&bnad->bnad_completions.rx_comp);
976 static void
977 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
978 enum bna_cb_status status)
980 bnad->bnad_completions.mcast_comp_status = status;
981 complete(&bnad->bnad_completions.mcast_comp);
984 void
985 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
986 struct bna_stats *stats)
988 if (status == BNA_CB_SUCCESS)
989 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
991 if (!netif_running(bnad->netdev) ||
992 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
993 return;
995 mod_timer(&bnad->stats_timer,
996 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
999 /* Resource allocation, free functions */
1001 static void
1002 bnad_mem_free(struct bnad *bnad,
1003 struct bna_mem_info *mem_info)
1005 int i;
1006 dma_addr_t dma_pa;
1008 if (mem_info->mdl == NULL)
1009 return;
1011 for (i = 0; i < mem_info->num; i++) {
1012 if (mem_info->mdl[i].kva != NULL) {
1013 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1014 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1015 dma_pa);
1016 dma_free_coherent(&bnad->pcidev->dev,
1017 mem_info->mdl[i].len,
1018 mem_info->mdl[i].kva, dma_pa);
1019 } else
1020 kfree(mem_info->mdl[i].kva);
1023 kfree(mem_info->mdl);
1024 mem_info->mdl = NULL;
1027 static int
1028 bnad_mem_alloc(struct bnad *bnad,
1029 struct bna_mem_info *mem_info)
1031 int i;
1032 dma_addr_t dma_pa;
1034 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1035 mem_info->mdl = NULL;
1036 return 0;
1039 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1040 GFP_KERNEL);
1041 if (mem_info->mdl == NULL)
1042 return -ENOMEM;
1044 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1045 for (i = 0; i < mem_info->num; i++) {
1046 mem_info->mdl[i].len = mem_info->len;
1047 mem_info->mdl[i].kva =
1048 dma_alloc_coherent(&bnad->pcidev->dev,
1049 mem_info->len, &dma_pa,
1050 GFP_KERNEL);
1052 if (mem_info->mdl[i].kva == NULL)
1053 goto err_return;
1055 BNA_SET_DMA_ADDR(dma_pa,
1056 &(mem_info->mdl[i].dma));
1058 } else {
1059 for (i = 0; i < mem_info->num; i++) {
1060 mem_info->mdl[i].len = mem_info->len;
1061 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1062 GFP_KERNEL);
1063 if (mem_info->mdl[i].kva == NULL)
1064 goto err_return;
1068 return 0;
1070 err_return:
1071 bnad_mem_free(bnad, mem_info);
1072 return -ENOMEM;
1075 /* Free IRQ for Mailbox */
1076 static void
1077 bnad_mbox_irq_free(struct bnad *bnad,
1078 struct bna_intr_info *intr_info)
1080 int irq;
1081 unsigned long flags;
1083 if (intr_info->idl == NULL)
1084 return;
1086 spin_lock_irqsave(&bnad->bna_lock, flags);
1087 bnad_disable_mbox_irq(bnad);
1088 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1090 irq = BNAD_GET_MBOX_IRQ(bnad);
1091 free_irq(irq, bnad);
1093 kfree(intr_info->idl);
1097 * Allocates IRQ for Mailbox, but keeps it disabled.
1098 * It will be enabled once we get the mbox enable callback
1099 * from bna.
1101 static int
1102 bnad_mbox_irq_alloc(struct bnad *bnad,
1103 struct bna_intr_info *intr_info)
1105 int err = 0;
1106 unsigned long irq_flags, flags;
1107 u32 irq;
1108 irq_handler_t irq_handler;
1110 /* Mbox should use only 1 vector */
1112 intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
1113 if (!intr_info->idl)
1114 return -ENOMEM;
1116 spin_lock_irqsave(&bnad->bna_lock, flags);
1117 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1118 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1119 irq = bnad->msix_table[bnad->msix_num - 1].vector;
1120 irq_flags = 0;
1121 intr_info->intr_type = BNA_INTR_T_MSIX;
1122 intr_info->idl[0].vector = bnad->msix_num - 1;
1123 } else {
1124 irq_handler = (irq_handler_t)bnad_isr;
1125 irq = bnad->pcidev->irq;
1126 irq_flags = IRQF_SHARED;
1127 intr_info->intr_type = BNA_INTR_T_INTX;
1128 /* intr_info->idl.vector = 0 ? */
1130 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1131 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1134 * Set the Mbox IRQ disable flag, so that the IRQ handler
1135 * called from request_irq() for SHARED IRQs does not execute
1137 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1139 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1141 err = request_irq(irq, irq_handler, irq_flags,
1142 bnad->mbox_irq_name, bnad);
1144 if (err) {
1145 kfree(intr_info->idl);
1146 intr_info->idl = NULL;
1149 return err;
1152 static void
1153 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1155 kfree(intr_info->idl);
1156 intr_info->idl = NULL;
1159 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1160 static int
1161 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1162 uint txrx_id, struct bna_intr_info *intr_info)
1164 int i, vector_start = 0;
1165 u32 cfg_flags;
1166 unsigned long flags;
1168 spin_lock_irqsave(&bnad->bna_lock, flags);
1169 cfg_flags = bnad->cfg_flags;
1170 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1172 if (cfg_flags & BNAD_CF_MSIX) {
1173 intr_info->intr_type = BNA_INTR_T_MSIX;
1174 intr_info->idl = kcalloc(intr_info->num,
1175 sizeof(struct bna_intr_descr),
1176 GFP_KERNEL);
1177 if (!intr_info->idl)
1178 return -ENOMEM;
1180 switch (src) {
1181 case BNAD_INTR_TX:
1182 vector_start = txrx_id;
1183 break;
1185 case BNAD_INTR_RX:
1186 vector_start = bnad->num_tx * bnad->num_txq_per_tx +
1187 txrx_id;
1188 break;
1190 default:
1191 BUG();
1194 for (i = 0; i < intr_info->num; i++)
1195 intr_info->idl[i].vector = vector_start + i;
1196 } else {
1197 intr_info->intr_type = BNA_INTR_T_INTX;
1198 intr_info->num = 1;
1199 intr_info->idl = kcalloc(intr_info->num,
1200 sizeof(struct bna_intr_descr),
1201 GFP_KERNEL);
1202 if (!intr_info->idl)
1203 return -ENOMEM;
1205 switch (src) {
1206 case BNAD_INTR_TX:
1207 intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
1208 break;
1210 case BNAD_INTR_RX:
1211 intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
1212 break;
1215 return 0;
1219 * NOTE: Should be called for MSIX only
1220 * Unregisters Tx MSIX vector(s) from the kernel
1222 static void
1223 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1224 int num_txqs)
1226 int i;
1227 int vector_num;
1229 for (i = 0; i < num_txqs; i++) {
1230 if (tx_info->tcb[i] == NULL)
1231 continue;
1233 vector_num = tx_info->tcb[i]->intr_vector;
1234 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1239 * NOTE: Should be called for MSIX only
1240 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1242 static int
1243 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1244 uint tx_id, int num_txqs)
1246 int i;
1247 int err;
1248 int vector_num;
1250 for (i = 0; i < num_txqs; i++) {
1251 vector_num = tx_info->tcb[i]->intr_vector;
1252 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1253 tx_id + tx_info->tcb[i]->id);
1254 err = request_irq(bnad->msix_table[vector_num].vector,
1255 (irq_handler_t)bnad_msix_tx, 0,
1256 tx_info->tcb[i]->name,
1257 tx_info->tcb[i]);
1258 if (err)
1259 goto err_return;
1262 return 0;
1264 err_return:
1265 if (i > 0)
1266 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1267 return -1;
1271 * NOTE: Should be called for MSIX only
1272 * Unregisters Rx MSIX vector(s) from the kernel
1274 static void
1275 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1276 int num_rxps)
1278 int i;
1279 int vector_num;
1281 for (i = 0; i < num_rxps; i++) {
1282 if (rx_info->rx_ctrl[i].ccb == NULL)
1283 continue;
1285 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1286 free_irq(bnad->msix_table[vector_num].vector,
1287 rx_info->rx_ctrl[i].ccb);
1292 * NOTE: Should be called for MSIX only
1293 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1295 static int
1296 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1297 uint rx_id, int num_rxps)
1299 int i;
1300 int err;
1301 int vector_num;
1303 for (i = 0; i < num_rxps; i++) {
1304 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1305 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1306 bnad->netdev->name,
1307 rx_id + rx_info->rx_ctrl[i].ccb->id);
1308 err = request_irq(bnad->msix_table[vector_num].vector,
1309 (irq_handler_t)bnad_msix_rx, 0,
1310 rx_info->rx_ctrl[i].ccb->name,
1311 rx_info->rx_ctrl[i].ccb);
1312 if (err)
1313 goto err_return;
1316 return 0;
1318 err_return:
1319 if (i > 0)
1320 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1321 return -1;
1324 /* Free Tx object Resources */
1325 static void
1326 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1328 int i;
1330 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1331 if (res_info[i].res_type == BNA_RES_T_MEM)
1332 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1333 else if (res_info[i].res_type == BNA_RES_T_INTR)
1334 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1338 /* Allocates memory and interrupt resources for Tx object */
1339 static int
1340 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1341 uint tx_id)
1343 int i, err = 0;
1345 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1346 if (res_info[i].res_type == BNA_RES_T_MEM)
1347 err = bnad_mem_alloc(bnad,
1348 &res_info[i].res_u.mem_info);
1349 else if (res_info[i].res_type == BNA_RES_T_INTR)
1350 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1351 &res_info[i].res_u.intr_info);
1352 if (err)
1353 goto err_return;
1355 return 0;
1357 err_return:
1358 bnad_tx_res_free(bnad, res_info);
1359 return err;
1362 /* Free Rx object Resources */
1363 static void
1364 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1366 int i;
1368 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1369 if (res_info[i].res_type == BNA_RES_T_MEM)
1370 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1371 else if (res_info[i].res_type == BNA_RES_T_INTR)
1372 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1376 /* Allocates memory and interrupt resources for Rx object */
1377 static int
1378 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1379 uint rx_id)
1381 int i, err = 0;
1383 /* All memory needs to be allocated before setup_ccbs */
1384 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1385 if (res_info[i].res_type == BNA_RES_T_MEM)
1386 err = bnad_mem_alloc(bnad,
1387 &res_info[i].res_u.mem_info);
1388 else if (res_info[i].res_type == BNA_RES_T_INTR)
1389 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1390 &res_info[i].res_u.intr_info);
1391 if (err)
1392 goto err_return;
1394 return 0;
1396 err_return:
1397 bnad_rx_res_free(bnad, res_info);
1398 return err;
1401 /* Timer callbacks */
1402 /* a) IOC timer */
1403 static void
1404 bnad_ioc_timeout(unsigned long data)
1406 struct bnad *bnad = (struct bnad *)data;
1407 unsigned long flags;
1409 spin_lock_irqsave(&bnad->bna_lock, flags);
1410 bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
1411 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1414 static void
1415 bnad_ioc_hb_check(unsigned long data)
1417 struct bnad *bnad = (struct bnad *)data;
1418 unsigned long flags;
1420 spin_lock_irqsave(&bnad->bna_lock, flags);
1421 bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1425 static void
1426 bnad_iocpf_timeout(unsigned long data)
1428 struct bnad *bnad = (struct bnad *)data;
1429 unsigned long flags;
1431 spin_lock_irqsave(&bnad->bna_lock, flags);
1432 bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
1433 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1436 static void
1437 bnad_iocpf_sem_timeout(unsigned long data)
1439 struct bnad *bnad = (struct bnad *)data;
1440 unsigned long flags;
1442 spin_lock_irqsave(&bnad->bna_lock, flags);
1443 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
1444 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1448 * All timer routines use bnad->bna_lock to protect against
1449 * the following race, which may occur in case of no locking:
1450 *  Time    CPU m           CPU n
1451 *  0       1 = test_bit
1452 *  1                       clear_bit
1453 *  2                       del_timer_sync
1454 *  3       mod_timer
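 * Without the lock, CPU n could clear the RUNNING bit and call
 * del_timer_sync() between CPU m's test_bit and mod_timer, so CPU m
 * would re-arm a timer that was just deleted.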
1457 /* b) Dynamic Interrupt Moderation Timer */
1458 static void
1459 bnad_dim_timeout(unsigned long data)
1461 struct bnad *bnad = (struct bnad *)data;
1462 struct bnad_rx_info *rx_info;
1463 struct bnad_rx_ctrl *rx_ctrl;
1464 int i, j;
1465 unsigned long flags;
1467 if (!netif_carrier_ok(bnad->netdev))
1468 return;
1470 spin_lock_irqsave(&bnad->bna_lock, flags);
1471 for (i = 0; i < bnad->num_rx; i++) {
1472 rx_info = &bnad->rx_info[i];
1473 if (!rx_info->rx)
1474 continue;
1475 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1476 rx_ctrl = &rx_info->rx_ctrl[j];
1477 if (!rx_ctrl->ccb)
1478 continue;
1479 bna_rx_dim_update(rx_ctrl->ccb);
1483 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1484 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1485 mod_timer(&bnad->dim_timer,
1486 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1490 /* c) Statistics Timer */
1491 static void
1492 bnad_stats_timeout(unsigned long data)
1494 struct bnad *bnad = (struct bnad *)data;
1495 unsigned long flags;
1497 if (!netif_running(bnad->netdev) ||
1498 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1499 return;
1501 spin_lock_irqsave(&bnad->bna_lock, flags);
1502 bna_stats_get(&bnad->bna);
1503 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1507 * Set up timer for DIM
1508 * Called with bnad->bna_lock held
1510 void
1511 bnad_dim_timer_start(struct bnad *bnad)
1513 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1514 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1515 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1516 (unsigned long)bnad);
1517 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1518 mod_timer(&bnad->dim_timer,
1519 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1524 * Set up timer for statistics
1525 * Called with mutex_lock(&bnad->conf_mutex) held
1527 static void
1528 bnad_stats_timer_start(struct bnad *bnad)
1530 unsigned long flags;
1532 spin_lock_irqsave(&bnad->bna_lock, flags);
1533 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1534 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1535 (unsigned long)bnad);
1536 mod_timer(&bnad->stats_timer,
1537 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1539 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1543 * Stops the stats timer
1544 * Called with mutex_lock(&bnad->conf_mutex) held
1546 static void
1547 bnad_stats_timer_stop(struct bnad *bnad)
1549 int to_del = 0;
1550 unsigned long flags;
1552 spin_lock_irqsave(&bnad->bna_lock, flags);
1553 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1554 to_del = 1;
1555 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1556 if (to_del)
1557 del_timer_sync(&bnad->stats_timer);
1560 /* Utilities */
1562 static void
1563 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1565 int i = 1; /* Index 0 has broadcast address */
1566 struct netdev_hw_addr *mc_addr;
1568 netdev_for_each_mc_addr(mc_addr, netdev) {
1569 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1570 ETH_ALEN);
1571 i++;
1575 static int
1576 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1578 struct bnad_rx_ctrl *rx_ctrl =
1579 container_of(napi, struct bnad_rx_ctrl, napi);
1580 struct bna_ccb *ccb;
1581 struct bnad *bnad;
1582 int rcvd = 0;
1584 ccb = rx_ctrl->ccb;
1586 bnad = ccb->bnad;
1588 if (!netif_carrier_ok(bnad->netdev))
1589 goto poll_exit;
1591 rcvd = bnad_poll_cq(bnad, ccb, budget);
1592 if (rcvd == budget)
1593 return rcvd;
1595 poll_exit:
1596 napi_complete((napi));
1598 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1600 bnad_enable_rx_irq(bnad, ccb);
1601 return rcvd;
1604 static void
1605 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1607 struct bnad_rx_ctrl *rx_ctrl;
1608 int i;
1610 /* Initialize & enable NAPI */
1611 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1612 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1614 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1615 bnad_napi_poll_rx, 64);
1617 napi_enable(&rx_ctrl->napi);
1621 static void
1622 bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1624 int i;
1626 /* First disable and then clean up */
1627 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1628 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1629 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1633 /* Should be called with conf_lock held */
1634 void
1635 bnad_cleanup_tx(struct bnad *bnad, uint tx_id)
1637 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1638 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1639 unsigned long flags;
1641 if (!tx_info->tx)
1642 return;
1644 init_completion(&bnad->bnad_completions.tx_comp);
1645 spin_lock_irqsave(&bnad->bna_lock, flags);
1646 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1647 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1648 wait_for_completion(&bnad->bnad_completions.tx_comp);
1650 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1651 bnad_tx_msix_unregister(bnad, tx_info,
1652 bnad->num_txq_per_tx);
1654 spin_lock_irqsave(&bnad->bna_lock, flags);
1655 bna_tx_destroy(tx_info->tx);
1656 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1658 tx_info->tx = NULL;
1660 if (0 == tx_id)
1661 tasklet_kill(&bnad->tx_free_tasklet);
1663 bnad_tx_res_free(bnad, res_info);
1666 /* Should be called with conf_lock held */
1667 int
1668 bnad_setup_tx(struct bnad *bnad, uint tx_id)
1670 int err;
1671 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1672 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1673 struct bna_intr_info *intr_info =
1674 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1675 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1676 struct bna_tx_event_cbfn tx_cbfn;
1677 struct bna_tx *tx;
1678 unsigned long flags;
1680 /* Initialize the Tx object configuration */
1681 tx_config->num_txq = bnad->num_txq_per_tx;
1682 tx_config->txq_depth = bnad->txq_depth;
1683 tx_config->tx_type = BNA_TX_T_REGULAR;
1685 /* Initialize the tx event handlers */
1686 tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
1687 tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
1688 tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
1689 tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
1690 tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
1692 /* Get BNA's resource requirement for one tx object */
1693 spin_lock_irqsave(&bnad->bna_lock, flags);
1694 bna_tx_res_req(bnad->num_txq_per_tx,
1695 bnad->txq_depth, res_info);
1696 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1698 /* Fill Unmap Q memory requirements */
1699 BNAD_FILL_UNMAPQ_MEM_REQ(
1700 &res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1701 bnad->num_txq_per_tx,
1702 BNAD_TX_UNMAPQ_DEPTH);
1704 /* Allocate resources */
1705 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1706 if (err)
1707 return err;
1709 /* Ask BNA to create one Tx object, supplying required resources */
1710 spin_lock_irqsave(&bnad->bna_lock, flags);
1711 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1712 tx_info);
1713 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1714 if (!tx)
1715 goto err_return;
1716 tx_info->tx = tx;
1718 /* Register ISR for the Tx object */
1719 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1720 err = bnad_tx_msix_register(bnad, tx_info,
1721 tx_id, bnad->num_txq_per_tx);
1722 if (err)
1723 goto err_return;
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 bna_tx_enable(tx);
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1730 return 0;
1732 err_return:
1733 bnad_tx_res_free(bnad, res_info);
1734 return err;
1737 /* Setup the rx config for bna_rx_create */
1738 /* bnad decides the configuration */
1739 static void
1740 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
1742 rx_config->rx_type = BNA_RX_T_REGULAR;
1743 rx_config->num_paths = bnad->num_rxp_per_rx;
1745 if (bnad->num_rxp_per_rx > 1) {
1746 rx_config->rss_status = BNA_STATUS_T_ENABLED;
1747 rx_config->rss_config.hash_type =
1748 (BFI_RSS_T_V4_TCP |
1749 BFI_RSS_T_V6_TCP |
1750 BFI_RSS_T_V4_IP |
1751 BFI_RSS_T_V6_IP);
1752 rx_config->rss_config.hash_mask =
1753 bnad->num_rxp_per_rx - 1;
1754 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
1755 sizeof(rx_config->rss_config.toeplitz_hash_key));
1756 } else {
1757 rx_config->rss_status = BNA_STATUS_T_DISABLED;
1758 memset(&rx_config->rss_config, 0,
1759 sizeof(rx_config->rss_config));
1761 rx_config->rxp_type = BNA_RXP_SLR;
1762 rx_config->q_depth = bnad->rxq_depth;
1764 rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
1766 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
1769 /* Called with mutex_lock(&bnad->conf_mutex) held */
1770 void
1771 bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
1773 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1774 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1775 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1776 unsigned long flags;
1777 int dim_timer_del = 0;
1779 if (!rx_info->rx)
1780 return;
1782 if (0 == rx_id) {
1783 spin_lock_irqsave(&bnad->bna_lock, flags);
1784 dim_timer_del = bnad_dim_timer_running(bnad);
1785 if (dim_timer_del)
1786 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788 if (dim_timer_del)
1789 del_timer_sync(&bnad->dim_timer);
1792 bnad_napi_disable(bnad, rx_id);
1794 init_completion(&bnad->bnad_completions.rx_comp);
1795 spin_lock_irqsave(&bnad->bna_lock, flags);
1796 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
1797 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1798 wait_for_completion(&bnad->bnad_completions.rx_comp);
1800 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1801 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1803 spin_lock_irqsave(&bnad->bna_lock, flags);
1804 bna_rx_destroy(rx_info->rx);
1805 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1807 rx_info->rx = NULL;
1809 bnad_rx_res_free(bnad, res_info);
1812 /* Called with mutex_lock(&bnad->conf_mutex) held */
1813 int
1814 bnad_setup_rx(struct bnad *bnad, uint rx_id)
1816 int err;
1817 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1818 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
1819 struct bna_intr_info *intr_info =
1820 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1821 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1822 struct bna_rx_event_cbfn rx_cbfn;
1823 struct bna_rx *rx;
1824 unsigned long flags;
1826 /* Initialize the Rx object configuration */
1827 bnad_init_rx_config(bnad, rx_config);
1829 /* Initialize the Rx event handlers */
1830 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1831 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1832 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1833 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1834 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
1835 rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
1837 /* Get BNA's resource requirement for one Rx object */
1838 spin_lock_irqsave(&bnad->bna_lock, flags);
1839 bna_rx_res_req(rx_config, res_info);
1840 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1842 /* Fill Unmap Q memory requirements */
1843 BNAD_FILL_UNMAPQ_MEM_REQ(
1844 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1845 rx_config->num_paths +
1846 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
1847 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);
1849 /* Allocate resource */
1850 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
1851 if (err)
1852 return err;
1854 /* Ask BNA to create one Rx object, supplying required resources */
1855 spin_lock_irqsave(&bnad->bna_lock, flags);
1856 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
1857 rx_info);
1858 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1859 if (!rx)
1860 goto err_return;
1861 rx_info->rx = rx;
1863 /* Register ISR for the Rx object */
1864 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1865 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
1866 rx_config->num_paths);
1867 if (err)
1868 goto err_return;
1871 /* Enable NAPI */
1872 bnad_napi_enable(bnad, rx_id);
1874 spin_lock_irqsave(&bnad->bna_lock, flags);
1875 if (0 == rx_id) {
1876 /* Set up Dynamic Interrupt Moderation Vector */
1877 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
1878 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
1880 /* Enable VLAN filtering only on the default Rx */
1881 bna_rx_vlanfilter_enable(rx);
1883 /* Start the DIM timer */
1884 bnad_dim_timer_start(bnad);
1887 bna_rx_enable(rx);
1888 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1890 return 0;
1892 err_return:
1893 bnad_cleanup_rx(bnad, rx_id);
1894 return err;
1897 /* Called with conf_lock & bnad->bna_lock held */
1898 void
1899 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
1901 struct bnad_tx_info *tx_info;
1903 tx_info = &bnad->tx_info[0];
1904 if (!tx_info->tx)
1905 return;
1907 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
1910 /* Called with conf_lock & bnad->bna_lock held */
1911 void
1912 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
1914 struct bnad_rx_info *rx_info;
1915 int i;
1917 for (i = 0; i < bnad->num_rx; i++) {
1918 rx_info = &bnad->rx_info[i];
1919 if (!rx_info->rx)
1920 continue;
1921 bna_rx_coalescing_timeo_set(rx_info->rx,
1922 bnad->rx_coalescing_timeo);
1927 * Called with bnad->bna_lock held
1929 static int
1930 bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
1932 int ret;
1934 if (!is_valid_ether_addr(mac_addr))
1935 return -EADDRNOTAVAIL;
1937 /* If datapath is down, pretend everything went through */
1938 if (!bnad->rx_info[0].rx)
1939 return 0;
1941 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
1942 if (ret != BNA_CB_SUCCESS)
1943 return -EADDRNOTAVAIL;
1945 return 0;
1948 /* Should be called with conf_lock held */
1949 static int
1950 bnad_enable_default_bcast(struct bnad *bnad)
1952 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
1953 int ret;
1954 unsigned long flags;
1956 init_completion(&bnad->bnad_completions.mcast_comp);
1958 spin_lock_irqsave(&bnad->bna_lock, flags);
1959 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
1960 bnad_cb_rx_mcast_add);
1961 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1963 if (ret == BNA_CB_SUCCESS)
1964 wait_for_completion(&bnad->bnad_completions.mcast_comp);
1965 else
1966 return -ENODEV;
1968 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
1969 return -ENODEV;
1971 return 0;
1974 /* Called with bnad_conf_lock() held */
1975 static void
1976 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1978 u16 vid;
1979 unsigned long flags;
1981 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1983 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
1984 spin_lock_irqsave(&bnad->bna_lock, flags);
1985 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
1986 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1990 /* Statistics utilities */
1991 void
1992 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
1994 int i, j;
1996 for (i = 0; i < bnad->num_rx; i++) {
1997 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1998 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
1999 stats->rx_packets += bnad->rx_info[i].
2000 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2001 stats->rx_bytes += bnad->rx_info[i].
2002 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2003 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2004 bnad->rx_info[i].rx_ctrl[j].ccb->
2005 rcb[1]->rxq) {
2006 stats->rx_packets +=
2007 bnad->rx_info[i].rx_ctrl[j].
2008 ccb->rcb[1]->rxq->rx_packets;
2009 stats->rx_bytes +=
2010 bnad->rx_info[i].rx_ctrl[j].
2011 ccb->rcb[1]->rxq->rx_bytes;
2016 for (i = 0; i < bnad->num_tx; i++) {
2017 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2018 if (bnad->tx_info[i].tcb[j]) {
2019 stats->tx_packets +=
2020 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2021 stats->tx_bytes +=
2022 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2029 * Must be called with the bna_lock held.
2031 void
2032 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2034 struct bfi_ll_stats_mac *mac_stats;
2035 u64 bmap;
2036 int i;
2038 mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
2039 stats->rx_errors =
2040 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2041 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2042 mac_stats->rx_undersize;
2043 stats->tx_errors = mac_stats->tx_fcs_error +
2044 mac_stats->tx_undersize;
2045 stats->rx_dropped = mac_stats->rx_drop;
2046 stats->tx_dropped = mac_stats->tx_drop;
2047 stats->multicast = mac_stats->rx_multicast;
2048 stats->collisions = mac_stats->tx_total_collision;
2050 stats->rx_length_errors = mac_stats->rx_frame_length_error;
2052 /* receive ring buffer overflow ?? */
2054 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2055 stats->rx_frame_errors = mac_stats->rx_alignment_error;
2056 /* recv'r fifo overrun */
2057 bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
2058 ((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
2059 for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
2060 if (bmap & 1) {
2061 stats->rx_fifo_errors +=
2062 bnad->stats.bna_stats->
2063 hw_stats->rxf_stats[i].frame_drops;
2064 break;
2066 bmap >>= 1;
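/*
 * rxf_bmap is a 64-bit bitmap of active Rx functions; the loop stops at
 * the first active one, so rx_fifo_errors reflects only that function's
 * frame_drops count.
 */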
2070 static void
2071 bnad_mbox_irq_sync(struct bnad *bnad)
2073 u32 irq;
2074 unsigned long flags;
2076 spin_lock_irqsave(&bnad->bna_lock, flags);
2077 if (bnad->cfg_flags & BNAD_CF_MSIX)
2078 irq = bnad->msix_table[bnad->msix_num - 1].vector;
2079 else
2080 irq = bnad->pcidev->irq;
2081 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2083 synchronize_irq(irq);
2086 /* Utility used by bnad_start_xmit, for doing TSO */
2087 static int
2088 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2090 int err;
2092 /* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 is defined since 2.6.18. */
2093 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ||
2094 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6));
2095 if (skb_header_cloned(skb)) {
2096 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2097 if (err) {
2098 BNAD_UPDATE_CTR(bnad, tso_err);
2099 return err;
2104 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2105 * excluding the length field.
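 * The calls below pass a length of 0 to csum_tcpudp_magic() /
 * csum_ipv6_magic(), so the seed covers only the addresses and
 * IPPROTO_TCP; the segment length is presumably folded in by the
 * hardware during segmentation.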
2107 if (skb->protocol == htons(ETH_P_IP)) {
2108 struct iphdr *iph = ip_hdr(skb);
2110 /* Do we really need these? */
2111 iph->tot_len = 0;
2112 iph->check = 0;
2114 tcp_hdr(skb)->check =
2115 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2116 IPPROTO_TCP, 0);
2117 BNAD_UPDATE_CTR(bnad, tso4);
2118 } else {
2119 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2121 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6)));
2122 ipv6h->payload_len = 0;
2123 tcp_hdr(skb)->check =
2124 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2125 IPPROTO_TCP, 0);
2126 BNAD_UPDATE_CTR(bnad, tso6);
2129 return 0;
2133 * Initialize Q numbers depending on Rx Paths
2134 * Called with bnad->bna_lock held, because of cfg_flags
2135 * access.
2137 static void
2138 bnad_q_num_init(struct bnad *bnad)
2140 int rxps;
2142 rxps = min((uint)num_online_cpus(),
2143 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX));
2145 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2146 rxps = 1; /* INTx */
2148 bnad->num_rx = 1;
2149 bnad->num_tx = 1;
2150 bnad->num_rxp_per_rx = rxps;
2151 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2155 * Adjusts the Q numbers, given a number of msix vectors.
2156 * Give preference to RSS as opposed to Tx priority Queues;
2157 * in such a case, just use 1 Tx Q.
2158 * Called with bnad->bna_lock held because of cfg_flags access
2160 static void
2161 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2163 bnad->num_txq_per_tx = 1;
2164 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2165 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2166 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2167 bnad->num_rxp_per_rx = msix_vectors -
2168 (bnad->num_tx * bnad->num_txq_per_tx) -
2169 BNAD_MAILBOX_MSIX_VECTORS;
2170 } else
2171 bnad->num_rxp_per_rx = 1;
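/*
 * Vector budget sketch (derived from the code above): of the available
 * MSI-X vectors, BNAD_MAILBOX_MSIX_VECTORS go to the mailbox and
 * num_tx * num_txq_per_tx (here 1) to Tx; whatever remains becomes the
 * number of Rx paths, falling back to 1 when the budget is too small or
 * MSI-X is off.
 */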
2174 /* Enable / disable device */
2175 static void
2176 bnad_device_disable(struct bnad *bnad)
2178 unsigned long flags;
2180 init_completion(&bnad->bnad_completions.ioc_comp);
2182 spin_lock_irqsave(&bnad->bna_lock, flags);
2183 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP);
2184 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2186 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2189 static int
2190 bnad_device_enable(struct bnad *bnad)
2192 int err = 0;
2193 unsigned long flags;
2195 init_completion(&bnad->bnad_completions.ioc_comp);
2197 spin_lock_irqsave(&bnad->bna_lock, flags);
2198 bna_device_enable(&bnad->bna.device);
2199 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2201 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2203 if (bnad->bnad_completions.ioc_comp_status)
2204 err = bnad->bnad_completions.ioc_comp_status;
2206 return err;
2209 /* Free BNA resources */
2210 static void
2211 bnad_res_free(struct bnad *bnad)
2213 int i;
2214 struct bna_res_info *res_info = &bnad->res_info[0];
2216 for (i = 0; i < BNA_RES_T_MAX; i++) {
2217 if (res_info[i].res_type == BNA_RES_T_MEM)
2218 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2219 else
2220 bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info);
2224 /* Allocates memory and interrupt resources for BNA */
2225 static int
2226 bnad_res_alloc(struct bnad *bnad)
2228 int i, err;
2229 struct bna_res_info *res_info = &bnad->res_info[0];
2231 for (i = 0; i < BNA_RES_T_MAX; i++) {
2232 if (res_info[i].res_type == BNA_RES_T_MEM)
2233 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2234 else
2235 err = bnad_mbox_irq_alloc(bnad,
2236 &res_info[i].res_u.intr_info);
2237 if (err)
2238 goto err_return;
2240 return 0;
2242 err_return:
2243 bnad_res_free(bnad);
2244 return err;
2247 /* Interrupt enable / disable */
2248 static void
2249 bnad_enable_msix(struct bnad *bnad)
2251 int i, ret;
2252 unsigned long flags;
2254 spin_lock_irqsave(&bnad->bna_lock, flags);
2255 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2256 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2257 return;
2259 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2261 if (bnad->msix_table)
2262 return;
2264 bnad->msix_table =
2265 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2267 if (!bnad->msix_table)
2268 goto intx_mode;
2270 for (i = 0; i < bnad->msix_num; i++)
2271 bnad->msix_table[i].entry = i;
2273 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2274 if (ret > 0) {
2275 /* Not enough MSI-X vectors. */
2277 spin_lock_irqsave(&bnad->bna_lock, flags);
2278 /* ret = #of vectors that we got */
2279 bnad_q_num_adjust(bnad, ret);
2280 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2282 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx)
2283 + (bnad->num_rx
2284 * bnad->num_rxp_per_rx) +
2285 BNAD_MAILBOX_MSIX_VECTORS;
2287 /* Try once more with adjusted numbers */
2288 /* If this fails, fall back to INTx */
2289 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2290 bnad->msix_num);
2291 if (ret)
2292 goto intx_mode;
2294 } else if (ret < 0)
2295 goto intx_mode;
2296 return;
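/* MSI-X could not be enabled (or is disabled): drop the vector table,
 * clear BNAD_CF_MSIX and recompute queue counts for INTx */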
2298 intx_mode:
2300 kfree(bnad->msix_table);
2301 bnad->msix_table = NULL;
2302 bnad->msix_num = 0;
2303 spin_lock_irqsave(&bnad->bna_lock, flags);
2304 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2305 bnad_q_num_init(bnad);
2306 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2309 static void
2310 bnad_disable_msix(struct bnad *bnad)
2312 u32 cfg_flags;
2313 unsigned long flags;
2315 spin_lock_irqsave(&bnad->bna_lock, flags);
2316 cfg_flags = bnad->cfg_flags;
2317 if (bnad->cfg_flags & BNAD_CF_MSIX)
2318 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2319 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2321 if (cfg_flags & BNAD_CF_MSIX) {
2322 pci_disable_msix(bnad->pcidev);
2323 kfree(bnad->msix_table);
2324 bnad->msix_table = NULL;
2328 /* Netdev entry points */
2329 static int
2330 bnad_open(struct net_device *netdev)
2332 int err;
2333 struct bnad *bnad = netdev_priv(netdev);
2334 struct bna_pause_config pause_config;
2335 int mtu;
2336 unsigned long flags;
2338 mutex_lock(&bnad->conf_mutex);
2340 /* Tx */
2341 err = bnad_setup_tx(bnad, 0);
2342 if (err)
2343 goto err_return;
2345 /* Rx */
2346 err = bnad_setup_rx(bnad, 0);
2347 if (err)
2348 goto cleanup_tx;
2350 /* Port */
2351 pause_config.tx_pause = 0;
2352 pause_config.rx_pause = 0;
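/* Maximum frame size handed to hardware: Ethernet header + MTU + FCS */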
2354 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
2356 spin_lock_irqsave(&bnad->bna_lock, flags);
2357 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2358 bna_port_pause_config(&bnad->bna.port, &pause_config, NULL);
2359 bna_port_enable(&bnad->bna.port);
2360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2362 /* Enable broadcast */
2363 bnad_enable_default_bcast(bnad);
2365 /* Restore VLANs, if any */
2366 bnad_restore_vlans(bnad, 0);
2368 /* Set the UCAST address */
2369 spin_lock_irqsave(&bnad->bna_lock, flags);
2370 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2371 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2373 /* Start the stats timer */
2374 bnad_stats_timer_start(bnad);
2376 mutex_unlock(&bnad->conf_mutex);
2378 return 0;
2380 cleanup_tx:
2381 bnad_cleanup_tx(bnad, 0);
2383 err_return:
2384 mutex_unlock(&bnad->conf_mutex);
2385 return err;
2388 static int
2389 bnad_stop(struct net_device *netdev)
2391 struct bnad *bnad = netdev_priv(netdev);
2392 unsigned long flags;
2394 mutex_lock(&bnad->conf_mutex);
2396 /* Stop the stats timer */
2397 bnad_stats_timer_stop(bnad);
2399 init_completion(&bnad->bnad_completions.port_comp);
2401 spin_lock_irqsave(&bnad->bna_lock, flags);
2402 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP,
2403 bnad_cb_port_disabled);
2404 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2406 wait_for_completion(&bnad->bnad_completions.port_comp);
2408 bnad_cleanup_tx(bnad, 0);
2409 bnad_cleanup_rx(bnad, 0);
2411 /* Synchronize mailbox IRQ */
2412 bnad_mbox_irq_sync(bnad);
2414 mutex_unlock(&bnad->conf_mutex);
2416 return 0;
2419 /* TX */
2421 * bnad_start_xmit : Netdev entry point for Transmit
2422 * Called under lock held by net_device
2424 static netdev_tx_t
2425 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2427 struct bnad *bnad = netdev_priv(netdev);
2429 u16 txq_prod, vlan_tag = 0;
2430 u32 unmap_prod, wis, wis_used, wi_range;
2431 u32 vectors, vect_id, i, acked;
2432 u32 tx_id;
2433 int err;
2435 struct bnad_tx_info *tx_info;
2436 struct bna_tcb *tcb;
2437 struct bnad_unmap_q *unmap_q;
2438 dma_addr_t dma_addr;
2439 struct bna_txq_entry *txqent;
2440 bna_txq_wi_ctrl_flag_t flags;
2442 if (unlikely
2443 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
2444 dev_kfree_skb(skb);
2445 return NETDEV_TX_OK;
2448 tx_id = 0;
2450 tx_info = &bnad->tx_info[tx_id];
2451 tcb = tx_info->tcb[tx_id];
2452 unmap_q = tcb->unmap_q;
2455 * Handles a Tx that was scheduled between the clearing of
2456 * BNAD_TXQ_TX_STARTED and the netif_stop_queue() call.
2458 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2459 dev_kfree_skb(skb);
2460 return NETDEV_TX_OK;
2463 vectors = 1 + skb_shinfo(skb)->nr_frags;
2464 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2465 dev_kfree_skb(skb);
2466 return NETDEV_TX_OK;
2468 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2469 acked = 0;
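/* Not enough free work items or unmap entries: try to reclaim completed
 * Tx buffers, otherwise stop the queue and re-check to close the race
 * with the wakeup path */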
2470 if (unlikely
2471 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2472 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2473 if ((u16) (*tcb->hw_consumer_index) !=
2474 tcb->consumer_index &&
2475 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2476 acked = bnad_free_txbufs(bnad, tcb);
2477 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2478 bna_ib_ack(tcb->i_dbell, acked);
2479 smp_mb__before_clear_bit();
2480 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2481 } else {
2482 netif_stop_queue(netdev);
2483 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2486 smp_mb();
2488 * Check again to deal with the race between netif_stop_queue()
2489 * here and netif_wake_queue() in the interrupt handler, which
2490 * does not run under the netif tx lock.
2492 if (likely
2493 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2494 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2495 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2496 return NETDEV_TX_BUSY;
2497 } else {
2498 netif_wake_queue(netdev);
2499 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2503 unmap_prod = unmap_q->producer_index;
2504 wis_used = 1;
2505 vect_id = 0;
2506 flags = 0;
2508 txq_prod = tcb->producer_index;
2509 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2510 BUG_ON(!(wi_range <= tcb->q_depth));
2511 txqent->hdr.wi.reserved = 0;
2512 txqent->hdr.wi.num_vectors = vectors;
2513 txqent->hdr.wi.opcode =
2514 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2515 BNA_TXQ_WI_SEND));
2517 if (vlan_tx_tag_present(skb)) {
2518 vlan_tag = (u16) vlan_tx_tag_get(skb);
2519 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2521 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
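/* CEE priority occupies the PCP bits (15:13); mask 0x1fff keeps the
 * CFI/DEI bit and the 12-bit VLAN ID */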
2522 vlan_tag =
2523 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
2524 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2527 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2529 if (skb_is_gso(skb)) {
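/* GSO: seed the pseudo-header checksum, then program the MSS
 * (skb_is_gso() returns gso_size) and the TCP header size/offset
 * so the hardware can segment */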
2530 err = bnad_tso_prepare(bnad, skb);
2531 if (err) {
2532 dev_kfree_skb(skb);
2533 return NETDEV_TX_OK;
2535 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb));
2536 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2537 txqent->hdr.wi.l4_hdr_size_n_offset =
2538 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2539 (tcp_hdrlen(skb) >> 2,
2540 skb_transport_offset(skb)));
2541 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2542 u8 proto = 0;
2544 txqent->hdr.wi.lso_mss = 0;
2546 if (skb->protocol == htons(ETH_P_IP))
2547 proto = ip_hdr(skb)->protocol;
2548 else if (skb->protocol == htons(ETH_P_IPV6)) {
2549 /* nexthdr may not be TCP immediately. */
2550 proto = ipv6_hdr(skb)->nexthdr;
2552 if (proto == IPPROTO_TCP) {
2553 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2554 txqent->hdr.wi.l4_hdr_size_n_offset =
2555 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2556 (0, skb_transport_offset(skb)));
2558 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2560 BUG_ON(!(skb_headlen(skb) >=
2561 skb_transport_offset(skb) + tcp_hdrlen(skb)));
2563 } else if (proto == IPPROTO_UDP) {
2564 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2565 txqent->hdr.wi.l4_hdr_size_n_offset =
2566 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2567 (0, skb_transport_offset(skb)));
2569 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2571 BUG_ON(!(skb_headlen(skb) >=
2572 skb_transport_offset(skb) +
2573 sizeof(struct udphdr)));
2574 } else {
2575 err = skb_checksum_help(skb);
2576 BNAD_UPDATE_CTR(bnad, csum_help);
2577 if (err) {
2578 dev_kfree_skb(skb);
2579 BNAD_UPDATE_CTR(bnad, csum_help_err);
2580 return NETDEV_TX_OK;
2583 } else {
2584 txqent->hdr.wi.lso_mss = 0;
2585 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2588 txqent->hdr.wi.flags = htons(flags);
2590 txqent->hdr.wi.frame_length = htonl(skb->len);
2592 unmap_q->unmap_array[unmap_prod].skb = skb;
2593 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
2594 txqent->vector[vect_id].length = htons(skb_headlen(skb));
2595 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2596 skb_headlen(skb), DMA_TO_DEVICE);
2597 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2598 dma_addr);
2600 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2601 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
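/* One Tx vector per page fragment; every BFI_TX_MAX_VECTORS_PER_WI
 * vectors a new (extension) work item is started */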
2603 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2604 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2605 u32 size = frag->size;
2607 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2608 vect_id = 0;
2609 if (--wi_range)
2610 txqent++;
2611 else {
2612 BNA_QE_INDX_ADD(txq_prod, wis_used,
2613 tcb->q_depth);
2614 wis_used = 0;
2615 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2616 txqent, wi_range);
2617 BUG_ON(!(wi_range <= tcb->q_depth));
2619 wis_used++;
2620 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
2623 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2624 txqent->vector[vect_id].length = htons(size);
2625 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
2626 frag->page_offset, size, DMA_TO_DEVICE);
2627 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2628 dma_addr);
2629 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2630 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2633 unmap_q->producer_index = unmap_prod;
2634 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
2635 tcb->producer_index = txq_prod;
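/* Make the work-item and producer-index updates visible before
 * checking BNAD_TXQ_TX_STARTED and ringing the doorbell */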
2637 smp_mb();
2639 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2640 return NETDEV_TX_OK;
2642 bna_txq_prod_indx_doorbell(tcb);
2644 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2645 tasklet_schedule(&bnad->tx_free_tasklet);
2647 return NETDEV_TX_OK;
2651 * bna_lock is used to synchronize reads of the stats structures,
2652 * which are written by BNA under the same lock.
2654 static struct rtnl_link_stats64 *
2655 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
2657 struct bnad *bnad = netdev_priv(netdev);
2658 unsigned long flags;
2660 spin_lock_irqsave(&bnad->bna_lock, flags);
2662 bnad_netdev_qstats_fill(bnad, stats);
2663 bnad_netdev_hwstats_fill(bnad, stats);
2665 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2667 return stats;
2670 static void
2671 bnad_set_rx_mode(struct net_device *netdev)
2673 struct bnad *bnad = netdev_priv(netdev);
2674 u32 new_mask, valid_mask;
2675 unsigned long flags;
2677 spin_lock_irqsave(&bnad->bna_lock, flags);
2679 new_mask = valid_mask = 0;
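/* valid_mask selects which rx-mode bits are being changed;
 * new_mask carries their new values */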
2681 if (netdev->flags & IFF_PROMISC) {
2682 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
2683 new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2684 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2685 bnad->cfg_flags |= BNAD_CF_PROMISC;
2687 } else {
2688 if (bnad->cfg_flags & BNAD_CF_PROMISC) {
2689 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
2690 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
2691 bnad->cfg_flags &= ~BNAD_CF_PROMISC;
2695 if (netdev->flags & IFF_ALLMULTI) {
2696 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
2697 new_mask |= BNA_RXMODE_ALLMULTI;
2698 valid_mask |= BNA_RXMODE_ALLMULTI;
2699 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
2701 } else {
2702 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
2703 new_mask &= ~BNA_RXMODE_ALLMULTI;
2704 valid_mask |= BNA_RXMODE_ALLMULTI;
2705 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI;
2709 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);
2711 if (!netdev_mc_empty(netdev)) {
2712 u8 *mcaddr_list;
2713 int mc_count = netdev_mc_count(netdev);
2715 /* Index 0 holds the broadcast address */
2716 mcaddr_list =
2717 kzalloc((mc_count + 1) * ETH_ALEN,
2718 GFP_ATOMIC);
2719 if (!mcaddr_list)
2720 goto unlock;
2722 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);
2724 /* Copy rest of the MC addresses */
2725 bnad_netdev_mc_list_get(netdev, mcaddr_list);
2727 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
2728 mcaddr_list, NULL);
2730 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
2731 kfree(mcaddr_list);
2733 unlock:
2734 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2738 * bna_lock is used to sync writes to netdev->dev_addr
2739 * conf_lock cannot be used since this call may be made
2740 * in a non-blocking context.
2742 static int
2743 bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
2745 int err;
2746 struct bnad *bnad = netdev_priv(netdev);
2747 struct sockaddr *sa = (struct sockaddr *)mac_addr;
2748 unsigned long flags;
2750 spin_lock_irqsave(&bnad->bna_lock, flags);
2752 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
2754 if (!err)
2755 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
2757 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2759 return err;
2762 static int
2763 bnad_change_mtu(struct net_device *netdev, int new_mtu)
2765 int mtu, err = 0;
2766 unsigned long flags;
2768 struct bnad *bnad = netdev_priv(netdev);
2770 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
2771 return -EINVAL;
2773 mutex_lock(&bnad->conf_mutex);
2775 netdev->mtu = new_mtu;
2777 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN;
2779 spin_lock_irqsave(&bnad->bna_lock, flags);
2780 bna_port_mtu_set(&bnad->bna.port, mtu, NULL);
2781 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2783 mutex_unlock(&bnad->conf_mutex);
2784 return err;
2787 static void
2788 bnad_vlan_rx_add_vid(struct net_device *netdev,
2789 unsigned short vid)
2791 struct bnad *bnad = netdev_priv(netdev);
2792 unsigned long flags;
2794 if (!bnad->rx_info[0].rx)
2795 return;
2797 mutex_lock(&bnad->conf_mutex);
2799 spin_lock_irqsave(&bnad->bna_lock, flags);
2800 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
2801 set_bit(vid, bnad->active_vlans);
2802 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2804 mutex_unlock(&bnad->conf_mutex);
2807 static void
2808 bnad_vlan_rx_kill_vid(struct net_device *netdev,
2809 unsigned short vid)
2811 struct bnad *bnad = netdev_priv(netdev);
2812 unsigned long flags;
2814 if (!bnad->rx_info[0].rx)
2815 return;
2817 mutex_lock(&bnad->conf_mutex);
2819 spin_lock_irqsave(&bnad->bna_lock, flags);
2820 clear_bit(vid, bnad->active_vlans);
2821 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
2822 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2824 mutex_unlock(&bnad->conf_mutex);
2827 #ifdef CONFIG_NET_POLL_CONTROLLER
2828 static void
2829 bnad_netpoll(struct net_device *netdev)
2831 struct bnad *bnad = netdev_priv(netdev);
2832 struct bnad_rx_info *rx_info;
2833 struct bnad_rx_ctrl *rx_ctrl;
2834 u32 curr_mask;
2835 int i, j;
2837 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2838 bna_intx_disable(&bnad->bna, curr_mask);
2839 bnad_isr(bnad->pcidev->irq, netdev);
2840 bna_intx_enable(&bnad->bna, curr_mask);
2841 } else {
2842 for (i = 0; i < bnad->num_rx; i++) {
2843 rx_info = &bnad->rx_info[i];
2844 if (!rx_info->rx)
2845 continue;
2846 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2847 rx_ctrl = &rx_info->rx_ctrl[j];
2848 if (rx_ctrl->ccb) {
2849 bnad_disable_rx_irq(bnad,
2850 rx_ctrl->ccb);
2851 bnad_netif_rx_schedule_poll(bnad,
2852 rx_ctrl->ccb);
2858 #endif
2860 static const struct net_device_ops bnad_netdev_ops = {
2861 .ndo_open = bnad_open,
2862 .ndo_stop = bnad_stop,
2863 .ndo_start_xmit = bnad_start_xmit,
2864 .ndo_get_stats64 = bnad_get_stats64,
2865 .ndo_set_rx_mode = bnad_set_rx_mode,
2866 .ndo_set_multicast_list = bnad_set_rx_mode,
2867 .ndo_validate_addr = eth_validate_addr,
2868 .ndo_set_mac_address = bnad_set_mac_address,
2869 .ndo_change_mtu = bnad_change_mtu,
2870 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
2871 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
2872 #ifdef CONFIG_NET_POLL_CONTROLLER
2873 .ndo_poll_controller = bnad_netpoll
2874 #endif
2877 static void
2878 bnad_netdev_init(struct bnad *bnad, bool using_dac)
2880 struct net_device *netdev = bnad->netdev;
2882 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2883 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2884 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
2886 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
2887 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2888 NETIF_F_TSO | NETIF_F_TSO6;
2890 netdev->features |= netdev->hw_features |
2891 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2893 if (using_dac)
2894 netdev->features |= NETIF_F_HIGHDMA;
2896 netdev->mem_start = bnad->mmio_start;
2897 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
2899 netdev->netdev_ops = &bnad_netdev_ops;
2900 bnad_set_ethtool_ops(netdev);
2904 * 1. Initialize the bnad structure
2905 * 2. Setup netdev pointer in pci_dev
2906 * 3. Initialize Tx free tasklet
2907 * 4. Initialize no. of TxQ & CQs & MSIX vectors
2909 static int
2910 bnad_init(struct bnad *bnad,
2911 struct pci_dev *pdev, struct net_device *netdev)
2913 unsigned long flags;
2915 SET_NETDEV_DEV(netdev, &pdev->dev);
2916 pci_set_drvdata(pdev, netdev);
2918 bnad->netdev = netdev;
2919 bnad->pcidev = pdev;
2920 bnad->mmio_start = pci_resource_start(pdev, 0);
2921 bnad->mmio_len = pci_resource_len(pdev, 0);
2922 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
2923 if (!bnad->bar0) {
2924 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
2925 pci_set_drvdata(pdev, NULL);
2926 return -ENOMEM;
2928 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
2929 (unsigned long long) bnad->mmio_len);
2931 spin_lock_irqsave(&bnad->bna_lock, flags);
2932 if (!bnad_msix_disable)
2933 bnad->cfg_flags = BNAD_CF_MSIX;
2935 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
2937 bnad_q_num_init(bnad);
2938 spin_unlock_irqrestore(&bnad->bna_lock, flags);
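/* One MSI-X vector per TxQ and per Rx path, plus the mailbox vector(s) */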
2940 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2941 (bnad->num_rx * bnad->num_rxp_per_rx) +
2942 BNAD_MAILBOX_MSIX_VECTORS;
2944 bnad->txq_depth = BNAD_TXQ_DEPTH;
2945 bnad->rxq_depth = BNAD_RXQ_DEPTH;
2947 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
2948 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2950 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
2951 (unsigned long)bnad);
2953 return 0;
2957 * Must be called after bnad_pci_uninit()
2958 * so that iounmap() and pci_set_drvdata(NULL)
2959 * happen only after PCI uninitialization.
2961 static void
2962 bnad_uninit(struct bnad *bnad)
2964 if (bnad->bar0)
2965 iounmap(bnad->bar0);
2966 pci_set_drvdata(bnad->pcidev, NULL);
2970 * Initialize locks
2971 a) Per-device mutex used for serializing configuration
2972 changes from OS interface
2973 b) spin lock used to protect bna state machine
2975 static void
2976 bnad_lock_init(struct bnad *bnad)
2978 spin_lock_init(&bnad->bna_lock);
2979 mutex_init(&bnad->conf_mutex);
2982 static void
2983 bnad_lock_uninit(struct bnad *bnad)
2985 mutex_destroy(&bnad->conf_mutex);
2988 /* PCI Initialization */
2989 static int
2990 bnad_pci_init(struct bnad *bnad,
2991 struct pci_dev *pdev, bool *using_dac)
2993 int err;
2995 err = pci_enable_device(pdev);
2996 if (err)
2997 return err;
2998 err = pci_request_regions(pdev, BNAD_NAME);
2999 if (err)
3000 goto disable_device;
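/* Prefer a 64-bit DMA mask; fall back to 32-bit if either the
 * streaming or the coherent mask cannot be set */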
3001 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3002 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3003 *using_dac = 1;
3004 } else {
3005 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3006 if (err) {
3007 err = dma_set_coherent_mask(&pdev->dev,
3008 DMA_BIT_MASK(32));
3009 if (err)
3010 goto release_regions;
3012 *using_dac = 0;
3014 pci_set_master(pdev);
3015 return 0;
3017 release_regions:
3018 pci_release_regions(pdev);
3019 disable_device:
3020 pci_disable_device(pdev);
3022 return err;
3025 static void
3026 bnad_pci_uninit(struct pci_dev *pdev)
3028 pci_release_regions(pdev);
3029 pci_disable_device(pdev);
3032 static int __devinit
3033 bnad_pci_probe(struct pci_dev *pdev,
3034 const struct pci_device_id *pcidev_id)
3036 bool using_dac = false;
3037 int err;
3038 struct bnad *bnad;
3039 struct bna *bna;
3040 struct net_device *netdev;
3041 struct bfa_pcidev pcidev_info;
3042 unsigned long flags;
3044 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3045 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3047 mutex_lock(&bnad_fwimg_mutex);
3048 if (!cna_get_firmware_buf(pdev)) {
3049 mutex_unlock(&bnad_fwimg_mutex);
3050 pr_warn("Failed to load Firmware Image!\n");
3051 return -ENODEV;
3053 mutex_unlock(&bnad_fwimg_mutex);
3056 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
3057 * bnad = netdev_priv(netdev)
3059 netdev = alloc_etherdev(sizeof(struct bnad));
3060 if (!netdev) {
3061 dev_err(&pdev->dev, "alloc_etherdev failed\n");
3062 err = -ENOMEM;
3063 return err;
3065 bnad = netdev_priv(netdev);
3068 * PCI initialization
3069 * Output : using_dac = 1 for 64 bit DMA
3070 * = 0 for 32 bit DMA
3072 err = bnad_pci_init(bnad, pdev, &using_dac);
3073 if (err)
3074 goto free_netdev;
3076 bnad_lock_init(bnad);
3078 * Initialize bnad structure
3079 * Setup relation between pci_dev & netdev
3080 * Init Tx free tasklet
3082 err = bnad_init(bnad, pdev, netdev);
3083 if (err)
3084 goto pci_uninit;
3085 /* Initialize netdev structure, set up ethtool ops */
3086 bnad_netdev_init(bnad, using_dac);
3088 /* Set link to down state */
3089 netif_carrier_off(netdev);
3091 bnad_enable_msix(bnad);
3093 /* Get resource requirement from bna */
3094 bna_res_req(&bnad->res_info[0]);
3096 /* Allocate resources from bna */
3097 err = bnad_res_alloc(bnad);
3098 if (err)
3099 goto free_netdev;
3101 bna = &bnad->bna;
3103 /* Setup pcidev_info for bna_init() */
3104 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3105 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3106 pcidev_info.device_id = bnad->pcidev->device;
3107 pcidev_info.pci_bar_kva = bnad->bar0;
3109 mutex_lock(&bnad->conf_mutex);
3111 spin_lock_irqsave(&bnad->bna_lock, flags);
3112 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3113 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3115 bnad->stats.bna_stats = &bna->stats;
3117 /* Set up timers */
3118 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout,
3119 ((unsigned long)bnad));
3120 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3121 ((unsigned long)bnad));
3122 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3123 ((unsigned long)bnad));
3124 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
3125 ((unsigned long)bnad));
3127 /* Now start the timer before calling IOC */
3128 mod_timer(&bnad->bna.device.ioc.iocpf_timer,
3129 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3132 * Start the chip
3133 * Don't bail out even if err != 0; the bna state machine
3134 * will deal with it
3136 err = bnad_device_enable(bnad);
3138 /* Get the burnt-in mac */
3139 spin_lock_irqsave(&bnad->bna_lock, flags);
3140 bna_port_mac_get(&bna->port, &bnad->perm_addr);
3141 bnad_set_netdev_perm_addr(bnad);
3142 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3144 mutex_unlock(&bnad->conf_mutex);
3146 /* Finally, register with the net_device layer */
3147 err = register_netdev(netdev);
3148 if (err) {
3149 pr_err("BNA : Registering with netdev failed\n");
3150 goto disable_device;
3153 return 0;
3155 disable_device:
3156 mutex_lock(&bnad->conf_mutex);
3157 bnad_device_disable(bnad);
3158 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3159 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3160 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3161 spin_lock_irqsave(&bnad->bna_lock, flags);
3162 bna_uninit(bna);
3163 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3164 mutex_unlock(&bnad->conf_mutex);
3166 bnad_res_free(bnad);
3167 bnad_disable_msix(bnad);
3168 pci_uninit:
3169 bnad_pci_uninit(pdev);
3170 bnad_lock_uninit(bnad);
3171 bnad_uninit(bnad);
3172 free_netdev:
3173 free_netdev(netdev);
3174 return err;
3177 static void __devexit
3178 bnad_pci_remove(struct pci_dev *pdev)
3180 struct net_device *netdev = pci_get_drvdata(pdev);
3181 struct bnad *bnad;
3182 struct bna *bna;
3183 unsigned long flags;
3185 if (!netdev)
3186 return;
3188 pr_info("%s bnad_pci_remove\n", netdev->name);
3189 bnad = netdev_priv(netdev);
3190 bna = &bnad->bna;
3192 unregister_netdev(netdev);
3194 mutex_lock(&bnad->conf_mutex);
3195 bnad_device_disable(bnad);
3196 del_timer_sync(&bnad->bna.device.ioc.ioc_timer);
3197 del_timer_sync(&bnad->bna.device.ioc.sem_timer);
3198 del_timer_sync(&bnad->bna.device.ioc.hb_timer);
3199 spin_lock_irqsave(&bnad->bna_lock, flags);
3200 bna_uninit(bna);
3201 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3202 mutex_unlock(&bnad->conf_mutex);
3204 bnad_res_free(bnad);
3205 bnad_disable_msix(bnad);
3206 bnad_pci_uninit(pdev);
3207 bnad_lock_uninit(bnad);
3208 bnad_uninit(bnad);
3209 free_netdev(netdev);
3212 static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
3214 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3215 PCI_DEVICE_ID_BROCADE_CT),
3216 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3217 .class_mask = 0xffff00
3218 }, {0, }
3221 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3223 static struct pci_driver bnad_pci_driver = {
3224 .name = BNAD_NAME,
3225 .id_table = bnad_pci_id_table,
3226 .probe = bnad_pci_probe,
3227 .remove = __devexit_p(bnad_pci_remove),
3230 static int __init
3231 bnad_module_init(void)
3233 int err;
3235 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3236 BNAD_VERSION);
3238 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3240 err = pci_register_driver(&bnad_pci_driver);
3241 if (err < 0) {
3242 pr_err("bna : PCI registration failed in module init "
3243 "(%d)\n", err);
3244 return err;
3247 return 0;
3250 static void __exit
3251 bnad_module_exit(void)
3253 pci_unregister_driver(&bnad_pci_driver);
3255 if (bfi_fw)
3256 release_firmware(bfi_fw);
3259 module_init(bnad_module_init);
3260 module_exit(bnad_module_exit);
3262 MODULE_AUTHOR("Brocade");
3263 MODULE_LICENSE("GPL");
3264 MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3265 MODULE_VERSION(BNAD_VERSION);
3266 MODULE_FIRMWARE(CNA_FW_FILE_CT);